diff --git a/build.zig b/build.zig index 847303340d..9cfebebc56 100644 --- a/build.zig +++ b/build.zig @@ -30,6 +30,7 @@ pub fn build(b: *std.Build) !void { const test_step = b.step("test", "Run all the tests"); const skip_install_lib_files = b.option(bool, "no-lib", "skip copying of lib/ files and langref to installation prefix. Useful for development") orelse false; const skip_install_langref = b.option(bool, "no-langref", "skip copying of langref to the installation prefix") orelse skip_install_lib_files; + const no_bin = b.option(bool, "no-bin", "skip emitting compiler binary") orelse false; const docgen_exe = b.addExecutable(.{ .name = "docgen", @@ -166,6 +167,7 @@ pub fn build(b: *std.Build) !void { exe.pie = pie; exe.sanitize_thread = sanitize_thread; exe.entitlements = entitlements; + if (no_bin) exe.emit_bin = .no_emit; exe.build_id = b.option( std.Build.Step.Compile.BuildId, diff --git a/doc/langref.html.in b/doc/langref.html.in index 14dda686a9..6740d147bd 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -10176,7 +10176,7 @@ pub fn main() void { {#header_open|Invalid Error Set Cast#}

At compile-time:

- {#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{A,C}'#} + {#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{C,A}'#} const Set1 = error{ A, B, diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index bbfa588d6d..c2a2486dfa 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -459,6 +459,28 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { return self.items[prev_len..][0..n]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// The returned slice becomes invalid when the list is resized. + /// Resizes the list if `self.capacity` is not large enough. + pub fn addManyAsSlice(self: *Self, n: usize) Allocator.Error![]T { + const prev_len = self.items.len; + try self.resize(self.items.len + n); + return self.items[prev_len..][0..n]; + } + + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// Asserts that there is already space for the `n` new elements without allocating more. + /// **Does not** invalidate element pointers. + /// The returned slice becomes invalid when the list is resized. + pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T { + assert(self.items.len + n <= self.capacity); + const prev_len = self.items.len; + self.items.len += n; + return self.items[prev_len..][0..n]; + } + /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// Invalidates pointers to the removed element. @@ -949,6 +971,28 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ return self.items[prev_len..][0..n]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// The returned slice becomes invalid when the list is resized. + /// Resizes the list if `self.capacity` is not large enough. + pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T { + const prev_len = self.items.len; + try self.resize(allocator, self.items.len + n); + return self.items[prev_len..][0..n]; + } + + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// Asserts that there is already space for the `n` new elements without allocating more. + /// **Does not** invalidate element pointers. + /// The returned slice becomes invalid when the list is resized. + pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T { + assert(self.items.len + n <= self.capacity); + const prev_len = self.items.len; + self.items.len += n; + return self.items[prev_len..][0..n]; + } + /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// Invalidates pointers to the last element. diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index ec69270d15..ef93bb14ee 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -143,7 +143,7 @@ pub const Mode = OptimizeMode; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation.
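As a usage sketch for the `addManyAsSlice` helper added to `std.ArrayList` above (the buffer contents and test name are illustrative, not part of the patch):

const std = @import("std");

test "addManyAsSlice grows the list and returns the new slice" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();

    // Grow by four elements in one step and fill the returned slice in
    // place, instead of appending one element at a time.
    const dest = try list.addManyAsSlice(4);
    @memcpy(dest, "abcd");
    try std.testing.expectEqualStrings("abcd", list.items);
}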
-pub const CallingConvention = enum { +pub const CallingConvention = enum(u8) { /// This is the default Zig calling convention used when not using `export` on `fn` /// and no other calling convention is specified. Unspecified, @@ -190,7 +190,7 @@ pub const CallingConvention = enum { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. -pub const AddressSpace = enum { +pub const AddressSpace = enum(u5) { generic, gs, fs, @@ -283,7 +283,7 @@ pub const Type = union(enum) { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. - pub const Size = enum { + pub const Size = enum(u2) { One, Many, Slice, diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 7cf0f4681a..db85242002 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -530,7 +530,7 @@ pub const ChildProcess = struct { // can fail between fork() and execve(). // Therefore, we do all the allocation for the execve() before the fork(). // This means we must do the null-termination of argv and env vars here. - const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null); + const argv_buf = try arena.allocSentinel(?[*:0]const u8, self.argv.len, null); for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const envp = m: { @@ -542,7 +542,7 @@ pub const ChildProcess = struct { } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr); + break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 2745bd4e6f..5b9b00538a 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -1256,10 +1256,8 @@ fn limitedOverlapCopy(frag: []u8, in: usize) void { // A single, non-overlapping memcpy suffices. @memcpy(frag[0..first.len], first); } else { - // Need two memcpy calls because one alone would overlap. - @memcpy(frag[0..in], first[0..in]); - const leftover = first.len - in; - @memcpy(frag[in..][0..leftover], first[in..][0..leftover]); + // One memcpy call would overlap, so just do this instead. + std.mem.copyForwards(u8, frag, first); } } diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index dbe1f6e8b6..4de08b25d7 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -936,6 +936,7 @@ pub const DwarfInfo = struct { const ranges_val = compile_unit.die.getAttr(AT.ranges) orelse continue; const ranges_offset = switch (ranges_val.*) { .SecOffset => |off| off, + .Const => |c| try c.asUnsignedLe(), .RangeListOffset => |idx| off: { if (compile_unit.is_64) { const offset_loc = @intCast(usize, compile_unit.rnglists_base + 8 * idx); diff --git a/lib/std/hash.zig b/lib/std/hash.zig index 5c85b38d55..eca7a70159 100644 --- a/lib/std/hash.zig +++ b/lib/std/hash.zig @@ -36,6 +36,20 @@ const xxhash = @import("hash/xxhash.zig"); pub const XxHash64 = xxhash.XxHash64; pub const XxHash32 = xxhash.XxHash32; +/// This is handy if you have a u32 and want a u32 and don't want to take a +/// detour through many layers of abstraction elsewhere in the std.hash +/// namespace. 
+/// Copied from https://nullprogram.com/blog/2018/07/31/ +pub fn uint32(input: u32) u32 { + var x: u32 = input; + x ^= x >> 16; + x *%= 0x7feb352d; + x ^= x >> 15; + x *%= 0x846ca68b; + x ^= x >> 16; + return x; +} + test { _ = adler; _ = auto_hash; diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index 0c88caae7e..251ac120f6 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -91,15 +91,21 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { // Help the optimizer see that hashing an int is easy by inlining! // TODO Check if the situation is better after #561 is resolved. - .Int => { - if (comptime meta.trait.hasUniqueRepresentation(Key)) { - @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) }); - } else { - // Take only the part containing the key value, the remaining - // bytes are undefined and must not be hashed! - const byte_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(Key), 8) catch unreachable; - @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key)[0..byte_size] }); - } + .Int => |int| switch (int.signedness) { + .signed => hash(hasher, @bitCast(@Type(.{ .Int = .{ + .bits = int.bits, + .signedness = .unsigned, + } }), key), strat), + .unsigned => { + if (comptime meta.trait.hasUniqueRepresentation(Key)) { + @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) }); + } else { + // Take only the part containing the key value, the remaining + // bytes are undefined and must not be hashed! + const byte_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(Key), 8) catch unreachable; + @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key)[0..byte_size] }); + } + }, }, .Bool => hash(hasher, @boolToInt(key), strat), diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index c4d3ccf077..ec79d843da 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -2158,6 +2158,9 @@ pub const Const = struct { pub fn to(self: Const, comptime T: type) ConvertError!T { switch (@typeInfo(T)) { .Int => |info| { + // Make sure -0 is handled correctly. + if (self.eqZero()) return 0; + const UT = std.meta.Int(.unsigned, info.bits); if (!self.fitsInTwosComp(info.signedness, info.bits)) { @@ -2509,7 +2512,7 @@ pub const Const = struct { return total_limb_lz + bits - total_limb_bits; } - pub fn ctz(a: Const) Limb { + pub fn ctz(a: Const, bits: Limb) Limb { // Limbs are stored in little-endian order. var result: Limb = 0; for (a.limbs) |limb| { @@ -2517,7 +2520,7 @@ pub const Const = struct { result += limb_tz; if (limb_tz != @sizeOf(Limb) * 8) break; } - return result; + return @min(result, bits); } }; diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 212d09a1a8..d6ca4a9ea1 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -4226,7 +4226,8 @@ pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { /// The alignment must be a power of 2 and greater than 0. /// Asserts that rounding up the address does not cause integer overflow. 
pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T { - assert(isValidAlignGeneric(T, alignment)); + assert(alignment > 0); + assert(std.math.isPowerOfTwo(alignment)); return alignBackwardGeneric(T, addr + (alignment - 1), alignment); } diff --git a/lib/std/process.zig b/lib/std/process.zig index 80be705187..6ad0df868e 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1131,7 +1131,7 @@ pub fn execve( defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null); + const argv_buf = try arena.allocSentinel(?[*:0]const u8, argv.len, null); for (argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const envp = m: { @@ -1143,7 +1143,7 @@ pub fn execve( } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr); + break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process"); diff --git a/src/Air.zig b/src/Air.zig index 7ee36206f1..b179a3c024 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -5,16 +5,18 @@ const std = @import("std"); const builtin = @import("builtin"); +const assert = std.debug.assert; + +const Air = @This(); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; -const assert = std.debug.assert; -const Air = @This(); +const InternPool = @import("InternPool.zig"); +const Module = @import("Module.zig"); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. /// The first few indexes are reserved. See `ExtraIndex` for the values. extra: []const u32, -values: []const Value, pub const ExtraIndex = enum(u32) { /// Payload index of the main `Block` in the `extra` array. @@ -183,6 +185,18 @@ pub const Inst = struct { /// Allocates stack local memory. /// Uses the `ty` field. alloc, + /// This special instruction only exists temporarily during semantic + /// analysis and is guaranteed to be unreachable in machine code + /// backends. It tracks a set of types that have been stored to an + /// inferred allocation. + /// Uses the `inferred_alloc` field. + inferred_alloc, + /// This special instruction only exists temporarily during semantic + /// analysis and is guaranteed to be unreachable in machine code + /// backends. Used to coordinate alloc_inferred, store_to_inferred_ptr, + /// and resolve_inferred_alloc instructions for comptime code. + /// Uses the `inferred_alloc_comptime` field. + inferred_alloc_comptime, /// If the function will pass the result by-ref, this instruction returns the /// result pointer. Otherwise it is equivalent to `alloc`. /// Uses the `ty` field. @@ -394,11 +408,9 @@ pub const Inst = struct { /// was executed on the operand. /// Uses the `ty_pl` field. Payload is `TryPtr`. try_ptr, - /// A comptime-known value. Uses the `ty_pl` field, payload is index of - /// `values` array. - constant, - /// A comptime-known type. Uses the `ty` field. - const_ty, + /// A comptime-known value via an index into the InternPool. + /// Uses the `interned` field. + interned, /// Notes the beginning of a source code statement and marks the line and column. /// Result type is always void. /// Uses the `dbg_stmt` field. 
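A small worked example of the contract enforced by the two new asserts in the `alignForwardGeneric` hunk above (the concrete values are chosen for illustration):

const std = @import("std");

test "alignForwardGeneric rounds an address up to a power-of-two alignment" {
    // 9 rounded up to a multiple of 8 is 16; 8 is already aligned.
    try std.testing.expectEqual(@as(usize, 16), std.mem.alignForwardGeneric(usize, 9, 8));
    try std.testing.expectEqual(@as(usize, 8), std.mem.alignForwardGeneric(usize, 8, 8));
    // A non-power-of-two alignment such as 12 would now fail the asserts.
}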
@@ -408,10 +420,10 @@ pub const Inst = struct { /// Marks the end of a semantic scope for debug info variables. dbg_block_end, /// Marks the start of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_begin, /// Marks the end of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_end, /// Marks the beginning of a local variable. The operand is a pointer pointing /// to the storage for the variable. The local may be a const or a var. @@ -837,7 +849,96 @@ pub const Inst = struct { /// The position of an AIR instruction within the `Air` instructions array. pub const Index = u32; - pub const Ref = @import("Zir.zig").Inst.Ref; + pub const Ref = enum(u32) { + u1_type = @enumToInt(InternPool.Index.u1_type), + u8_type = @enumToInt(InternPool.Index.u8_type), + i8_type = @enumToInt(InternPool.Index.i8_type), + u16_type = @enumToInt(InternPool.Index.u16_type), + i16_type = @enumToInt(InternPool.Index.i16_type), + u29_type = @enumToInt(InternPool.Index.u29_type), + u32_type = @enumToInt(InternPool.Index.u32_type), + i32_type = @enumToInt(InternPool.Index.i32_type), + u64_type = @enumToInt(InternPool.Index.u64_type), + i64_type = @enumToInt(InternPool.Index.i64_type), + u80_type = @enumToInt(InternPool.Index.u80_type), + u128_type = @enumToInt(InternPool.Index.u128_type), + i128_type = @enumToInt(InternPool.Index.i128_type), + usize_type = @enumToInt(InternPool.Index.usize_type), + isize_type = @enumToInt(InternPool.Index.isize_type), + c_char_type = @enumToInt(InternPool.Index.c_char_type), + c_short_type = @enumToInt(InternPool.Index.c_short_type), + c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type), + c_int_type = @enumToInt(InternPool.Index.c_int_type), + c_uint_type = @enumToInt(InternPool.Index.c_uint_type), + c_long_type = @enumToInt(InternPool.Index.c_long_type), + c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type), + c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type), + c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type), + c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type), + f16_type = @enumToInt(InternPool.Index.f16_type), + f32_type = @enumToInt(InternPool.Index.f32_type), + f64_type = @enumToInt(InternPool.Index.f64_type), + f80_type = @enumToInt(InternPool.Index.f80_type), + f128_type = @enumToInt(InternPool.Index.f128_type), + anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type), + bool_type = @enumToInt(InternPool.Index.bool_type), + void_type = @enumToInt(InternPool.Index.void_type), + type_type = @enumToInt(InternPool.Index.type_type), + anyerror_type = @enumToInt(InternPool.Index.anyerror_type), + comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type), + comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type), + noreturn_type = @enumToInt(InternPool.Index.noreturn_type), + anyframe_type = @enumToInt(InternPool.Index.anyframe_type), + null_type = @enumToInt(InternPool.Index.null_type), + undefined_type = @enumToInt(InternPool.Index.undefined_type), + enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type), + atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type), + atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type), + calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type), + address_space_type = @enumToInt(InternPool.Index.address_space_type), + 
float_mode_type = @enumToInt(InternPool.Index.float_mode_type), + reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type), + call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type), + prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type), + export_options_type = @enumToInt(InternPool.Index.export_options_type), + extern_options_type = @enumToInt(InternPool.Index.extern_options_type), + type_info_type = @enumToInt(InternPool.Index.type_info_type), + manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), + manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), + single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), + slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type), + slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), + anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), + generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), + undef = @enumToInt(InternPool.Index.undef), + zero = @enumToInt(InternPool.Index.zero), + zero_usize = @enumToInt(InternPool.Index.zero_usize), + zero_u8 = @enumToInt(InternPool.Index.zero_u8), + one = @enumToInt(InternPool.Index.one), + one_usize = @enumToInt(InternPool.Index.one_usize), + one_u8 = @enumToInt(InternPool.Index.one_u8), + four_u8 = @enumToInt(InternPool.Index.four_u8), + negative_one = @enumToInt(InternPool.Index.negative_one), + calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), + calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), + void_value = @enumToInt(InternPool.Index.void_value), + unreachable_value = @enumToInt(InternPool.Index.unreachable_value), + null_value = @enumToInt(InternPool.Index.null_value), + bool_true = @enumToInt(InternPool.Index.bool_true), + bool_false = @enumToInt(InternPool.Index.bool_false), + empty_struct = @enumToInt(InternPool.Index.empty_struct), + generic_poison = @enumToInt(InternPool.Index.generic_poison), + + /// This Ref does not correspond to any AIR instruction or constant + /// value. It is used to handle argument types of var args functions. + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), + /// This Ref does not correspond to any AIR instruction or constant + /// value and may instead be used as a sentinel to indicate null. + none = @enumToInt(InternPool.Index.none), + _, + }; /// All instructions have an 8-byte payload, which is contained within /// this union. `Tag` determines which union field is active, as well as @@ -845,6 +946,7 @@ pub const Inst = struct { pub const Data = union { no_op: void, un_op: Ref, + interned: InternPool.Index, bin_op: struct { lhs: Ref, @@ -864,6 +966,10 @@ pub const Inst = struct { // Index into a different array. payload: u32, }, + ty_fn: struct { + ty: Ref, + func: Module.Fn.Index, + }, br: struct { block_inst: Index, operand: Ref, @@ -896,6 +1002,19 @@ pub const Inst = struct { // Index into a different array. 
payload: u32, }, + inferred_alloc_comptime: InferredAllocComptime, + inferred_alloc: InferredAlloc, + + pub const InferredAllocComptime = struct { + decl_index: Module.Decl.Index, + alignment: InternPool.Alignment, + is_const: bool, + }; + + pub const InferredAlloc = struct { + alignment: InternPool.Alignment, + is_const: bool, + }; // Make sure we don't accidentally add a field to make this union // bigger than expected. Note that in Debug builds, Zig is allowed @@ -974,8 +1093,7 @@ pub const FieldParentPtr = struct { pub const Shuffle = struct { a: Inst.Ref, b: Inst.Ref, - // index to air_values - mask: u32, + mask: InternPool.Index, mask_len: u32, }; @@ -1064,15 +1182,15 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { +pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].ty; + if (ref_int < InternPool.static_keys.len) { + return InternPool.static_keys[ref_int].typeOf().toType(); } - return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len)); + return air.typeOfIndex(ref_int - ref_start_index, ip); } -pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type { const datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst]) { .add, @@ -1114,7 +1232,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .div_exact_optimized, .rem_optimized, .mod_optimized, - => return air.typeOf(datas[inst].bin_op.lhs), + => return air.typeOf(datas[inst].bin_op.lhs, ip), .sqrt, .sin, @@ -1132,7 +1250,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .trunc_float, .neg, .neg_optimized, - => return air.typeOf(datas[inst].un_op), + => return air.typeOf(datas[inst].un_op, ip), .cmp_lt, .cmp_lte, @@ -1159,8 +1277,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .error_set_has_value, => return Type.bool, - .const_ty => return Type.type, - .alloc, .ret_ptr, .err_return_trace, @@ -1171,7 +1287,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .assembly, .block, - .constant, .struct_field_ptr, .struct_field_val, .slice_elem_ptr, @@ -1194,6 +1309,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .try_ptr, => return air.getRefType(datas[inst].ty_pl.ty), + .interned => return ip.typeOf(datas[inst].interned).toType(), + .not, .bitcast, .load, @@ -1243,7 +1360,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .ret_load, .unreach, .trap, - => return Type.initTag(.noreturn), + => return Type.noreturn, .breakpoint, .dbg_stmt, @@ -1280,63 +1397,67 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .wasm_memory_grow => return Type.i32, .wasm_memory_size => return Type.u32, - .bool_to_int => return Type.initTag(.u1), + .bool_to_int => return Type.u1, - .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), + .tag_name, .error_name => return Type.slice_const_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { - const callee_ty = air.typeOf(datas[inst].pl_op.operand); - switch (callee_ty.zigTypeTag()) { - .Fn => return callee_ty.fnReturnType(), - .Pointer => return callee_ty.childType().fnReturnType(), - else => unreachable, - } + const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); + 
return callee_ty.fnReturnTypeIp(ip); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { - const ptr_ty = air.typeOf(datas[inst].bin_op.lhs); - return ptr_ty.elemType(); + const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip); + return ptr_ty.childTypeIp(ip); }, .atomic_load => { - const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr); - return ptr_ty.elemType(); + const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip); + return ptr_ty.childTypeIp(ip); }, .atomic_rmw => { - const ptr_ty = air.typeOf(datas[inst].pl_op.operand); - return ptr_ty.elemType(); + const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip); + return ptr_ty.childTypeIp(ip); }, - .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand).childType(), + .reduce, .reduce_optimized => { + const operand_ty = air.typeOf(datas[inst].reduce.operand, ip); + return ip.indexToKey(operand_ty.ip_index).vector_type.child.toType(); + }, - .mul_add => return air.typeOf(datas[inst].pl_op.operand), + .mul_add => return air.typeOf(datas[inst].pl_op.operand, ip), .select => { const extra = air.extraData(Air.Bin, datas[inst].pl_op.payload).data; - return air.typeOf(extra.lhs); + return air.typeOf(extra.lhs, ip); }, .@"try" => { - const err_union_ty = air.typeOf(datas[inst].pl_op.operand); - return err_union_ty.errorUnionPayload(); + const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip); + return ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type.toType(); }, .work_item_id, .work_group_size, .work_group_id, => return Type.u32, + + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, } } pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const ref_int = @enumToInt(ref); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - var buffer: Value.ToTypeBuffer = undefined; - return Air.Inst.Ref.typed_value_map[ref_int].val.toType(&buffer); + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toType(); } - const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len; + const inst_index = ref_int - ref_start_index; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); - assert(air_tags[inst_index] == .const_ty); - return air_datas[inst_index].ty; + return switch (air_tags[inst_index]) { + .interned => air_datas[inst_index].interned.toType(), + else => unreachable, + }; } /// Returns the requested data, as well as the new index which is at the start of the @@ -1350,7 +1471,8 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end u32 => air.extra[i], Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]), i32 => @bitCast(i32, air.extra[i]), - else => @compileError("bad field type"), + InternPool.Index => @intToEnum(InternPool.Index, air.extra[i]), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; } @@ -1363,17 +1485,17 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { air.instructions.deinit(gpa); gpa.free(air.extra); - gpa.free(air.values); air.* = undefined; } -const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; +pub const ref_start_index: u32 = InternPool.static_len; -pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { - return @intToEnum(Air.Inst.Ref, ref_start_index + inst); +pub fn indexToRef(inst: Inst.Index) Inst.Ref { + return @intToEnum(Inst.Ref, ref_start_index + inst); } -pub fn refToIndex(inst: Air.Inst.Ref) 
?Air.Inst.Index { +pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { + assert(inst != .none); const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; @@ -1382,18 +1504,23 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } } +pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { + if (inst == .none) return null; + return refToIndex(inst); +} + /// Returns `null` if runtime-known. -pub fn value(air: Air, inst: Air.Inst.Ref) ?Value { +pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].val; + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toValue(); } - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index); const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { - .constant => return air.values[air_datas[inst_index].ty_pl.payload], - .const_ty => unreachable, - else => return air.typeOfIndex(inst_index).onePossibleValue(), + .interned => return air_datas[inst_index].interned.toValue(), + else => return air.typeOfIndex(inst_index, &mod.intern_pool).onePossibleValue(mod), } } @@ -1406,10 +1533,11 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { return bytes[0..end :0]; } -/// Returns whether the given instruction must always be lowered, for instance because it can cause -/// side effects. If an instruction does not need to be lowered, and Liveness determines its result -/// is unused, backends should avoid lowering it. -pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { +/// Returns whether the given instruction must always be lowered, for instance +/// because it can cause side effects. If an instruction does not need to be +/// lowered, and Liveness determines its result is unused, backends should +/// avoid lowering it. 
+pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { const data = air.instructions.items(.data)[inst]; return switch (air.instructions.items(.tag)[inst]) { .arg, @@ -1498,6 +1626,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { .mul_with_overflow, .shl_with_overflow, .alloc, + .inferred_alloc, + .inferred_alloc_comptime, .ret_ptr, .bit_and, .bit_or, @@ -1546,8 +1676,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { .cmp_neq_optimized, .cmp_vector, .cmp_vector_optimized, - .constant, - .const_ty, + .interned, .is_null, .is_non_null, .is_null_ptr, @@ -1616,8 +1745,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { => false, .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, - .load => air.typeOf(data.ty_op.operand).isVolatilePtr(), - .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(), - .atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(), + .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), + .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), + .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), }; } diff --git a/src/AstGen.zig b/src/AstGen.zig index b38067fd03..17cf2aae64 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -3934,7 +3934,7 @@ fn fnDecl( var section_gz = decl_gz.makeSubBlock(params_scope); defer section_gz.unstack(); const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .const_slice_u8_type } }, fn_proto.ast.section_expr); + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr); if (section_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. 
break :inst inst; @@ -4137,7 +4137,7 @@ fn globalVarDecl( break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node); }; const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: { - break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node); + break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .slice_const_u8_type } }, var_decl.ast.section_node); }; const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none; wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace); @@ -4497,7 +4497,7 @@ fn testDecl( .cc_gz = null, .align_ref = .none, .align_gz = null, - .ret_ref = .void_type, + .ret_ref = .anyerror_void_error_union_type, .ret_gz = null, .section_ref = .none, .section_gz = null, @@ -4510,7 +4510,7 @@ fn testDecl( .body_gz = &fn_block, .lib_name = 0, .is_var_args = false, - .is_inferred_error = true, + .is_inferred_error = false, .is_test = true, .is_extern = false, .is_noinline = false, @@ -7878,7 +7878,7 @@ fn unionInit( params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const union_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{ .container_type = union_type, .field_name = field_name, @@ -8100,12 +8100,12 @@ fn builtinCall( if (ri.rl == .ref) { return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); } const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); return rvalue(gz, ri, result, node); }, @@ -8271,11 +8271,11 @@ fn builtinCall( .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), .ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ptr_to_int), - .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error), .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), .enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int), .bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int), - .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file), .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = 
.anyerror_type } }, params[0], .error_name), .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety), .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), @@ -8334,7 +8334,7 @@ fn builtinCall( }, .panic => { try emitDbgNode(gz, node); - return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .panic); + return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic); }, .trap => { try emitDbgNode(gz, node); @@ -8450,7 +8450,7 @@ fn builtinCall( }, .c_define => { if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]); const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), @@ -8530,7 +8530,7 @@ fn builtinCall( return rvalue(gz, ri, result, node); }, .call => { - const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .modifier_type } }, params[0]); + const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]); const callee = try expr(gz, scope, .{ .rl = .none }, params[1]); const args = try expr(gz, scope, .{ .rl = .none }, params[2]); const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ @@ -8546,7 +8546,7 @@ fn builtinCall( }, .field_parent_ptr => { const parent_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ .parent_type = parent_type, .field_name = field_name, @@ -8701,7 +8701,7 @@ fn hasDeclOrField( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const container_type = try typeExpr(gz, scope, lhs_node); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = container_type, .rhs = name, @@ -8851,7 +8851,7 @@ fn simpleCBuiltin( ) InnerError!Zir.Inst.Ref { const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node); _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, @@ -8869,7 +8869,7 @@ fn offsetOf( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const type_inst = try typeExpr(gz, scope, lhs_node); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = type_inst, .rhs = field_name, @@ -10271,6 +10271,8 @@ fn rvalue( as_ty | 
@enumToInt(Zir.Inst.Ref.i32_type), as_ty | @enumToInt(Zir.Inst.Ref.u64_type), as_ty | @enumToInt(Zir.Inst.Ref.i64_type), + as_ty | @enumToInt(Zir.Inst.Ref.u128_type), + as_ty | @enumToInt(Zir.Inst.Ref.i128_type), as_ty | @enumToInt(Zir.Inst.Ref.usize_type), as_ty | @enumToInt(Zir.Inst.Ref.isize_type), as_ty | @enumToInt(Zir.Inst.Ref.c_char_type), @@ -10296,15 +10298,30 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.comptime_float_type), as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyframe_type), as_ty | @enumToInt(Zir.Inst.Ref.null_type), as_ty | @enumToInt(Zir.Inst.Ref.undefined_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_void_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_naked_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_ccc_void_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), - as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), + as_ty | @enumToInt(Zir.Inst.Ref.atomic_order_type), + as_ty | @enumToInt(Zir.Inst.Ref.atomic_rmw_op_type), + as_ty | @enumToInt(Zir.Inst.Ref.calling_convention_type), + as_ty | @enumToInt(Zir.Inst.Ref.address_space_type), + as_ty | @enumToInt(Zir.Inst.Ref.float_mode_type), + as_ty | @enumToInt(Zir.Inst.Ref.reduce_op_type), + as_ty | @enumToInt(Zir.Inst.Ref.call_modifier_type), + as_ty | @enumToInt(Zir.Inst.Ref.prefetch_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.export_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.extern_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.type_info_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type), + as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type), + as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type), as_comptime_int | @enumToInt(Zir.Inst.Ref.zero), as_comptime_int | @enumToInt(Zir.Inst.Ref.one), as_bool | @enumToInt(Zir.Inst.Ref.bool_true), @@ -10677,8 +10694,8 @@ fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 { const string_bytes = &astgen.string_bytes; const str_index = @intCast(u32, string_bytes.items.len); try astgen.appendIdentStr(ident_token, string_bytes); - const key = string_bytes.items[str_index..]; - const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{ + const key: []const u8 = string_bytes.items[str_index..]; + const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ .bytes = string_bytes, }, StringIndexContext{ .bytes = string_bytes, diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 879f0a6b15..1cdb768311 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -8,6 +8,7 @@ const CompilationModule = @import("Module.zig"); const File = CompilationModule.File; const Module = @import("Package.zig"); const Tokenizer = std.zig.Tokenizer; +const InternPool = @import("InternPool.zig"); const Zir = @import("Zir.zig"); const Ref = Zir.Inst.Ref; const log = std.log.scoped(.autodoc); @@ -95,8 +96,6 @@ pub fn generateZirData(self: *Autodoc) !void { } 
} - log.debug("Ref map size: {}", .{Ref.typed_value_map.len}); - const root_src_dir = self.comp_module.main_pkg.root_src_directory; const root_src_path = self.comp_module.main_pkg.root_src_path; const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path}); @@ -108,18 +107,20 @@ pub fn generateZirData(self: *Autodoc) !void { const file = self.comp_module.import_table.get(abs_root_src_path).?; // file is expected to be present in the import table // Append all the types in Zir.Inst.Ref. { - try self.types.append(self.arena, .{ - .ComptimeExpr = .{ .name = "ComptimeExpr" }, - }); - - // this skips Ref.none but it's ok becuse we replaced it with ComptimeExpr - var i: u32 = 1; - while (i <= @enumToInt(Ref.anyerror_void_error_union_type)) : (i += 1) { + comptime std.debug.assert(@enumToInt(InternPool.Index.first_type) == 0); + var i: u32 = 0; + while (i <= @enumToInt(InternPool.Index.last_type)) : (i += 1) { + const ip_index = @intToEnum(InternPool.Index, i); var tmpbuf = std.ArrayList(u8).init(self.arena); - try Ref.typed_value_map[i].val.fmtDebug().format("", .{}, tmpbuf.writer()); + if (ip_index == .generic_poison_type) { + // Not a real type, doesn't have a normal name + try tmpbuf.writer().writeAll("(generic poison)"); + } else { + try ip_index.toType().fmt(self.comp_module).format("", .{}, tmpbuf.writer()); + } try self.types.append( self.arena, - switch (@intToEnum(Ref, i)) { + switch (ip_index) { else => blk: { // TODO: map the remaining refs to a correct type // instead of just assinging "array" to them. @@ -1040,7 +1041,7 @@ fn walkInstruction( .ret_load => { const un_node = data[inst_index].un_node; const res_ptr_ref = un_node.operand; - const res_ptr_inst = @enumToInt(res_ptr_ref) - Ref.typed_value_map.len; + const res_ptr_inst = Zir.refToIndex(res_ptr_ref).?; // TODO: this instruction doesn't let us know trivially if there's // branching involved or not. For now here's the strat: // We search backwarts until `ret_ptr` for `store_node`, @@ -2157,11 +2158,10 @@ fn walkInstruction( const lhs_ref = blk: { var lhs_extra = extra; while (true) { - if (@enumToInt(lhs_extra.data.lhs) < Ref.typed_value_map.len) { + const lhs = Zir.refToIndex(lhs_extra.data.lhs) orelse { break :blk lhs_extra.data.lhs; - } + }; - const lhs = @enumToInt(lhs_extra.data.lhs) - Ref.typed_value_map.len; if (tags[lhs] != .field_val and tags[lhs] != .field_ptr and tags[lhs] != .field_type) break :blk lhs_extra.data.lhs; @@ -2188,8 +2188,7 @@ fn walkInstruction( // TODO: double check that we really don't need type info here const wr = blk: { - if (@enumToInt(lhs_ref) >= Ref.typed_value_map.len) { - const lhs_inst = @enumToInt(lhs_ref) - Ref.typed_value_map.len; + if (Zir.refToIndex(lhs_ref)) |lhs_inst| { if (tags[lhs_inst] == .call or tags[lhs_inst] == .field_call) { break :blk DocData.WalkResult{ .expr = .{ @@ -4672,16 +4671,19 @@ fn walkRef( ref: Ref, need_type: bool, // true when the caller needs also a typeRef for the return value ) AutodocErrors!DocData.WalkResult { - const enum_value = @enumToInt(ref); - if (enum_value <= @enumToInt(Ref.anyerror_void_error_union_type)) { + if (ref == .none) { + return .{ .expr = .{ .comptimeExpr = 0 } }; + } else if (@enumToInt(ref) <= @enumToInt(InternPool.Index.last_type)) { // We can just return a type that indexes into `types` with the // enum value because in the beginning we pre-filled `types` with // the types that are listed in `Ref`. 
return DocData.WalkResult{ .typeRef = .{ .type = @enumToInt(std.builtin.TypeId.Type) }, - .expr = .{ .type = enum_value }, + .expr = .{ .type = @enumToInt(ref) }, }; - } else if (enum_value < Ref.typed_value_map.len) { + } else if (Zir.refToIndex(ref)) |zir_index| { + return self.walkInstruction(file, parent_scope, parent_src, zir_index, need_type); + } else { switch (ref) { else => { panicWithContext( @@ -4774,9 +4776,6 @@ fn walkRef( // } }; // }, } - } else { - const zir_index = enum_value - Ref.typed_value_map.len; - return self.walkInstruction(file, parent_scope, parent_src, zir_index, need_type); } } diff --git a/src/Compilation.zig b/src/Compilation.zig index cbdc789d40..9397bc93a9 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -87,6 +87,7 @@ clang_preprocessor_mode: ClangPreprocessorMode, /// Whether to print clang argvs to stdout. verbose_cc: bool, verbose_air: bool, +verbose_intern_pool: bool, verbose_llvm_ir: ?[]const u8, verbose_llvm_bc: ?[]const u8, verbose_cimport: bool, @@ -226,7 +227,7 @@ const Job = union(enum) { /// Write the constant value for a Decl to the output file. codegen_decl: Module.Decl.Index, /// Write the machine code for a function to the output file. - codegen_func: *Module.Fn, + codegen_func: Module.Fn.Index, /// Render the .h file snippet for the Decl. emit_h_decl: Module.Decl.Index, /// The Decl needs to be analyzed and possibly export itself. @@ -593,6 +594,7 @@ pub const InitOptions = struct { verbose_cc: bool = false, verbose_link: bool = false, verbose_air: bool = false, + verbose_intern_pool: bool = false, verbose_llvm_ir: ?[]const u8 = null, verbose_llvm_bc: ?[]const u8 = null, verbose_cimport: bool = false, @@ -1315,9 +1317,9 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .global_zir_cache = global_zir_cache, .local_zir_cache = local_zir_cache, .emit_h = emit_h, - .error_name_list = .{}, + .tmp_hack_arena = std.heap.ArenaAllocator.init(gpa), }; - try module.error_name_list.append(gpa, "(no error)"); + try module.init(); break :blk module; } else blk: { @@ -1574,6 +1576,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .clang_preprocessor_mode = options.clang_preprocessor_mode, .verbose_cc = options.verbose_cc, .verbose_air = options.verbose_air, + .verbose_intern_pool = options.verbose_intern_pool, .verbose_llvm_ir = options.verbose_llvm_ir, .verbose_llvm_bc = options.verbose_llvm_bc, .verbose_cimport = options.verbose_cimport, @@ -2026,6 +2029,13 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void try comp.performAllTheWork(main_progress_node); if (comp.bin_file.options.module) |module| { + if (builtin.mode == .Debug and comp.verbose_intern_pool) { + std.debug.print("intern pool stats for '{s}':\n", .{ + comp.bin_file.options.root_name, + }); + module.intern_pool.dump(); + } + if (comp.bin_file.options.is_test and comp.totalErrorCount() == 0) { // The `test_functions` decl has been intentionally postponed until now, // at which point we must populate it with the list of test functions that @@ -2042,7 +2052,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void assert(decl.deletion_flag); assert(decl.dependants.count() == 0); const is_anon = if (decl.zir_decl_index == 0) blk: { - break :blk decl.src_namespace.anon_decls.swapRemove(decl_index); + break :blk module.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index); } else false; try module.clearDecl(decl_index, null); @@ -2523,8 +2533,7 @@ pub fn 
totalErrorCount(self: *Compilation) u32 { // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. for (module.failed_decls.keys()) |key| { - const decl = module.declPtr(key); - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(key).okToReportErrors()) { total += 1; if (module.cimport_errors.get(key)) |errors| { total += errors.len; @@ -2533,8 +2542,7 @@ pub fn totalErrorCount(self: *Compilation) u32 { } if (module.emit_h) |emit_h| { for (emit_h.failed_decls.keys()) |key| { - const decl = module.declPtr(key); - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(key).okToReportErrors()) { total += 1; } } @@ -2618,7 +2626,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { var it = module.failed_files.iterator(); while (it.next()) |entry| { if (entry.value_ptr.*) |msg| { - try addModuleErrorMsg(&bundle, msg.*); + try addModuleErrorMsg(module, &bundle, msg.*); } else { // Must be ZIR errors. Note that this may include AST errors. // addZirErrorMessages asserts that the tree is loaded. @@ -2631,17 +2639,17 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { var it = module.failed_embed_files.iterator(); while (it.next()) |entry| { const msg = entry.value_ptr.*; - try addModuleErrorMsg(&bundle, msg.*); + try addModuleErrorMsg(module, &bundle, msg.*); } } { var it = module.failed_decls.iterator(); while (it.next()) |entry| { - const decl = module.declPtr(entry.key_ptr.*); + const decl_index = entry.key_ptr.*; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (decl.getFileScope().okToReportErrors()) { - try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); + if (module.declFileScope(decl_index).okToReportErrors()) { + try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*); if (module.cimport_errors.get(entry.key_ptr.*)) |cimport_errors| for (cimport_errors) |c_error| { try bundle.addRootErrorMessage(.{ .msg = try bundle.addString(std.mem.span(c_error.msg)), @@ -2662,16 +2670,16 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { if (module.emit_h) |emit_h| { var it = emit_h.failed_decls.iterator(); while (it.next()) |entry| { - const decl = module.declPtr(entry.key_ptr.*); + const decl_index = entry.key_ptr.*; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (decl.getFileScope().okToReportErrors()) { - try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); + if (module.declFileScope(decl_index).okToReportErrors()) { + try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*); } } } for (module.failed_exports.values()) |value| { - try addModuleErrorMsg(&bundle, value.*); + try addModuleErrorMsg(module, &bundle, value.*); } } @@ -2703,7 +2711,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { const values = module.compile_log_decls.values(); // First one will be the error; subsequent ones will be notes. const err_decl = module.declPtr(keys[0]); - const src_loc = err_decl.nodeOffsetSrcLoc(values[0]); + const src_loc = err_decl.nodeOffsetSrcLoc(values[0], module); const err_msg = Module.ErrorMsg{ .src_loc = src_loc, .msg = "found compile log statement", @@ -2714,12 +2722,12 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { for (keys[1..], 0..) 
|key, i| { const note_decl = module.declPtr(key); err_msg.notes[i] = .{ - .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]), + .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1], module), .msg = "also here", }; } - try addModuleErrorMsg(&bundle, err_msg); + try addModuleErrorMsg(module, &bundle, err_msg); } } @@ -2775,8 +2783,9 @@ pub const ErrorNoteHashContext = struct { } }; -pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { +pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { const gpa = eb.gpa; + const ip = &mod.intern_pool; const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| { const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); @@ -2802,7 +2811,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) .src_loc = .none, }); break; - } else if (module_reference.decl == null) { + } else if (module_reference.decl == .none) { try ref_traces.append(gpa, .{ .decl_name = 0, .src_loc = .none, @@ -2815,7 +2824,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa); defer gpa.free(rt_file_path); try ref_traces.append(gpa, .{ - .decl_name = try eb.addString(std.mem.sliceTo(module_reference.decl.?, 0)), + .decl_name = try eb.addString(ip.stringToSliceUnwrap(module_reference.decl).?), .src_loc = try eb.addSourceLocation(.{ .src_path = try eb.addString(rt_file_path), .span_start = span.start, @@ -3204,7 +3213,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v // Tests are always emitted in test binaries. The decl_refs are created by // Module.populateTestFunctions, but this will not queue body analysis, so do // that now. 
- try module.ensureFuncBodyAnalysisQueued(decl.val.castTag(.function).?.data); + const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?; + try module.ensureFuncBodyAnalysisQueued(func_index); } }, .update_embed_file => |embed_file| { @@ -3228,7 +3238,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v try module.failed_decls.ensureUnusedCapacity(gpa, 1); module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(module), "unable to update line number: {s}", .{@errorName(err)}, )); @@ -3841,7 +3851,7 @@ fn reportRetryableEmbedFileError( const mod = comp.bin_file.options.module.?; const gpa = mod.gpa; - const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(); + const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(mod); const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path| try Module.ErrorMsg.create( @@ -5417,6 +5427,7 @@ fn buildOutputFromZig( .verbose_cc = comp.verbose_cc, .verbose_link = comp.bin_file.options.verbose_link, .verbose_air = comp.verbose_air, + .verbose_intern_pool = comp.verbose_intern_pool, .verbose_llvm_ir = comp.verbose_llvm_ir, .verbose_llvm_bc = comp.verbose_llvm_bc, .verbose_cimport = comp.verbose_cimport, @@ -5495,6 +5506,7 @@ pub fn build_crt_file( .verbose_cc = comp.verbose_cc, .verbose_link = comp.bin_file.options.verbose_link, .verbose_air = comp.verbose_air, + .verbose_intern_pool = comp.verbose_intern_pool, .verbose_llvm_ir = comp.verbose_llvm_ir, .verbose_llvm_bc = comp.verbose_llvm_bc, .verbose_cimport = comp.verbose_cimport, diff --git a/src/InternPool.zig b/src/InternPool.zig index 74155ca657..c208fcf18a 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1,75 +1,903 @@ +//! All interned objects have both a value and a type. +//! This data structure is self-contained, with the following exceptions: +//! * type_struct via Module.Struct.Index +//! * type_opaque via Module.Namespace.Index and Module.Decl.Index + +/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are +/// constructed lazily. map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, +/// On 32-bit systems, this array is ignored and extra is used for everything. +/// On 64-bit systems, this array is used for big integers and associated metadata. +/// Use the helper methods instead of accessing this directly in order to not +/// violate the above mechanism. +limbs: std.ArrayListUnmanaged(u64) = .{}, +/// In order to store references to strings in fewer bytes, we copy all +/// string bytes into here. String bytes can be null. It is up to whomever +/// is referencing the data here whether they want to store both index and length, +/// thus allowing null bytes, or store only index, and use null-termination. The +/// `string_bytes` array is agnostic to either usage. +string_bytes: std.ArrayListUnmanaged(u8) = .{}, -const InternPool = @This(); +/// Struct objects are stored in this data structure because: +/// * They contain pointers such as the field maps. +/// * They need to be mutated after creation. +allocated_structs: std.SegmentedList(Module.Struct, 0) = .{}, +/// When a Struct object is freed from `allocated_structs`, it is pushed into this stack. 
+structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{},
+
+/// Union objects are stored in this data structure because:
+/// * They contain pointers such as the field maps.
+/// * They need to be mutated after creation.
+allocated_unions: std.SegmentedList(Module.Union, 0) = .{},
+/// When a Union object is freed from `allocated_unions`, it is pushed into this stack.
+unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{},
+
+/// Fn objects are stored in this data structure because:
+/// * They need to be mutated after creation.
+allocated_funcs: std.SegmentedList(Module.Fn, 0) = .{},
+/// When a Fn object is freed from `allocated_funcs`, it is pushed into this stack.
+funcs_free_list: std.ArrayListUnmanaged(Module.Fn.Index) = .{},
+
+/// InferredErrorSet objects are stored in this data structure because:
+/// * They contain pointers such as the errors map and the set of other inferred error sets.
+/// * They need to be mutated after creation.
+allocated_inferred_error_sets: std.SegmentedList(Module.Fn.InferredErrorSet, 0) = .{},
+/// When an InferredErrorSet object is freed from `allocated_inferred_error_sets`,
+/// it is pushed into this stack.
+inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.Fn.InferredErrorSet.Index) = .{},
+
+/// Some types such as enums, structs, and unions need to store mappings from field name
+/// to field index, or from value to field index. In such cases, they will store the
+/// underlying field names and values directly, relying on one of these maps, stored
+/// separately, to provide lookup.
+maps: std.ArrayListUnmanaged(std.AutoArrayHashMapUnmanaged(void, void)) = .{},
+
+/// Used for finding the index inside `string_bytes`.
+string_table: std.HashMapUnmanaged(
+    u32,
+    void,
+    std.hash_map.StringIndexContext,
+    std.hash_map.default_max_load_percentage,
+) = .{},
+
+const builtin = @import("builtin");
 const std = @import("std");
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
+const BigIntConst = std.math.big.int.Const;
+const BigIntMutable = std.math.big.int.Mutable;
+const Limb = std.math.big.Limb;
+const Hash = std.hash.Wyhash;
+
+const InternPool = @This();
+const Module = @import("Module.zig");
+const Sema = @import("Sema.zig");
 
 const KeyAdapter = struct {
     intern_pool: *const InternPool,
 
     pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool {
         _ = b_void;
-        return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a);
+        return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a, ctx.intern_pool);
     }
 
     pub fn hash(ctx: @This(), a: Key) u32 {
+        return a.hash32(ctx.intern_pool);
+    }
+};
+
+/// An index into `maps` which might be `none`.
+pub const OptionalMapIndex = enum(u32) {
+    none = std.math.maxInt(u32),
+    _,
+
+    pub fn unwrap(oi: OptionalMapIndex) ?MapIndex {
+        if (oi == .none) return null;
+        return @intToEnum(MapIndex, @enumToInt(oi));
+    }
+};
+
+/// An index into `maps`.
+pub const MapIndex = enum(u32) {
+    _,
+
+    pub fn toOptional(i: MapIndex) OptionalMapIndex {
+        return @intToEnum(OptionalMapIndex, @enumToInt(i));
+    }
+};
+
+pub const RuntimeIndex = enum(u32) {
+    zero = 0,
+    comptime_field_ptr = std.math.maxInt(u32),
+    _,
+
+    pub fn increment(ri: *RuntimeIndex) void {
+        ri.* = @intToEnum(RuntimeIndex, @enumToInt(ri.*) + 1);
+    }
+};
+
+/// An index into `string_bytes`.
+pub const String = enum(u32) {
+    _,
+};
+
+/// An index into `string_bytes`.
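+/// The string begins at this byte offset and runs up to the next 0 byte.
+/// A sketch of typical use (assumes `name` was interned earlier and
+/// `ip: *const InternPool` is in scope):
+///
+///     const bytes = ip.stringToSlice(name); // slice excludes the terminating 0
+///     std.debug.print("{}", .{name.fmt(ip)});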
+pub const NullTerminatedString = enum(u32) {
+    /// This is distinct from `none`: it is a valid index representing the empty string.
+    empty = 0,
+    _,
+
+    pub fn toString(self: NullTerminatedString) String {
+        return @intToEnum(String, @enumToInt(self));
+    }
+
+    pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString {
+        return @intToEnum(OptionalNullTerminatedString, @enumToInt(self));
+    }
+
+    const Adapter = struct {
+        strings: []const NullTerminatedString,
+
+        pub fn eql(ctx: @This(), a: NullTerminatedString, b_void: void, b_map_index: usize) bool {
+            _ = b_void;
+            return a == ctx.strings[b_map_index];
+        }
+
+        pub fn hash(ctx: @This(), a: NullTerminatedString) u32 {
+            _ = ctx;
+            return std.hash.uint32(@enumToInt(a));
+        }
+    };
+
+    /// Compare based on integer value alone, ignoring the string contents.
+    pub fn indexLessThan(ctx: void, a: NullTerminatedString, b: NullTerminatedString) bool {
         _ = ctx;
-        return a.hash();
+        return @enumToInt(a) < @enumToInt(b);
+    }
+
+    pub fn toUnsigned(self: NullTerminatedString, ip: *const InternPool) ?u32 {
+        const s = ip.stringToSlice(self);
+        if (s.len > 1 and s[0] == '0') return null;
+        if (std.mem.indexOfScalar(u8, s, '_')) |_| return null;
+        return std.fmt.parseUnsigned(u32, s, 10) catch null;
+    }
+
+    const FormatData = struct {
+        string: NullTerminatedString,
+        ip: *const InternPool,
+    };
+    fn format(
+        data: FormatData,
+        comptime specifier: []const u8,
+        _: std.fmt.FormatOptions,
+        writer: anytype,
+    ) @TypeOf(writer).Error!void {
+        const s = data.ip.stringToSlice(data.string);
+        if (comptime std.mem.eql(u8, specifier, "")) {
+            try writer.writeAll(s);
+        } else if (comptime std.mem.eql(u8, specifier, "i")) {
+            try writer.print("{}", .{std.zig.fmtId(s)});
+        } else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'");
+    }
+
+    pub fn fmt(self: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) {
+        return .{ .data = .{ .string = self, .ip = ip } };
+    }
+};
+
+/// An index into `string_bytes` which might be `none`.
+pub const OptionalNullTerminatedString = enum(u32) {
+    /// This is distinct from `none`: it is a valid index representing the empty string.
+    empty = 0,
+    none = std.math.maxInt(u32),
+    _,
+
+    pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString {
+        if (oi == .none) return null;
+        return @intToEnum(NullTerminatedString, @enumToInt(oi));
     }
 };
 
 pub const Key = union(enum) {
-    int_type: struct {
-        signedness: std.builtin.Signedness,
-        bits: u16,
-    },
-    ptr_type: struct {
-        elem_type: Index,
-        sentinel: Index,
-        alignment: u16,
-        size: std.builtin.Type.Pointer.Size,
-        is_const: bool,
-        is_volatile: bool,
-        is_allowzero: bool,
-        address_space: std.builtin.AddressSpace,
-    },
-    array_type: struct {
-        len: u64,
-        child: Index,
-        sentinel: Index,
-    },
-    vector_type: struct {
-        len: u32,
-        child: Index,
-    },
-    optional_type: struct {
-        payload_type: Index,
-    },
-    error_union_type: struct {
+    int_type: IntType,
+    ptr_type: PtrType,
+    array_type: ArrayType,
+    vector_type: VectorType,
+    opt_type: Index,
+    /// `anyframe->T`. The payload is the child type, which may be `none` to indicate
+    /// `anyframe`.
+    anyframe_type: Index,
+    error_union_type: ErrorUnionType,
+    simple_type: SimpleType,
+    /// This represents a struct that has been explicitly declared in source code,
+    /// or was created with `@Type`. It is unique and based on a declaration.
+    /// It may be a tuple, if declared like this: `struct {A, B, C}`.
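+    /// For example, `struct { a: u32 }` declared in source is a `struct_type`,
+    /// while the type of the anonymous literal `.{ .a = @as(u32, 1) }` is an
+    /// `anon_struct_type` (see below).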
+ struct_type: StructType, + /// This is an anonymous struct or tuple type which has no corresponding + /// declaration. It is used for types that have no `struct` keyword in the + /// source code, and were not created via `@Type`. + anon_struct_type: AnonStructType, + union_type: UnionType, + opaque_type: OpaqueType, + enum_type: EnumType, + func_type: FuncType, + error_set_type: ErrorSetType, + inferred_error_set_type: Module.Fn.InferredErrorSet.Index, + + /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented + /// via `simple_value` and has a named `Index` tag for it. + undef: Index, + runtime_value: TypeValue, + simple_value: SimpleValue, + variable: Variable, + extern_func: ExternFunc, + func: Func, + int: Key.Int, + err: Error, + error_union: ErrorUnion, + enum_literal: NullTerminatedString, + /// A specific enum tag, indicated by the integer tag value. + enum_tag: EnumTag, + /// An empty enum or union. TODO: this value's existence is strange, because such a type in + /// reality has no values. See #15909. + /// Payload is the type for which we are an empty value. + empty_enum_value: Index, + float: Float, + ptr: Ptr, + opt: Opt, + /// An instance of a struct, array, or vector. + /// Each element/field stored as an `Index`. + /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, + /// so the slice length will be one more than the type's array length. + aggregate: Aggregate, + /// An instance of a union. + un: Union, + + /// A comptime function call with a memoized result. + memoized_call: Key.MemoizedCall, + + pub const TypeValue = extern struct { + ty: Index, + val: Index, + }; + + pub const IntType = std.builtin.Type.Int; + + /// Extern for hashing via memory reinterpretation. + pub const ErrorUnionType = extern struct { error_set_type: Index, payload_type: Index, - }, - simple: Simple, + }; - pub fn hash(key: Key) u32 { - var hasher = std.hash.Wyhash.init(0); - switch (key) { - .int_type => |int_type| { - std.hash.autoHash(&hasher, int_type); - }, - .array_type => |array_type| { - std.hash.autoHash(&hasher, array_type); - }, - else => @panic("TODO"), + pub const ErrorSetType = struct { + /// Set of error names, sorted by null terminated string index. + names: []const NullTerminatedString, + /// This is ignored by `get` but will always be provided by `indexToKey`. + names_map: OptionalMapIndex = .none, + + /// Look up field index based on field name. + pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + const field_index = map.getIndexAdapted(name, adapter) orelse return null; + return @intCast(u32, field_index); } - return @truncate(u32, hasher.final()); + }; + + /// Extern layout so it can be hashed with `std.mem.asBytes`. + pub const PtrType = extern struct { + child: Index, + sentinel: Index = .none, + flags: Flags = .{}, + packed_offset: PackedOffset = .{ .bit_offset = 0, .host_size = 0 }, + + pub const VectorIndex = enum(u16) { + none = std.math.maxInt(u16), + runtime = std.math.maxInt(u16) - 1, + _, + }; + + pub const Flags = packed struct(u32) { + size: Size = .One, + /// `none` indicates the ABI alignment of the pointee_type. In this + /// case, this field *must* be set to `none`, otherwise the + /// `InternPool` equality and hashing functions will return incorrect + /// results. 
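+            /// For example, the key for plain `*u8` is interned with
+            /// `.alignment = .none`, never with an explicit value equal to the
+            /// ABI alignment of `u8`; only pointers such as `*align(4) u8`
+            /// carry an explicit alignment here.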
+            alignment: Alignment = .none,
+            is_const: bool = false,
+            is_volatile: bool = false,
+            is_allowzero: bool = false,
+            /// See src/target.zig defaultAddressSpace function for how to obtain
+            /// an appropriate value for this field.
+            address_space: AddressSpace = .generic,
+            vector_index: VectorIndex = .none,
+        };
+
+        pub const PackedOffset = packed struct(u32) {
+            /// If this is non-zero it means the pointer points to a sub-byte
+            /// range of data, which is backed by a "host integer" with this
+            /// number of bytes.
+            /// When host_size=pointee_abi_size and bit_offset=0, this must be
+            /// represented with host_size=0 instead.
+            host_size: u16,
+            bit_offset: u16,
+        };
+
+        pub const Size = std.builtin.Type.Pointer.Size;
+        pub const AddressSpace = std.builtin.AddressSpace;
+    };
+
+    /// Extern so that hashing can be done via memory reinterpretation.
+    pub const ArrayType = extern struct {
+        len: u64,
+        child: Index,
+        sentinel: Index = .none,
+    };
+
+    /// Extern so that hashing can be done via memory reinterpretation.
+    pub const VectorType = extern struct {
+        len: u32,
+        child: Index,
+    };
+
+    pub const OpaqueType = extern struct {
+        /// The Decl that corresponds to the opaque itself.
+        decl: Module.Decl.Index,
+        /// Represents the declarations inside this opaque.
+        namespace: Module.Namespace.Index,
+    };
+
+    pub const StructType = extern struct {
+        /// The `none` tag is used to represent a struct with no fields.
+        index: Module.Struct.OptionalIndex,
+        /// May be `none` if the struct has no declarations.
+        namespace: Module.Namespace.OptionalIndex,
+    };
+
+    pub const AnonStructType = struct {
+        types: []const Index,
+        /// This may be empty, indicating this is a tuple.
+        names: []const NullTerminatedString,
+        /// These elements may be `none`, indicating runtime-known.
+        values: []const Index,
+
+        pub fn isTuple(self: AnonStructType) bool {
+            return self.names.len == 0;
+        }
+    };
+
+    pub const UnionType = struct {
+        index: Module.Union.Index,
+        runtime_tag: RuntimeTag,
+
+        pub const RuntimeTag = enum { none, safety, tagged };
+
+        pub fn hasTag(self: UnionType) bool {
+            return switch (self.runtime_tag) {
+                .none => false,
+                .tagged, .safety => true,
+            };
+        }
+    };
+
+    pub const EnumType = struct {
+        /// The Decl that corresponds to the enum itself.
+        decl: Module.Decl.Index,
+        /// Represents the declarations inside this enum.
+        namespace: Module.Namespace.OptionalIndex,
+        /// An integer type which is used for the numerical value of the enum.
+        /// This field is present regardless of whether the enum tag type was
+        /// explicitly provided or auto-numbered.
+        tag_ty: Index,
+        /// Set of field names in declaration order.
+        names: []const NullTerminatedString,
+        /// Maps integer tag value to field index.
+        /// Entries are in declaration order, same as `names`.
+        /// If this is empty, it means the enum tags are auto-numbered.
+        values: []const Index,
+        tag_mode: TagMode,
+        /// This is ignored by `get` but will always be provided by `indexToKey`.
+        names_map: OptionalMapIndex = .none,
+        /// This is ignored by `get` but will be provided by `indexToKey` when
+        /// a value map exists.
+        values_map: OptionalMapIndex = .none,
+
+        pub const TagMode = enum {
+            /// The integer tag type was auto-numbered by Zig.
+            auto,
+            /// The integer tag type was provided by the enum declaration, and the enum
+            /// is exhaustive.
+            explicit,
+            /// The integer tag type was provided by the enum declaration, and the enum
+            /// is non-exhaustive.
+            nonexhaustive,
+        };
+
+        /// Look up field index based on field name.
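+        /// Returns `null` when `name` is not a field of this enum. A sketch of
+        /// use (identifier names illustrative):
+        ///
+        ///     const field_index = enum_type.nameIndex(ip, name) orelse
+        ///         return error.NoSuchField;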
+ pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + const field_index = map.getIndexAdapted(name, adapter) orelse return null; + return @intCast(u32, field_index); + } + + /// Look up field index based on tag value. + /// Asserts that `values_map` is not `none`. + /// This function returns `null` when `tag_val` does not have the + /// integer tag type of the enum. + pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 { + assert(tag_val != .none); + // TODO: we should probably decide a single interface for this function, but currently + // it's being called with both tag values and underlying ints. Fix this! + const int_tag_val = switch (ip.indexToKey(tag_val)) { + .enum_tag => |enum_tag| enum_tag.int, + .int => tag_val, + else => unreachable, + }; + if (self.values_map.unwrap()) |values_map| { + const map = &ip.maps.items[@enumToInt(values_map)]; + const adapter: Index.Adapter = .{ .indexes = self.values }; + const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null; + return @intCast(u32, field_index); + } + // Auto-numbered enum. Convert `int_tag_val` to field index. + switch (ip.indexToKey(int_tag_val).int.storage) { + .u64 => |x| { + if (x >= self.names.len) return null; + return @intCast(u32, x); + }, + .i64, .big_int => return null, // out of range + .lazy_align, .lazy_size => unreachable, + } + } + }; + + pub const IncompleteEnumType = struct { + /// Same as corresponding `EnumType` field. + decl: Module.Decl.Index, + /// Same as corresponding `EnumType` field. + namespace: Module.Namespace.OptionalIndex, + /// The field names and field values are not known yet, but + /// the number of fields must be known ahead of time. + fields_len: u32, + /// This information is needed so that the size does not change + /// later when populating field values. + has_values: bool, + /// Same as corresponding `EnumType` field. + tag_mode: EnumType.TagMode, + /// This may be updated via `setTagType` later. + tag_ty: Index = .none, + + pub fn toEnumType(self: @This()) EnumType { + return .{ + .decl = self.decl, + .namespace = self.namespace, + .tag_ty = self.tag_ty, + .tag_mode = self.tag_mode, + .names = &.{}, + .values = &.{}, + }; + } + + /// Only the decl is used for hashing and equality, so we can construct + /// this minimal key for use with `map`. + pub fn toKey(self: @This()) Key { + return .{ .enum_type = self.toEnumType() }; + } + }; + + pub const FuncType = struct { + param_types: []Index, + return_type: Index, + /// Tells whether a parameter is comptime. See `paramIsComptime` helper + /// method for accessing this. + comptime_bits: u32, + /// Tells whether a parameter is noalias. See `paramIsNoalias` helper + /// method for accessing this. + noalias_bits: u32, + /// `none` indicates the function has the default alignment for + /// function code on the target. In this case, this field *must* be set + /// to `none`, otherwise the `InternPool` equality and hashing + /// functions will return incorrect results. 
+ alignment: Alignment, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + + pub fn paramIsComptime(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.comptime_bits >> i) != 0; + } + + pub fn paramIsNoalias(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.noalias_bits >> i) != 0; + } + }; + + pub const Variable = struct { + ty: Index, + init: Index, + decl: Module.Decl.Index, + lib_name: OptionalNullTerminatedString = .none, + is_extern: bool = false, + is_const: bool = false, + is_threadlocal: bool = false, + is_weak_linkage: bool = false, + }; + + pub const ExternFunc = struct { + ty: Index, + /// The Decl that corresponds to the function itself. + decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" fn write(...) usize` would have 'c' as library name. + /// Index into the string table bytes. + lib_name: OptionalNullTerminatedString, + }; + + /// Extern so it can be hashed by reinterpreting memory. + pub const Func = extern struct { + ty: Index, + index: Module.Fn.Index, + }; + + pub const Int = struct { + ty: Index, + storage: Storage, + + pub const Storage = union(enum) { + u64: u64, + i64: i64, + big_int: BigIntConst, + lazy_align: Index, + lazy_size: Index, + + /// Big enough to fit any non-BigInt value + pub const BigIntSpace = struct { + /// The +1 is headroom so that operations such as incrementing once + /// or decrementing once are possible without using an allocator. + limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb, + }; + + pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst { + return switch (storage) { + .big_int => |x| x, + inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + .lazy_align, .lazy_size => unreachable, + }; + } + }; + }; + + pub const Error = extern struct { + ty: Index, + name: NullTerminatedString, + }; + + pub const ErrorUnion = struct { + ty: Index, + val: Value, + + pub const Value = union(enum) { + err_name: NullTerminatedString, + payload: Index, + }; + }; + + pub const EnumTag = extern struct { + /// The enum type. + ty: Index, + /// The integer tag value which has the integer tag type of the enum. + int: Index, + }; + + pub const Float = struct { + ty: Index, + /// The storage used must match the size of the float type being represented. + storage: Storage, + + pub const Storage = union(enum) { + f16: f16, + f32: f32, + f64: f64, + f80: f80, + f128: f128, + }; + }; + + pub const Ptr = struct { + /// This is the pointer type, not the element type. + ty: Index, + /// The value of the address that the pointer points to. + addr: Addr, + /// This could be `none` if size is not a slice. + len: Index = .none, + + pub const Addr = union(enum) { + decl: Module.Decl.Index, + mut_decl: MutDecl, + comptime_field: Index, + int: Index, + eu_payload: Index, + opt_payload: Index, + elem: BaseIndex, + field: BaseIndex, + + pub const MutDecl = struct { + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, + }; + pub const BaseIndex = struct { + base: Index, + index: u64, + }; + }; + }; + + /// `null` is represented by the `val` field being `none`. + pub const Opt = extern struct { + /// This is the optional type; not the payload type. + ty: Index, + /// This could be `none`, indicating the optional is `null`. 
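+        /// For example, `@as(?u8, null)` is stored with `val = .none`, while
+        /// `@as(?u8, 7)` stores the interned `u8` value `7` here.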
+        val: Index,
+    };
+
+    pub const Union = extern struct {
+        /// This is the union type; not the field type.
+        ty: Index,
+        /// Indicates the active field.
+        tag: Index,
+        /// The value of the active field.
+        val: Index,
+    };
+
+    pub const Aggregate = struct {
+        ty: Index,
+        storage: Storage,
+
+        pub const Storage = union(enum) {
+            bytes: []const u8,
+            elems: []const Index,
+            repeated_elem: Index,
+
+            pub fn values(self: *const Storage) []const Index {
+                return switch (self.*) {
+                    .bytes => &.{},
+                    .elems => |elems| elems,
+                    .repeated_elem => |*elem| @as(*const [1]Index, elem),
+                };
+            }
+        };
+    };
+
+    pub const MemoizedCall = struct {
+        func: Module.Fn.Index,
+        arg_values: []const Index,
+        result: Index,
+    };
+
+    pub fn hash32(key: Key, ip: *const InternPool) u32 {
+        return @truncate(u32, key.hash64(ip));
     }
 
-    pub fn eql(a: Key, b: Key) bool {
-        const KeyTag = std.meta.Tag(Key);
+    pub fn hash64(key: Key, ip: *const InternPool) u64 {
+        const asBytes = std.mem.asBytes;
+        const KeyTag = @typeInfo(Key).Union.tag_type.?;
+        const seed = @enumToInt(@as(KeyTag, key));
+        return switch (key) {
+            // TODO: assert no padding in these types
+            inline .ptr_type,
+            .func,
+            .array_type,
+            .vector_type,
+            .opt_type,
+            .anyframe_type,
+            .error_union_type,
+            .simple_type,
+            .simple_value,
+            .opt,
+            .struct_type,
+            .undef,
+            .err,
+            .enum_literal,
+            .enum_tag,
+            .empty_enum_value,
+            .inferred_error_set_type,
+            .un,
+            => |x| Hash.hash(seed, asBytes(&x)),
+
+            .int_type => |x| Hash.hash(seed + @enumToInt(x.signedness), asBytes(&x.bits)),
+            .union_type => |x| Hash.hash(seed + @enumToInt(x.runtime_tag), asBytes(&x.index)),
+
+            .error_union => |x| switch (x.val) {
+                .err_name => |y| Hash.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)),
+                .payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)),
+            },
+
+            .runtime_value => |x| Hash.hash(seed, asBytes(&x.val)),
+            .opaque_type => |x| Hash.hash(seed, asBytes(&x.decl)),
+
+            .enum_type => |enum_type| {
+                var hasher = Hash.init(seed);
+                std.hash.autoHash(&hasher, enum_type.decl);
+                return hasher.final();
+            },
+
+            .variable => |variable| {
+                var hasher = Hash.init(seed);
+                std.hash.autoHash(&hasher, variable.decl);
+                return hasher.final();
+            },
+            .extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)),
+
+            .int => |int| {
+                var hasher = Hash.init(seed);
+                // Canonicalize all integers by converting them to BigIntConst.
+                switch (int.storage) {
+                    .u64, .i64, .big_int => {
+                        var buffer: Key.Int.Storage.BigIntSpace = undefined;
+                        const big_int = int.storage.toBigInt(&buffer);
+
+                        std.hash.autoHash(&hasher, int.ty);
+                        std.hash.autoHash(&hasher, big_int.positive);
+                        for (big_int.limbs) |limb| std.hash.autoHash(&hasher, limb);
+                    },
+                    .lazy_align, .lazy_size => |lazy_ty| {
+                        std.hash.autoHash(
+                            &hasher,
+                            @as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage),
+                        );
+                        std.hash.autoHash(&hasher, lazy_ty);
+                    },
+                }
+                return hasher.final();
+            },
+
+            .float => |float| {
+                var hasher = Hash.init(seed);
+                std.hash.autoHash(&hasher, float.ty);
+                switch (float.storage) {
+                    inline else => |val| std.hash.autoHash(
+                        &hasher,
+                        @bitCast(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), val),
+                    ),
+                }
+                return hasher.final();
+            },
+
+            .ptr => |ptr| {
+                // Int-to-ptr pointers are hashed differently from decl-referencing pointers.
+                // This is sound due to pointer provenance rules.
+                const addr: @typeInfo(Key.Ptr.Addr).Union.tag_type.?
= ptr.addr; + const seed2 = seed + @enumToInt(addr); + const common = asBytes(&ptr.ty) ++ asBytes(&ptr.len); + return switch (ptr.addr) { + .decl => |x| Hash.hash(seed2, common ++ asBytes(&x)), + + .mut_decl => |x| Hash.hash( + seed2, + asBytes(&x.decl) ++ asBytes(&x.runtime_index), + ), + + .int, .eu_payload, .opt_payload, .comptime_field => |int| Hash.hash( + seed2, + asBytes(&int), + ), + + .elem, .field => |x| Hash.hash( + seed2, + asBytes(&x.base) ++ asBytes(&x.index), + ), + }; + }, + + .aggregate => |aggregate| { + var hasher = Hash.init(seed); + std.hash.autoHash(&hasher, aggregate.ty); + const len = ip.aggregateTypeLen(aggregate.ty); + const child = switch (ip.indexToKey(aggregate.ty)) { + .array_type => |array_type| array_type.child, + .vector_type => |vector_type| vector_type.child, + .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + + if (child == .u8_type) { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| { + std.hash.autoHash(&hasher, KeyTag.int); + std.hash.autoHash(&hasher, byte); + }, + .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| { + const elem_key = ip.indexToKey(elem); + std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); + switch (elem_key) { + .undef => {}, + .int => |int| std.hash.autoHash( + &hasher, + @intCast(u8, int.storage.u64), + ), + else => unreachable, + } + }, + .repeated_elem => |elem| { + const elem_key = ip.indexToKey(elem); + var remaining = len; + while (remaining > 0) : (remaining -= 1) { + std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); + switch (elem_key) { + .undef => {}, + .int => |int| std.hash.autoHash( + &hasher, + @intCast(u8, int.storage.u64), + ), + else => unreachable, + } + } + }, + } + return hasher.final(); + } + + switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| + std.hash.autoHash(&hasher, elem), + .repeated_elem => |elem| { + var remaining = len; + while (remaining > 0) : (remaining -= 1) std.hash.autoHash(&hasher, elem); + }, + } + return hasher.final(); + }, + + .error_set_type => |error_set_type| { + var hasher = Hash.init(seed); + for (error_set_type.names) |elem| std.hash.autoHash(&hasher, elem); + return hasher.final(); + }, + + .anon_struct_type => |anon_struct_type| { + var hasher = Hash.init(seed); + for (anon_struct_type.types) |elem| std.hash.autoHash(&hasher, elem); + for (anon_struct_type.values) |elem| std.hash.autoHash(&hasher, elem); + for (anon_struct_type.names) |elem| std.hash.autoHash(&hasher, elem); + return hasher.final(); + }, + + .func_type => |func_type| { + var hasher = Hash.init(seed); + for (func_type.param_types) |param_type| std.hash.autoHash(&hasher, param_type); + std.hash.autoHash(&hasher, func_type.return_type); + std.hash.autoHash(&hasher, func_type.comptime_bits); + std.hash.autoHash(&hasher, func_type.noalias_bits); + std.hash.autoHash(&hasher, func_type.alignment); + std.hash.autoHash(&hasher, func_type.cc); + std.hash.autoHash(&hasher, func_type.is_var_args); + std.hash.autoHash(&hasher, func_type.is_generic); + std.hash.autoHash(&hasher, func_type.is_noinline); + return hasher.final(); + }, + + .memoized_call => |memoized_call| { + var hasher = Hash.init(seed); + std.hash.autoHash(&hasher, memoized_call.func); + for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg); + return hasher.final(); + }, + }; + } + + pub fn eql(a: Key, b: Key, ip: *const InternPool) bool { + const KeyTag = @typeInfo(Key).Union.tag_type.?; const a_tag: 
KeyTag = a; const b_tag: KeyTag = b; if (a_tag != b_tag) return false; @@ -78,13 +906,327 @@ pub const Key = union(enum) { const b_info = b.int_type; return std.meta.eql(a_info, b_info); }, + .ptr_type => |a_info| { + const b_info = b.ptr_type; + return std.meta.eql(a_info, b_info); + }, .array_type => |a_info| { const b_info = b.array_type; return std.meta.eql(a_info, b_info); }, - else => @panic("TODO"), + .vector_type => |a_info| { + const b_info = b.vector_type; + return std.meta.eql(a_info, b_info); + }, + .opt_type => |a_info| { + const b_info = b.opt_type; + return a_info == b_info; + }, + .anyframe_type => |a_info| { + const b_info = b.anyframe_type; + return a_info == b_info; + }, + .error_union_type => |a_info| { + const b_info = b.error_union_type; + return std.meta.eql(a_info, b_info); + }, + .simple_type => |a_info| { + const b_info = b.simple_type; + return a_info == b_info; + }, + .simple_value => |a_info| { + const b_info = b.simple_value; + return a_info == b_info; + }, + .undef => |a_info| { + const b_info = b.undef; + return a_info == b_info; + }, + .runtime_value => |a_info| { + const b_info = b.runtime_value; + return a_info.val == b_info.val; + }, + .opt => |a_info| { + const b_info = b.opt; + return std.meta.eql(a_info, b_info); + }, + .struct_type => |a_info| { + const b_info = b.struct_type; + return std.meta.eql(a_info, b_info); + }, + .union_type => |a_info| { + const b_info = b.union_type; + return std.meta.eql(a_info, b_info); + }, + .un => |a_info| { + const b_info = b.un; + return std.meta.eql(a_info, b_info); + }, + .err => |a_info| { + const b_info = b.err; + return std.meta.eql(a_info, b_info); + }, + .error_union => |a_info| { + const b_info = b.error_union; + return std.meta.eql(a_info, b_info); + }, + .enum_literal => |a_info| { + const b_info = b.enum_literal; + return a_info == b_info; + }, + .enum_tag => |a_info| { + const b_info = b.enum_tag; + return std.meta.eql(a_info, b_info); + }, + .empty_enum_value => |a_info| { + const b_info = b.empty_enum_value; + return a_info == b_info; + }, + + .variable => |a_info| { + const b_info = b.variable; + return a_info.decl == b_info.decl; + }, + .extern_func => |a_info| { + const b_info = b.extern_func; + return a_info.ty == b_info.ty and a_info.decl == b_info.decl; + }, + .func => |a_info| { + const b_info = b.func; + return a_info.ty == b_info.ty and a_info.index == b_info.index; + }, + + .ptr => |a_info| { + const b_info = b.ptr; + if (a_info.ty != b_info.ty or a_info.len != b_info.len) return false; + + const AddrTag = @typeInfo(Key.Ptr.Addr).Union.tag_type.?; + if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; + + return switch (a_info.addr) { + .decl => |a_decl| a_decl == b_info.addr.decl, + .mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl), + .int => |a_int| a_int == b_info.addr.int, + .eu_payload => |a_eu_payload| a_eu_payload == b_info.addr.eu_payload, + .opt_payload => |a_opt_payload| a_opt_payload == b_info.addr.opt_payload, + .comptime_field => |a_comptime_field| a_comptime_field == b_info.addr.comptime_field, + .elem => |a_elem| std.meta.eql(a_elem, b_info.addr.elem), + .field => |a_field| std.meta.eql(a_field, b_info.addr.field), + }; + }, + + .int => |a_info| { + const b_info = b.int; + + if (a_info.ty != b_info.ty) + return false; + + return switch (a_info.storage) { + .u64 => |aa| switch (b_info.storage) { + .u64 => |bb| aa == bb, + .i64 => |bb| aa == bb, + .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, + 
}, + .i64 => |aa| switch (b_info.storage) { + .u64 => |bb| aa == bb, + .i64 => |bb| aa == bb, + .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, + }, + .big_int => |aa| switch (b_info.storage) { + .u64 => |bb| aa.orderAgainstScalar(bb) == .eq, + .i64 => |bb| aa.orderAgainstScalar(bb) == .eq, + .big_int => |bb| aa.eq(bb), + .lazy_align, .lazy_size => false, + }, + .lazy_align => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_size => false, + .lazy_align => |bb| aa == bb, + }, + .lazy_size => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_align => false, + .lazy_size => |bb| aa == bb, + }, + }; + }, + + .float => |a_info| { + const b_info = b.float; + + if (a_info.ty != b_info.ty) + return false; + + if (a_info.ty == .c_longdouble_type and a_info.storage != .f80) { + // These are strange: we'll sometimes represent them as f128, even if the + // underlying type is smaller. f80 is an exception: see float_c_longdouble_f80. + const a_val = switch (a_info.storage) { + inline else => |val| @floatCast(f128, val), + }; + const b_val = switch (b_info.storage) { + inline else => |val| @floatCast(f128, val), + }; + return a_val == b_val; + } + + const StorageTag = @typeInfo(Key.Float.Storage).Union.tag_type.?; + assert(@as(StorageTag, a_info.storage) == @as(StorageTag, b_info.storage)); + + return switch (a_info.storage) { + inline else => |val, tag| val == @field(b_info.storage, @tagName(tag)), + }; + }, + + .opaque_type => |a_info| { + const b_info = b.opaque_type; + return a_info.decl == b_info.decl; + }, + .enum_type => |a_info| { + const b_info = b.enum_type; + return a_info.decl == b_info.decl; + }, + .aggregate => |a_info| { + const b_info = b.aggregate; + if (a_info.ty != b_info.ty) return false; + + const len = ip.aggregateTypeLen(a_info.ty); + const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; + if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { + for (0..@intCast(usize, len)) |elem_index| { + const a_elem = switch (a_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + const b_elem = switch (b_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + if (a_elem != b_elem) return false; + } + return true; + } + + switch (a_info.storage) { + .bytes => |a_bytes| { + const b_bytes = b_info.storage.bytes; + return std.mem.eql( + u8, + a_bytes[0..@intCast(usize, len)], + b_bytes[0..@intCast(usize, len)], + ); + }, + .elems => |a_elems| { + const b_elems = b_info.storage.elems; + return std.mem.eql( + Index, + a_elems[0..@intCast(usize, len)], + b_elems[0..@intCast(usize, len)], + ); + }, + .repeated_elem => |a_elem| { + const b_elem = b_info.storage.repeated_elem; + return a_elem == b_elem; + }, + } + }, + .anon_struct_type => |a_info| { + const b_info = b.anon_struct_type; + return std.mem.eql(Index, a_info.types, b_info.types) and + std.mem.eql(Index, a_info.values, b_info.values) and + std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + }, + .error_set_type => |a_info| { + const b_info = b.error_set_type; + return std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + }, + .inferred_error_set_type => |a_info| { + 
const b_info = b.inferred_error_set_type; + return a_info == b_info; + }, + + .func_type => |a_info| { + const b_info = b.func_type; + + return std.mem.eql(Index, a_info.param_types, b_info.param_types) and + a_info.return_type == b_info.return_type and + a_info.comptime_bits == b_info.comptime_bits and + a_info.noalias_bits == b_info.noalias_bits and + a_info.alignment == b_info.alignment and + a_info.cc == b_info.cc and + a_info.is_var_args == b_info.is_var_args and + a_info.is_generic == b_info.is_generic and + a_info.is_noinline == b_info.is_noinline; + }, + + .memoized_call => |a_info| { + const b_info = b.memoized_call; + return a_info.func == b_info.func and + std.mem.eql(Index, a_info.arg_values, b_info.arg_values); + }, } } + + pub fn typeOf(key: Key) Index { + return switch (key) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + .simple_type, + .struct_type, + .union_type, + .opaque_type, + .enum_type, + .anon_struct_type, + .func_type, + => .type_type, + + inline .runtime_value, + .ptr, + .int, + .float, + .opt, + .variable, + .extern_func, + .func, + .err, + .error_union, + .enum_tag, + .aggregate, + .un, + => |x| x.ty, + + .enum_literal => .enum_literal_type, + + .undef => |x| x, + .empty_enum_value => |x| x, + + .simple_value => |s| switch (s) { + .undefined => .undefined_type, + .void => .void_type, + .null => .null_type, + .false, .true => .bool_type, + .empty_struct => .empty_struct_type, + .@"unreachable" => .noreturn_type, + .generic_poison => .generic_poison_type, + }, + + .memoized_call => unreachable, + }; + } }; pub const Item = struct { @@ -98,11 +1240,539 @@ pub const Item = struct { /// Two values which have the same type can be equality compared simply /// by checking if their indexes are equal, provided they are both in /// the same `InternPool`. +/// When adding a tag to this enum, consider adding a corresponding entry to +/// `primitives` in AstGen.zig. 
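+/// For example (identifier names illustrative):
+///
+///     assert(a_val == b_val); // comptime-equal values of the same type
+///     const u8_ty = InternPool.Index.u8_type.toType(); // see `toType` below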
pub const Index = enum(u32) { + pub const first_type: Index = .u1_type; + pub const last_type: Index = .empty_struct_type; + pub const first_value: Index = .undef; + pub const last_value: Index = .empty_struct; + + u1_type, + u8_type, + i8_type, + u16_type, + i16_type, + u29_type, + u32_type, + i32_type, + u64_type, + i64_type, + u80_type, + u128_type, + i128_type, + usize_type, + isize_type, + c_char_type, + c_short_type, + c_ushort_type, + c_int_type, + c_uint_type, + c_long_type, + c_ulong_type, + c_longlong_type, + c_ulonglong_type, + c_longdouble_type, + f16_type, + f32_type, + f64_type, + f80_type, + f128_type, + anyopaque_type, + bool_type, + void_type, + type_type, + anyerror_type, + comptime_int_type, + comptime_float_type, + noreturn_type, + anyframe_type, + null_type, + undefined_type, + enum_literal_type, + atomic_order_type, + atomic_rmw_op_type, + calling_convention_type, + address_space_type, + float_mode_type, + reduce_op_type, + call_modifier_type, + prefetch_options_type, + export_options_type, + extern_options_type, + type_info_type, + manyptr_u8_type, + manyptr_const_u8_type, + manyptr_const_u8_sentinel_0_type, + single_const_pointer_to_comptime_int_type, + slice_const_u8_type, + slice_const_u8_sentinel_0_type, + anyerror_void_error_union_type, + generic_poison_type, + /// `@TypeOf(.{})` + empty_struct_type, + + /// `undefined` (untyped) + undef, + /// `0` (comptime_int) + zero, + /// `0` (usize) + zero_usize, + /// `0` (u8) + zero_u8, + /// `1` (comptime_int) + one, + /// `1` (usize) + one_usize, + /// `1` (u8) + one_u8, + /// `4` (u8) + four_u8, + /// `-1` (comptime_int) + negative_one, + /// `std.builtin.CallingConvention.C` + calling_convention_c, + /// `std.builtin.CallingConvention.Inline` + calling_convention_inline, + /// `{}` + void_value, + /// `unreachable` (noreturn type) + unreachable_value, + /// `null` (untyped) + null_value, + /// `true` + bool_true, + /// `false` + bool_false, + /// `.{}` (untyped) + empty_struct, + + /// Used for generic parameters where the type and value + /// is not known until generic function instantiation. + generic_poison, + + /// Used by Air/Sema only. + var_args_param_type = std.math.maxInt(u32) - 1, none = std.math.maxInt(u32), + _, + + pub fn toType(i: Index) @import("type.zig").Type { + assert(i != .none); + return .{ .ip_index = i }; + } + + pub fn toValue(i: Index) @import("value.zig").Value { + assert(i != .none); + return .{ + .ip_index = i, + .legacy = undefined, + }; + } + + /// Used for a map of `Index` values to the index within a list of `Index` values. + const Adapter = struct { + indexes: []const Index, + + pub fn eql(ctx: @This(), a: Index, b_void: void, b_map_index: usize) bool { + _ = b_void; + return a == ctx.indexes[b_map_index]; + } + + pub fn hash(ctx: @This(), a: Index) u32 { + _ = ctx; + return std.hash.uint32(@enumToInt(a)); + } + }; + + /// This function is used in the debugger pretty formatters in tools/ to fetch the + /// Tag to encoding mapping to facilitate fancy debug printing for this type. 
+ fn dbHelper(self: *Index, tag_to_encoding_map: *struct { + const DataIsIndex = struct { data: Index }; + const DataIsExtraIndexOfEnumExplicit = struct { + const @"data.fields_len" = opaque {}; + data: *EnumExplicit, + @"trailing.names.len": *@"data.fields_len", + @"trailing.values.len": *@"data.fields_len", + trailing: struct { + names: []NullTerminatedString, + values: []Index, + }, + }; + const DataIsExtraIndexOfTypeStructAnon = struct { + const @"data.fields_len" = opaque {}; + data: *TypeStructAnon, + @"trailing.types.len": *@"data.fields_len", + @"trailing.values.len": *@"data.fields_len", + @"trailing.names.len": *@"data.fields_len", + trailing: struct { + types: []Index, + values: []Index, + names: []NullTerminatedString, + }, + }; + + type_int_signed: struct { data: u32 }, + type_int_unsigned: struct { data: u32 }, + type_array_big: struct { data: *Array }, + type_array_small: struct { data: *Vector }, + type_vector: struct { data: *Vector }, + type_pointer: struct { data: *Tag.TypePointer }, + type_slice: DataIsIndex, + type_optional: DataIsIndex, + type_anyframe: DataIsIndex, + type_error_union: struct { data: *Key.ErrorUnionType }, + type_error_set: struct { + const @"data.names_len" = opaque {}; + data: *ErrorSet, + @"trailing.names.len": *@"data.names_len", + trailing: struct { names: []NullTerminatedString }, + }, + type_inferred_error_set: struct { data: Module.Fn.InferredErrorSet.Index }, + type_enum_auto: struct { + const @"data.fields_len" = opaque {}; + data: *EnumAuto, + @"trailing.names.len": *@"data.fields_len", + trailing: struct { names: []NullTerminatedString }, + }, + type_enum_explicit: DataIsExtraIndexOfEnumExplicit, + type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit, + simple_type: struct { data: SimpleType }, + type_opaque: struct { data: *Key.OpaqueType }, + type_struct: struct { data: Module.Struct.OptionalIndex }, + type_struct_ns: struct { data: Module.Namespace.Index }, + type_struct_anon: DataIsExtraIndexOfTypeStructAnon, + type_tuple_anon: DataIsExtraIndexOfTypeStructAnon, + type_union_tagged: struct { data: Module.Union.Index }, + type_union_untagged: struct { data: Module.Union.Index }, + type_union_safety: struct { data: Module.Union.Index }, + type_function: struct { + const @"data.params_len" = opaque {}; + data: *TypeFunction, + @"trailing.param_types.len": *@"data.params_len", + trailing: struct { param_types: []Index }, + }, + + undef: DataIsIndex, + runtime_value: struct { data: *Tag.TypeValue }, + simple_value: struct { data: SimpleValue }, + ptr_decl: struct { data: *PtrDecl }, + ptr_mut_decl: struct { data: *PtrMutDecl }, + ptr_comptime_field: struct { data: *PtrComptimeField }, + ptr_int: struct { data: *PtrBase }, + ptr_eu_payload: struct { data: *PtrBase }, + ptr_opt_payload: struct { data: *PtrBase }, + ptr_elem: struct { data: *PtrBaseIndex }, + ptr_field: struct { data: *PtrBaseIndex }, + ptr_slice: struct { data: *PtrSlice }, + opt_payload: struct { data: *Tag.TypeValue }, + opt_null: DataIsIndex, + int_u8: struct { data: u8 }, + int_u16: struct { data: u16 }, + int_u32: struct { data: u32 }, + int_i32: struct { data: i32 }, + int_usize: struct { data: u32 }, + int_comptime_int_u32: struct { data: u32 }, + int_comptime_int_i32: struct { data: i32 }, + int_small: struct { data: *IntSmall }, + int_positive: struct { data: u32 }, + int_negative: struct { data: u32 }, + int_lazy_align: struct { data: *IntLazy }, + int_lazy_size: struct { data: *IntLazy }, + error_set_error: struct { data: *Key.Error }, + error_union_error: struct 
{ data: *Key.Error }, + error_union_payload: struct { data: *Tag.TypeValue }, + enum_literal: struct { data: NullTerminatedString }, + enum_tag: struct { data: *Tag.EnumTag }, + float_f16: struct { data: f16 }, + float_f32: struct { data: f32 }, + float_f64: struct { data: *Float64 }, + float_f80: struct { data: *Float80 }, + float_f128: struct { data: *Float128 }, + float_c_longdouble_f80: struct { data: *Float80 }, + float_c_longdouble_f128: struct { data: *Float128 }, + float_comptime_float: struct { data: *Float128 }, + variable: struct { data: *Tag.Variable }, + extern_func: struct { data: *Key.ExternFunc }, + func: struct { data: *Tag.Func }, + only_possible_value: DataIsIndex, + union_value: struct { data: *Key.Union }, + bytes: struct { data: *Bytes }, + aggregate: struct { + const @"data.ty.data.len orelse data.ty.data.fields_len" = opaque {}; + data: *Tag.Aggregate, + @"trailing.element_values.len": *@"data.ty.data.len orelse data.ty.data.fields_len", + trailing: struct { element_values: []Index }, + }, + repeated: struct { data: *Repeated }, + + memoized_call: struct { + const @"data.args_len" = opaque {}; + data: *MemoizedCall, + @"trailing.arg_values.len": *@"data.args_len", + trailing: struct { arg_values: []Index }, + }, + }) void { + _ = self; + const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields; + @setEvalBranchQuota(2_000); + inline for (@typeInfo(Tag).Enum.fields, 0..) |tag, start| { + inline for (0..map_fields.len) |offset| { + if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break; + } else { + @compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry"); + } + } + } + + comptime { + if (builtin.mode == .Debug) { + _ = &dbHelper; + } + } }; +pub const static_keys = [_]Key{ + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 1, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 29, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 80, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 128, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 128, + } }, + + .{ .simple_type = .usize }, + .{ .simple_type = .isize }, + .{ .simple_type = .c_char }, + .{ .simple_type = .c_short }, + .{ .simple_type = .c_ushort }, + .{ .simple_type = .c_int }, + .{ .simple_type = .c_uint }, + .{ .simple_type = .c_long }, + .{ .simple_type = .c_ulong }, + .{ .simple_type = .c_longlong }, + .{ .simple_type = .c_ulonglong }, + .{ .simple_type = .c_longdouble }, + .{ .simple_type = .f16 }, + .{ .simple_type = .f32 }, + .{ .simple_type = .f64 }, + .{ .simple_type = .f80 }, + .{ .simple_type = .f128 }, + .{ .simple_type = .anyopaque }, + .{ .simple_type = .bool }, + .{ .simple_type = .void }, + .{ .simple_type = .type }, + .{ .simple_type = .anyerror }, + .{ .simple_type = .comptime_int }, + .{ .simple_type = .comptime_float }, + .{ .simple_type = .noreturn }, + .{ 
.anyframe_type = .none }, + .{ .simple_type = .null }, + .{ .simple_type = .undefined }, + .{ .simple_type = .enum_literal }, + .{ .simple_type = .atomic_order }, + .{ .simple_type = .atomic_rmw_op }, + .{ .simple_type = .calling_convention }, + .{ .simple_type = .address_space }, + .{ .simple_type = .float_mode }, + .{ .simple_type = .reduce_op }, + .{ .simple_type = .call_modifier }, + .{ .simple_type = .prefetch_options }, + .{ .simple_type = .export_options }, + .{ .simple_type = .extern_options }, + .{ .simple_type = .type_info }, + + .{ .ptr_type = .{ + .child = .u8_type, + .flags = .{ + .size = .Many, + }, + } }, + + // manyptr_const_u8_type + .{ .ptr_type = .{ + .child = .u8_type, + .flags = .{ + .size = .Many, + .is_const = true, + }, + } }, + + // manyptr_const_u8_sentinel_0_type + .{ .ptr_type = .{ + .child = .u8_type, + .sentinel = .zero_u8, + .flags = .{ + .size = .Many, + .is_const = true, + }, + } }, + + .{ .ptr_type = .{ + .child = .comptime_int_type, + .flags = .{ + .size = .One, + .is_const = true, + }, + } }, + + // slice_const_u8_type + .{ .ptr_type = .{ + .child = .u8_type, + .flags = .{ + .size = .Slice, + .is_const = true, + }, + } }, + + // slice_const_u8_sentinel_0_type + .{ .ptr_type = .{ + .child = .u8_type, + .sentinel = .zero_u8, + .flags = .{ + .size = .Slice, + .is_const = true, + }, + } }, + + // anyerror_void_error_union_type + .{ .error_union_type = .{ + .error_set_type = .anyerror_type, + .payload_type = .void_type, + } }, + + // generic_poison_type + .{ .simple_type = .generic_poison }, + + // empty_struct_type + .{ .anon_struct_type = .{ + .types = &.{}, + .names = &.{}, + .values = &.{}, + } }, + + .{ .simple_value = .undefined }, + + .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = 0 }, + } }, + + .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = 0 }, + } }, + + .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = 0 }, + } }, + + .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = 1 }, + } }, + + .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = 1 }, + } }, + + // one_u8 + .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = 1 }, + } }, + // four_u8 + .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = 4 }, + } }, + // negative_one + .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .i64 = -1 }, + } }, + // calling_convention_c + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .int = .one_u8, + } }, + // calling_convention_inline + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .int = .four_u8, + } }, + + .{ .simple_value = .void }, + .{ .simple_value = .@"unreachable" }, + .{ .simple_value = .null }, + .{ .simple_value = .true }, + .{ .simple_value = .false }, + .{ .simple_value = .empty_struct }, + .{ .simple_value = .generic_poison }, +}; + +/// How many items in the InternPool are statically known. +pub const static_len: u32 = static_keys.len; + pub const Tag = enum(u8) { /// An integer type. /// data is number of bits @@ -110,36 +1780,418 @@ pub const Tag = enum(u8) { /// An integer type. /// data is number of bits type_int_unsigned, - /// An array type. + /// An array type whose length requires 64 bits or which has a sentinel. /// data is payload to Array. - type_array, - /// A type or value that can be represented with only an enum tag. - /// data is Simple enum value - simple, - /// An unsigned integer value that can be represented by u32. + type_array_big, + /// An array type that has no sentinel and whose length fits in 32 bits. + /// data is payload to Vector. 
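+    /// For example, `[4]u8` can use this encoding, with the `Vector` payload
+    /// recording the length (4) and the child type (`.u8_type`), whereas
+    /// `[4:0]u8` must use `type_array_big` because it has a sentinel.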
+ type_array_small, + /// A vector type. + /// data is payload to Vector. + type_vector, + /// A fully explicitly specified pointer type. + type_pointer, + /// A slice type. + /// data is Index of underlying pointer type. + type_slice, + /// An optional type. + /// data is the child type. + type_optional, + /// The type `anyframe->T`. + /// data is the child type. + /// If the child type is `none`, the type is `anyframe`. + type_anyframe, + /// An error union type. + /// data is payload to `Key.ErrorUnionType`. + type_error_union, + /// An error set type. + /// data is payload to `ErrorSet`. + type_error_set, + /// The inferred error set type of a function. + /// data is `Module.Fn.InferredErrorSet.Index`. + type_inferred_error_set, + /// An enum type with auto-numbered tag values. + /// The enum is exhaustive. + /// data is payload index to `EnumAuto`. + type_enum_auto, + /// An enum type with an explicitly provided integer tag type. + /// The enum is exhaustive. + /// data is payload index to `EnumExplicit`. + type_enum_explicit, + /// An enum type with an explicitly provided integer tag type. + /// The enum is non-exhaustive. + /// data is payload index to `EnumExplicit`. + type_enum_nonexhaustive, + /// A type that can be represented with only an enum tag. + /// data is SimpleType enum value. + simple_type, + /// An opaque type. + /// data is index of Key.OpaqueType in extra. + type_opaque, + /// A struct type. + /// data is Module.Struct.OptionalIndex + /// The `none` tag is used to represent `@TypeOf(.{})`. + type_struct, + /// A struct type that has only a namespace; no fields, and there is no + /// Module.Struct object allocated for it. + /// data is Module.Namespace.Index. + type_struct_ns, + /// An AnonStructType which stores types, names, and values for fields. + /// data is extra index of `TypeStructAnon`. + type_struct_anon, + /// An AnonStructType which has only types and values for fields. + /// data is extra index of `TypeStructAnon`. + type_tuple_anon, + /// A tagged union type. + /// `data` is `Module.Union.Index`. + type_union_tagged, + /// An untagged union type. It also has no safety tag. + /// `data` is `Module.Union.Index`. + type_union_untagged, + /// An untagged union type which has a safety tag. + /// `data` is `Module.Union.Index`. + type_union_safety, + /// A function body type. + /// `data` is extra index to `TypeFunction`. + type_function, + + /// Typed `undefined`. + /// `data` is `Index` of the type. + /// Untyped `undefined` is stored instead via `simple_value`. + undef, + /// A wrapper for values which are comptime-known but should + /// semantically be runtime-known. + /// data is extra index of `TypeValue`. + runtime_value, + /// A value that can be represented with only an enum tag. + /// data is SimpleValue enum value. + simple_value, + /// A pointer to a decl. + /// data is extra index of `PtrDecl`, which contains the type and address. + ptr_decl, + /// A pointer to a decl that can be mutated at comptime. + /// data is extra index of `PtrMutDecl`, which contains the type and address. + ptr_mut_decl, + /// data is extra index of `PtrComptimeField`, which contains the pointer type and field value. + ptr_comptime_field, + /// A pointer with an integer value. + /// data is extra index of `PtrBase`, which contains the type and address. + /// Only pointer types are allowed to have this encoding. Optional types must use + /// `opt_payload` or `opt_null`. + ptr_int, + /// A pointer to the payload of an error union. 
+ /// data is extra index of `PtrBase`, which contains the type and base pointer. + ptr_eu_payload, + /// A pointer to the payload of an optional. + /// data is extra index of `PtrBase`, which contains the type and base pointer. + ptr_opt_payload, + /// A pointer to an array element. + /// data is extra index of PtrBaseIndex, which contains the base array and element index. + /// In order to use this encoding, one must ensure that the `InternPool` + /// already contains the elem pointer type corresponding to this payload. + ptr_elem, + /// A pointer to a container field. + /// data is extra index of PtrBaseIndex, which contains the base container and field index. + ptr_field, + /// A slice. + /// data is extra index of PtrSlice, which contains the ptr and len values + ptr_slice, + /// An optional value that is non-null. + /// data is extra index of `TypeValue`. + /// The type is the optional type (not the payload type). + opt_payload, + /// An optional value that is null. + /// data is Index of the optional type. + opt_null, + /// Type: u8 + /// data is integer value + int_u8, + /// Type: u16 + /// data is integer value + int_u16, + /// Type: u32 /// data is integer value int_u32, - /// An unsigned integer value that can be represented by i32. + /// Type: i32 /// data is integer value bitcasted to u32. int_i32, - /// A positive integer value that does not fit in 32 bits. - /// data is a extra index to BigInt. - int_big_positive, - /// A negative integer value that does not fit in 32 bits. - /// data is a extra index to BigInt. - int_big_negative, - /// A float value that can be represented by f32. + /// A usize that fits in 32 bits. + /// data is integer value. + int_usize, + /// A comptime_int that fits in a u32. + /// data is integer value. + int_comptime_int_u32, + /// A comptime_int that fits in an i32. + /// data is integer value bitcasted to u32. + int_comptime_int_i32, + /// An integer value that fits in 32 bits with an explicitly provided type. + /// data is extra index of `IntSmall`. + int_small, + /// A positive integer value. + /// data is a limbs index to `Int`. + int_positive, + /// A negative integer value. + /// data is a limbs index to `Int`. + int_negative, + /// The ABI alignment of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_align, + /// The ABI size of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_size, + /// An error value. + /// data is extra index of `Key.Error`. + error_set_error, + /// An error union error. + /// data is extra index of `Key.Error`. + error_union_error, + /// An error union payload. + /// data is extra index of `TypeValue`. + error_union_payload, + /// An enum literal value. + /// data is `NullTerminatedString` of the error name. + enum_literal, + /// An enum tag value. + /// data is extra index of `EnumTag`. + enum_tag, + /// An f16 value. + /// data is float value bitcasted to u16 and zero-extended. + float_f16, + /// An f32 value. /// data is float value bitcasted to u32. float_f32, - /// A float value that can be represented by f64. - /// data is payload index to Float64. + /// An f64 value. + /// data is extra index to Float64. float_f64, - /// A float value that can be represented by f128. - /// data is payload index to Float128. + /// An f80 value. + /// data is extra index to Float80. + float_f80, + /// An f128 value. + /// data is extra index to Float128. float_f128, + /// A c_longdouble value of 80 bits. + /// data is extra index to Float80. 
+    /// This is used when a c_longdouble value is provided as an f80, because f80 has unnormalized
+    /// values which cannot be losslessly represented as f128. It should only be used when the type
+    /// underlying c_longdouble for the target is 80 bits.
+    float_c_longdouble_f80,
+    /// A c_longdouble value of 128 bits.
+    /// data is extra index to Float128.
+    /// This is used when a c_longdouble value is provided as any type other than an f80, since all
+    /// other float types can be losslessly converted to and from f128.
+    float_c_longdouble_f128,
+    /// A comptime_float value.
+    /// data is extra index to Float128.
+    float_comptime_float,
+    /// A global variable.
+    /// data is extra index to Variable.
+    variable,
+    /// An extern function.
+    /// data is extra index to Key.ExternFunc.
+    extern_func,
+    /// A regular function.
+    /// data is extra index to Func.
+    func,
+    /// This represents the only possible value for *some* types which have
+    /// only one possible value. Not all only-possible-values are encoded this way;
+    /// for example, non-anonymous structs which have all comptime fields are not
+    /// yet encoded this way (see the TODO in `indexToKey`).
+    /// The set of values that are encoded this way is:
+    /// * An array or vector which has length 0.
+    /// * An anonymous struct or tuple whose field values are all comptime-known.
+    /// * An empty enum or union. TODO: this value's existence is strange, because such a type in reality has no values. See #15909
+    /// data is Index of the type, which is known to be zero bits at runtime.
+    only_possible_value,
+    /// A union value.
+    /// data is extra index to Key.Union.
+    union_value,
+    /// An array of bytes.
+    /// data is extra index to `Bytes`.
+    bytes,
+    /// An instance of a struct, array, or vector.
+    /// data is extra index to `Aggregate`.
+    aggregate,
+    /// An instance of an array or vector with every element being the same value.
+    /// data is extra index to `Repeated`.
+    repeated,
+
+    /// A memoized comptime function call result.
+ /// data is extra index to `MemoizedCall` + memoized_call, + + const ErrorUnionType = Key.ErrorUnionType; + const OpaqueType = Key.OpaqueType; + const TypeValue = Key.TypeValue; + const Error = Key.Error; + const EnumTag = Key.EnumTag; + const ExternFunc = Key.ExternFunc; + const Func = Key.Func; + const Union = Key.Union; + const TypePointer = Key.PtrType; + + fn Payload(comptime tag: Tag) type { + return switch (tag) { + .type_int_signed => unreachable, + .type_int_unsigned => unreachable, + .type_array_big => Array, + .type_array_small => Vector, + .type_vector => Vector, + .type_pointer => TypePointer, + .type_slice => unreachable, + .type_optional => unreachable, + .type_anyframe => unreachable, + .type_error_union => ErrorUnionType, + .type_error_set => ErrorSet, + .type_inferred_error_set => unreachable, + .type_enum_auto => EnumAuto, + .type_enum_explicit => EnumExplicit, + .type_enum_nonexhaustive => EnumExplicit, + .simple_type => unreachable, + .type_opaque => OpaqueType, + .type_struct => unreachable, + .type_struct_ns => unreachable, + .type_struct_anon => TypeStructAnon, + .type_tuple_anon => TypeStructAnon, + .type_union_tagged => unreachable, + .type_union_untagged => unreachable, + .type_union_safety => unreachable, + .type_function => TypeFunction, + + .undef => unreachable, + .runtime_value => TypeValue, + .simple_value => unreachable, + .ptr_decl => PtrDecl, + .ptr_mut_decl => PtrMutDecl, + .ptr_comptime_field => PtrComptimeField, + .ptr_int => PtrBase, + .ptr_eu_payload => PtrBase, + .ptr_opt_payload => PtrBase, + .ptr_elem => PtrBaseIndex, + .ptr_field => PtrBaseIndex, + .ptr_slice => PtrSlice, + .opt_payload => TypeValue, + .opt_null => unreachable, + .int_u8 => unreachable, + .int_u16 => unreachable, + .int_u32 => unreachable, + .int_i32 => unreachable, + .int_usize => unreachable, + .int_comptime_int_u32 => unreachable, + .int_comptime_int_i32 => unreachable, + .int_small => IntSmall, + .int_positive => unreachable, + .int_negative => unreachable, + .int_lazy_align => IntLazy, + .int_lazy_size => IntLazy, + .error_set_error => Error, + .error_union_error => Error, + .error_union_payload => TypeValue, + .enum_literal => unreachable, + .enum_tag => EnumTag, + .float_f16 => unreachable, + .float_f32 => unreachable, + .float_f64 => unreachable, + .float_f80 => unreachable, + .float_f128 => unreachable, + .float_c_longdouble_f80 => unreachable, + .float_c_longdouble_f128 => unreachable, + .float_comptime_float => unreachable, + .variable => Variable, + .extern_func => ExternFunc, + .func => Func, + .only_possible_value => unreachable, + .union_value => Union, + .bytes => Bytes, + .aggregate => Aggregate, + .repeated => Repeated, + .memoized_call => MemoizedCall, + }; + } + + pub const Variable = struct { + ty: Index, + /// May be `none`. + init: Index, + decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" var stderrp = ...` would have 'c' as library name. + lib_name: OptionalNullTerminatedString, + flags: Flags, + + pub const Flags = packed struct(u32) { + is_extern: bool, + is_const: bool, + is_threadlocal: bool, + is_weak_linkage: bool, + _: u28 = 0, + }; + }; + + /// Trailing: + /// 0. element: Index for each len + /// len is determined by the aggregate type. + pub const Aggregate = struct { + /// The type of the aggregate. + ty: Index, + }; }; -pub const Simple = enum(u32) { +/// Trailing: +/// 0. 
name: NullTerminatedString for each names_len +pub const ErrorSet = struct { + names_len: u32, + /// Maps error names to declaration index. + names_map: MapIndex, +}; + +/// Trailing: +/// 0. param_type: Index for each params_len +pub const TypeFunction = struct { + params_len: u32, + return_type: Index, + comptime_bits: u32, + noalias_bits: u32, + flags: Flags, + + pub const Flags = packed struct(u32) { + alignment: Alignment, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + _: u11 = 0, + }; +}; + +pub const Bytes = struct { + /// The type of the aggregate + ty: Index, + /// Index into string_bytes, of len ip.aggregateTypeLen(ty) + bytes: String, +}; + +pub const Repeated = struct { + /// The type of the aggregate. + ty: Index, + /// The value of every element. + elem_val: Index, +}; + +/// Trailing: +/// 0. type: Index for each fields_len +/// 1. value: Index for each fields_len +/// 2. name: NullTerminatedString for each fields_len +/// The set of field names is omitted when the `Tag` is `type_tuple_anon`. +pub const TypeStructAnon = struct { + fields_len: u32, +}; + +/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to +/// implement logic that only wants to deal with types because the logic can +/// ignore all simple values. Note that technically, types are values. +pub const SimpleType = enum(u32) { f16, f32, f64, @@ -147,6 +2199,7 @@ pub const Simple = enum(u32) { f128, usize, isize, + c_char, c_short, c_ushort, c_int, @@ -164,29 +2217,339 @@ pub const Simple = enum(u32) { comptime_int, comptime_float, noreturn, - @"anyframe", - null_type, - undefined_type, - enum_literal_type, - undefined, - void_value, null, - bool_true, - bool_false, + undefined, + enum_literal, + + atomic_order, + atomic_rmw_op, + calling_convention, + address_space, + float_mode, + reduce_op, + call_modifier, + prefetch_options, + export_options, + extern_options, + type_info, + + generic_poison, }; -pub const Array = struct { +pub const SimpleValue = enum(u32) { + /// This is untyped `undefined`. + undefined, + void, + /// This is untyped `null`. + null, + /// This is the untyped empty struct literal: `.{}` + empty_struct, + true, + false, + @"unreachable", + + generic_poison, +}; + +/// Stored as a power-of-two, with one special value to indicate none. +pub const Alignment = enum(u6) { + none = std.math.maxInt(u6), + _, + + pub fn toByteUnitsOptional(a: Alignment) ?u64 { + return switch (a) { + .none => null, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn toByteUnits(a: Alignment, default: u64) u64 { + return switch (a) { + .none => default, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn fromByteUnits(n: u64) Alignment { + if (n == 0) return .none; + assert(std.math.isPowerOfTwo(n)); + return @intToEnum(Alignment, @ctz(n)); + } + + pub fn fromNonzeroByteUnits(n: u64) Alignment { + assert(n != 0); + return fromByteUnits(n); + } + + pub fn min(a: Alignment, b: Alignment) Alignment { + return @intToEnum(Alignment, @min(@enumToInt(a), @enumToInt(b))); + } +}; + +/// Used for non-sentineled arrays that have length fitting in u32, as well as +/// vectors. 
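+/// For example, `[4]u8` and `@Vector(4, u8)` both fit this payload with
+/// `len = 4`, while `[4:0]u8` does not, because the sentinel requires the
+/// `Array` encoding below.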
+pub const Vector = struct { len: u32, child: Index, }; +pub const Array = struct { + len0: u32, + len1: u32, + child: Index, + sentinel: Index, + + pub const Length = PackedU64; + + pub fn getLength(a: Array) u64 { + return (PackedU64{ + .a = a.len0, + .b = a.len1, + }).get(); + } +}; + +/// Trailing: +/// 0. field name: NullTerminatedString for each fields_len; declaration order +/// 1. tag value: Index for each fields_len; declaration order +pub const EnumExplicit = struct { + /// The Decl that corresponds to the enum itself. + decl: Module.Decl.Index, + /// This may be `none` if there are no declarations. + namespace: Module.Namespace.OptionalIndex, + /// An integer type which is used for the numerical value of the enum, which + /// has been explicitly provided by the enum declaration. + int_tag_type: Index, + fields_len: u32, + /// Maps field names to declaration index. + names_map: MapIndex, + /// Maps field values to declaration index. + /// If this is `none`, it means the trailing tag values are absent because + /// they are auto-numbered. + values_map: OptionalMapIndex, +}; + +/// Trailing: +/// 0. field name: NullTerminatedString for each fields_len; declaration order +pub const EnumAuto = struct { + /// The Decl that corresponds to the enum itself. + decl: Module.Decl.Index, + /// This may be `none` if there are no declarations. + namespace: Module.Namespace.OptionalIndex, + /// An integer type which is used for the numerical value of the enum, which + /// was inferred by Zig based on the number of tags. + int_tag_type: Index, + fields_len: u32, + /// Maps field names to declaration index. + names_map: MapIndex, +}; + +pub const PackedU64 = packed struct(u64) { + a: u32, + b: u32, + + pub fn get(x: PackedU64) u64 { + return @bitCast(u64, x); + } + + pub fn init(x: u64) PackedU64 { + return @bitCast(PackedU64, x); + } +}; + +pub const PtrDecl = struct { + ty: Index, + decl: Module.Decl.Index, +}; + +pub const PtrMutDecl = struct { + ty: Index, + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, +}; + +pub const PtrComptimeField = struct { + ty: Index, + field_val: Index, +}; + +pub const PtrBase = struct { + ty: Index, + base: Index, +}; + +pub const PtrBaseIndex = struct { + ty: Index, + base: Index, + index: Index, +}; + +pub const PtrSlice = struct { + /// The slice type. + ty: Index, + /// A many pointer value. + ptr: Index, + /// A usize value. + len: Index, +}; + +/// Trailing: Limb for every limbs_len +pub const Int = struct { + ty: Index, + limbs_len: u32, +}; + +pub const IntSmall = struct { + ty: Index, + value: u32, +}; + +pub const IntLazy = struct { + ty: Index, + lazy_ty: Index, +}; + +/// A f64 value, broken up into 2 u32 parts. +pub const Float64 = struct { + piece0: u32, + piece1: u32, + + pub fn get(self: Float64) f64 { + const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32); + return @bitCast(f64, int_bits); + } + + fn pack(val: f64) Float64 { + const bits = @bitCast(u64, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + }; + } +}; + +/// A f80 value, broken up into 2 u32 parts and a u16 part zero-padded to a u32. 
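+/// As with `Float64`, the value is split into u32 pieces because `extra` only
+/// stores u32 words; `pack` and `get` are exact inverses of each other, so the
+/// round trip is lossless bit-for-bit.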
+pub const Float80 = struct { + piece0: u32, + piece1: u32, + piece2: u32, // u16 part, top bits + + pub fn get(self: Float80) f80 { + const int_bits = @as(u80, self.piece0) | + (@as(u80, self.piece1) << 32) | + (@as(u80, self.piece2) << 64); + return @bitCast(f80, int_bits); + } + + fn pack(val: f80) Float80 { + const bits = @bitCast(u80, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + .piece2 = @truncate(u16, bits >> 64), + }; + } +}; + +/// A f128 value, broken up into 4 u32 parts. +pub const Float128 = struct { + piece0: u32, + piece1: u32, + piece2: u32, + piece3: u32, + + pub fn get(self: Float128) f128 { + const int_bits = @as(u128, self.piece0) | + (@as(u128, self.piece1) << 32) | + (@as(u128, self.piece2) << 64) | + (@as(u128, self.piece3) << 96); + return @bitCast(f128, int_bits); + } + + fn pack(val: f128) Float128 { + const bits = @bitCast(u128, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + .piece2 = @truncate(u32, bits >> 64), + .piece3 = @truncate(u32, bits >> 96), + }; + } +}; + +/// Trailing: +/// 0. arg value: Index for each args_len +pub const MemoizedCall = struct { + func: Module.Fn.Index, + args_len: u32, + result: Index, +}; + +pub fn init(ip: *InternPool, gpa: Allocator) !void { + assert(ip.items.len == 0); + + // Reserve string index 0 for an empty string. + assert((try ip.getOrPutString(gpa, "")) == .empty); + + // So that we can use `catch unreachable` below. + try ip.items.ensureUnusedCapacity(gpa, static_keys.len); + try ip.map.ensureUnusedCapacity(gpa, static_keys.len); + try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); + + // This inserts all the statically-known values into the intern pool in the + // order expected. + for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable; + + if (std.debug.runtime_safety) { + // Sanity check. 
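+        // These asserts only hold if `static_keys` and the `Index` enum agree
+        // on the order of the statically-known items; e.g. `.bool_true` must
+        // name the item whose `simple_value` decodes to `.true`.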
+ assert(ip.indexToKey(.bool_true).simple_value == .true); + assert(ip.indexToKey(.bool_false).simple_value == .false); + + const cc_inline = ip.indexToKey(.calling_convention_inline).enum_tag.int; + const cc_c = ip.indexToKey(.calling_convention_c).enum_tag.int; + + assert(ip.indexToKey(cc_inline).int.storage.u64 == + @enumToInt(std.builtin.CallingConvention.Inline)); + + assert(ip.indexToKey(cc_c).int.storage.u64 == + @enumToInt(std.builtin.CallingConvention.C)); + + assert(ip.indexToKey(ip.typeOf(cc_inline)).int_type.bits == + @typeInfo(@typeInfo(std.builtin.CallingConvention).Enum.tag_type).Int.bits); + } + + assert(ip.items.len == static_keys.len); +} + pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.map.deinit(gpa); ip.items.deinit(gpa); ip.extra.deinit(gpa); + ip.limbs.deinit(gpa); + ip.string_bytes.deinit(gpa); + + ip.structs_free_list.deinit(gpa); + ip.allocated_structs.deinit(gpa); + + ip.unions_free_list.deinit(gpa); + ip.allocated_unions.deinit(gpa); + + ip.funcs_free_list.deinit(gpa); + ip.allocated_funcs.deinit(gpa); + + ip.inferred_error_sets_free_list.deinit(gpa); + ip.allocated_inferred_error_sets.deinit(gpa); + + for (ip.maps.items) |*map| map.deinit(gpa); + ip.maps.deinit(gpa); + + ip.string_table.deinit(gpa); + + ip.* = undefined; } -pub fn indexToKey(ip: InternPool, index: Index) Key { +pub fn indexToKey(ip: *const InternPool, index: Index) Key { + assert(index != .none); const item = ip.items.get(@enumToInt(index)); const data = item.data; return switch (item.tag) { @@ -202,89 +2565,1930 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .bits = @intCast(u16, data), }, }, - .type_array => { + .type_array_big => { const array_info = ip.extraData(Array, data); + return .{ .array_type = .{ + .len = array_info.getLength(), + .child = array_info.child, + .sentinel = array_info.sentinel, + } }; + }, + .type_array_small => { + const array_info = ip.extraData(Vector, data); return .{ .array_type = .{ .len = array_info.len, .child = array_info.child, .sentinel = .none, } }; }, - .simple => .{ .simple = @intToEnum(Simple, data) }, + .simple_type => .{ .simple_type = @intToEnum(SimpleType, data) }, + .simple_value => .{ .simple_value = @intToEnum(SimpleValue, data) }, - else => @panic("TODO"), + .type_vector => { + const vector_info = ip.extraData(Vector, data); + return .{ .vector_type = .{ + .len = vector_info.len, + .child = vector_info.child, + } }; + }, + + .type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) }, + + .type_slice => { + assert(ip.items.items(.tag)[data] == .type_pointer); + var ptr_info = ip.extraData(Tag.TypePointer, ip.items.items(.data)[data]); + ptr_info.flags.size = .Slice; + return .{ .ptr_type = ptr_info }; + }, + + .type_optional => .{ .opt_type = @intToEnum(Index, data) }, + .type_anyframe => .{ .anyframe_type = @intToEnum(Index, data) }, + + .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, + .type_error_set => { + const error_set = ip.extraDataTrail(ErrorSet, data); + const names_len = error_set.data.names_len; + const names = ip.extra.items[error_set.end..][0..names_len]; + return .{ .error_set_type = .{ + .names = @ptrCast([]const NullTerminatedString, names), + .names_map = error_set.data.names_map.toOptional(), + } }; + }, + .type_inferred_error_set => .{ + .inferred_error_set_type = @intToEnum(Module.Fn.InferredErrorSet.Index, data), + }, + + .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, + .type_struct => { + const struct_index = 
@intToEnum(Module.Struct.OptionalIndex, data); + const namespace = if (struct_index.unwrap()) |i| + ip.structPtrConst(i).namespace.toOptional() + else + .none; + return .{ .struct_type = .{ + .index = struct_index, + .namespace = namespace, + } }; + }, + .type_struct_ns => .{ .struct_type = .{ + .index = .none, + .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(), + } }, + + .type_struct_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data); + const fields_len = type_struct_anon.data.fields_len; + const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len]; + return .{ .anon_struct_type = .{ + .types = @ptrCast([]const Index, types), + .values = @ptrCast([]const Index, values), + .names = @ptrCast([]const NullTerminatedString, names), + } }; + }, + .type_tuple_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data); + const fields_len = type_struct_anon.data.fields_len; + const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + return .{ .anon_struct_type = .{ + .types = @ptrCast([]const Index, types), + .values = @ptrCast([]const Index, values), + .names = &.{}, + } }; + }, + + .type_union_untagged => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .none, + } }, + .type_union_tagged => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .tagged, + } }, + .type_union_safety => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .safety, + } }, + + .type_enum_auto => { + const enum_auto = ip.extraDataTrail(EnumAuto, data); + const names = @ptrCast( + []const NullTerminatedString, + ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len], + ); + return .{ .enum_type = .{ + .decl = enum_auto.data.decl, + .namespace = enum_auto.data.namespace, + .tag_ty = enum_auto.data.int_tag_type, + .names = names, + .values = &.{}, + .tag_mode = .auto, + .names_map = enum_auto.data.names_map.toOptional(), + .values_map = .none, + } }; + }, + .type_enum_explicit => ip.indexToKeyEnum(data, .explicit), + .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive), + .type_function => .{ .func_type = ip.indexToKeyFuncType(data) }, + + .undef => .{ .undef = @intToEnum(Index, data) }, + .runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) }, + .opt_null => .{ .opt = .{ + .ty = @intToEnum(Index, data), + .val = .none, + } }, + .opt_payload => { + const extra = ip.extraData(Tag.TypeValue, data); + return .{ .opt = .{ + .ty = extra.ty, + .val = extra.val, + } }; + }, + .ptr_decl => { + const info = ip.extraData(PtrDecl, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .decl = info.decl }, + } }; + }, + .ptr_mut_decl => { + const info = ip.extraData(PtrMutDecl, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .mut_decl = .{ + .decl = info.decl, + .runtime_index = info.runtime_index, + } }, + } }; + }, + .ptr_comptime_field => { + const info = ip.extraData(PtrComptimeField, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .comptime_field = info.field_val }, + } }; + }, + .ptr_int => { + const info = ip.extraData(PtrBase, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .int = info.base }, + } }; + }, + 
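+        // Each remaining pointer tag decodes the payload struct named in its
+        // `Tag` doc comment; e.g. a comptime-known `@intToPtr(*u8, 0x1000)`
+        // value round-trips through the `ptr_int` arm above.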
.ptr_eu_payload => { + const info = ip.extraData(PtrBase, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .eu_payload = info.base }, + } }; + }, + .ptr_opt_payload => { + const info = ip.extraData(PtrBase, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .opt_payload = info.base }, + } }; + }, + .ptr_elem => { + // Avoid `indexToKey` recursion by asserting the tag encoding. + const info = ip.extraData(PtrBaseIndex, data); + const index_item = ip.items.get(@enumToInt(info.index)); + return switch (index_item.tag) { + .int_usize => .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .elem = .{ + .base = info.base, + .index = index_item.data, + } }, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + .ptr_field => { + // Avoid `indexToKey` recursion by asserting the tag encoding. + const info = ip.extraData(PtrBaseIndex, data); + const index_item = ip.items.get(@enumToInt(info.index)); + return switch (index_item.tag) { + .int_usize => .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .field = .{ + .base = info.base, + .index = index_item.data, + } }, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + .ptr_slice => { + const info = ip.extraData(PtrSlice, data); + const ptr_item = ip.items.get(@enumToInt(info.ptr)); + return .{ + .ptr = .{ + .ty = info.ty, + .addr = switch (ptr_item.tag) { + .ptr_decl => .{ + .decl = ip.extraData(PtrDecl, ptr_item.data).decl, + }, + .ptr_mut_decl => b: { + const sub_info = ip.extraData(PtrMutDecl, ptr_item.data); + break :b .{ .mut_decl = .{ + .decl = sub_info.decl, + .runtime_index = sub_info.runtime_index, + } }; + }, + .ptr_comptime_field => .{ + .comptime_field = ip.extraData(PtrComptimeField, ptr_item.data).field_val, + }, + .ptr_int => .{ + .int = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_eu_payload => .{ + .eu_payload = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_opt_payload => .{ + .opt_payload = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_elem => b: { + // Avoid `indexToKey` recursion by asserting the tag encoding. + const sub_info = ip.extraData(PtrBaseIndex, ptr_item.data); + const index_item = ip.items.get(@enumToInt(sub_info.index)); + break :b switch (index_item.tag) { + .int_usize => .{ .elem = .{ + .base = sub_info.base, + .index = index_item.data, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + .ptr_field => b: { + // Avoid `indexToKey` recursion by asserting the tag encoding. 
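+                        // (The index is itself an interned usize, so its `data`
+                        // can be read directly from `items` rather than
+                        // re-entering `indexToKey`.)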
+ const sub_info = ip.extraData(PtrBaseIndex, ptr_item.data); + const index_item = ip.items.get(@enumToInt(sub_info.index)); + break :b switch (index_item.tag) { + .int_usize => .{ .field = .{ + .base = sub_info.base, + .index = index_item.data, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + else => unreachable, + }, + .len = info.len, + }, + }; + }, + .int_u8 => .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = data }, + } }, + .int_u16 => .{ .int = .{ + .ty = .u16_type, + .storage = .{ .u64 = data }, + } }, + .int_u32 => .{ .int = .{ + .ty = .u32_type, + .storage = .{ .u64 = data }, + } }, + .int_i32 => .{ .int = .{ + .ty = .i32_type, + .storage = .{ .i64 = @bitCast(i32, data) }, + } }, + .int_usize => .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = data }, + } }, + .int_comptime_int_u32 => .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = data }, + } }, + .int_comptime_int_i32 => .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .i64 = @bitCast(i32, data) }, + } }, + .int_positive => ip.indexToKeyBigInt(data, true), + .int_negative => ip.indexToKeyBigInt(data, false), + .int_small => { + const info = ip.extraData(IntSmall, data); + return .{ .int = .{ + .ty = info.ty, + .storage = .{ .u64 = info.value }, + } }; + }, + .int_lazy_align, .int_lazy_size => |tag| { + const info = ip.extraData(IntLazy, data); + return .{ .int = .{ + .ty = info.ty, + .storage = switch (tag) { + .int_lazy_align => .{ .lazy_align = info.lazy_ty }, + .int_lazy_size => .{ .lazy_size = info.lazy_ty }, + else => unreachable, + }, + } }; + }, + .float_f16 => .{ .float = .{ + .ty = .f16_type, + .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, + } }, + .float_f32 => .{ .float = .{ + .ty = .f32_type, + .storage = .{ .f32 = @bitCast(f32, data) }, + } }, + .float_f64 => .{ .float = .{ + .ty = .f64_type, + .storage = .{ .f64 = ip.extraData(Float64, data).get() }, + } }, + .float_f80 => .{ .float = .{ + .ty = .f80_type, + .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + } }, + .float_f128 => .{ .float = .{ + .ty = .f128_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, + .float_c_longdouble_f80 => .{ .float = .{ + .ty = .c_longdouble_type, + .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + } }, + .float_c_longdouble_f128 => .{ .float = .{ + .ty = .c_longdouble_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, + .float_comptime_float => .{ .float = .{ + .ty = .comptime_float_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, + .variable => { + const extra = ip.extraData(Tag.Variable, data); + return .{ .variable = .{ + .ty = extra.ty, + .init = extra.init, + .decl = extra.decl, + .lib_name = extra.lib_name, + .is_extern = extra.flags.is_extern, + .is_const = extra.flags.is_const, + .is_threadlocal = extra.flags.is_threadlocal, + .is_weak_linkage = extra.flags.is_weak_linkage, + } }; + }, + .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, + .func => .{ .func = ip.extraData(Tag.Func, data) }, + .only_possible_value => { + const ty = @intToEnum(Index, data); + const ty_item = ip.items.get(@enumToInt(ty)); + return switch (ty_item.tag) { + .type_array_big => { + const sentinel = @ptrCast( + *const [1]Index, + &ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?], + ); + return .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = sentinel[0..@boolToInt(sentinel[0] != 
.none)] }, + } }; + }, + .type_array_small, .type_vector => .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = &.{} }, + } }, + // TODO: migrate structs to properly use the InternPool rather + // than using the SegmentedList trick, then the struct type will + // have a slice of comptime values that can be used here for when + // the struct has one possible value due to all fields comptime (same + // as the tuple case below). + .type_struct, .type_struct_ns => .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = &.{} }, + } }, + + // There is only one possible value precisely due to the + // fact that this values slice is fully populated! + .type_struct_anon, .type_tuple_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, ty_item.data); + const fields_len = type_struct_anon.data.fields_len; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + return .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = @ptrCast([]const Index, values) }, + } }; + }, + + .type_enum_auto, + .type_enum_explicit, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => .{ .empty_enum_value = ty }, + + else => unreachable, + }; + }, + .bytes => { + const extra = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.ty)); + return .{ .aggregate = .{ + .ty = extra.ty, + .storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] }, + } }; + }, + .aggregate => { + const extra = ip.extraDataTrail(Tag.Aggregate, data); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty)); + const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]); + return .{ .aggregate = .{ + .ty = extra.data.ty, + .storage = .{ .elems = fields }, + } }; + }, + .repeated => { + const extra = ip.extraData(Repeated, data); + return .{ .aggregate = .{ + .ty = extra.ty, + .storage = .{ .repeated_elem = extra.elem_val }, + } }; + }, + .union_value => .{ .un = ip.extraData(Key.Union, data) }, + .error_set_error => .{ .err = ip.extraData(Key.Error, data) }, + .error_union_error => { + const extra = ip.extraData(Key.Error, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .err_name = extra.name }, + } }; + }, + .error_union_payload => { + const extra = ip.extraData(Tag.TypeValue, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .payload = extra.val }, + } }; + }, + .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, + .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, + + .memoized_call => { + const extra = ip.extraDataTrail(MemoizedCall, data); + return .{ .memoized_call = .{ + .func = extra.data.func, + .arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]), + .result = extra.data.result, + } }; + }, }; } +fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { + const type_function = ip.extraDataTrail(TypeFunction, data); + const param_types = @ptrCast( + []Index, + ip.extra.items[type_function.end..][0..type_function.data.params_len], + ); + return .{ + .param_types = param_types, + .return_type = type_function.data.return_type, + .comptime_bits = type_function.data.comptime_bits, + .noalias_bits = type_function.data.noalias_bits, + .alignment = type_function.data.flags.alignment, + .cc = type_function.data.flags.cc, + .is_var_args = type_function.data.flags.is_var_args, + .is_generic = type_function.data.flags.is_generic, + .is_noinline = 
type_function.data.flags.is_noinline, + .align_is_generic = type_function.data.flags.align_is_generic, + .cc_is_generic = type_function.data.flags.cc_is_generic, + .section_is_generic = type_function.data.flags.section_is_generic, + .addrspace_is_generic = type_function.data.flags.addrspace_is_generic, + }; +} + +fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { + const enum_explicit = ip.extraDataTrail(EnumExplicit, data); + const names = @ptrCast( + []const NullTerminatedString, + ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len], + ); + const values = if (enum_explicit.data.values_map != .none) @ptrCast( + []const Index, + ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len], + ) else &[0]Index{}; + + return .{ .enum_type = .{ + .decl = enum_explicit.data.decl, + .namespace = enum_explicit.data.namespace, + .tag_ty = enum_explicit.data.int_tag_type, + .names = names, + .values = values, + .tag_mode = tag_mode, + .names_map = enum_explicit.data.names_map.toOptional(), + .values_map = enum_explicit.data.values_map, + } }; +} + +fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key { + const int_info = ip.limbData(Int, limb_index); + return .{ .int = .{ + .ty = int_info.ty, + .storage = .{ .big_int = .{ + .limbs = ip.limbSlice(Int, limb_index, int_info.limbs_len), + .positive = positive, + } }, + } }; +} + pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) { - return @intToEnum(Index, gop.index); - } + if (gop.found_existing) return @intToEnum(Index, gop.index); + try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { - const tag: Tag = switch (int_type.signedness) { + const t: Tag = switch (int_type.signedness) { .signed => .type_int_signed, .unsigned => .type_int_unsigned, }; - try ip.items.append(gpa, .{ - .tag = tag, + ip.items.appendAssumeCapacity(.{ + .tag = t, .data = int_type.bits, }); }, + .ptr_type => |ptr_type| { + assert(ptr_type.child != .none); + assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.child); + + if (ptr_type.flags.size == .Slice) { + _ = ip.map.pop(); + var new_key = key; + new_key.ptr_type.flags.size = .Many; + const ptr_type_index = try ip.get(gpa, new_key); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = .type_slice, + .data = @enumToInt(ptr_type_index), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + var ptr_type_adjusted = ptr_type; + if (ptr_type.flags.size == .C) ptr_type_adjusted.flags.is_allowzero = true; + + ip.items.appendAssumeCapacity(.{ + .tag = .type_pointer, + .data = try ip.addExtra(gpa, ptr_type_adjusted), + }); + }, .array_type => |array_type| { - const len = @intCast(u32, array_type.len); // TODO have a big_array encoding - assert(array_type.sentinel == .none); // TODO have a sentinel_array encoding - try ip.items.append(gpa, .{ - .tag = .type_array, + assert(array_type.child != .none); + assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child); + + if (std.math.cast(u32, array_type.len)) |len| { + if (array_type.sentinel == .none) { + ip.items.appendAssumeCapacity(.{ + .tag = .type_array_small, + .data = try ip.addExtra(gpa, Vector{ + .len = len, + 
.child = array_type.child, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + } + + const length = Array.Length.init(array_type.len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_array_big, .data = try ip.addExtra(gpa, Array{ - .len = len, + .len0 = length.a, + .len1 = length.b, .child = array_type.child, + .sentinel = array_type.sentinel, }), }); }, - else => @panic("TODO"), + .vector_type => |vector_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_vector, + .data = try ip.addExtra(gpa, Vector{ + .len = vector_type.len, + .child = vector_type.child, + }), + }); + }, + .opt_type => |payload_type| { + assert(payload_type != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .type_optional, + .data = @enumToInt(payload_type), + }); + }, + .anyframe_type => |payload_type| { + // payload_type might be none, indicating the type is `anyframe`. + ip.items.appendAssumeCapacity(.{ + .tag = .type_anyframe, + .data = @enumToInt(payload_type), + }); + }, + .error_union_type => |error_union_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_union, + .data = try ip.addExtra(gpa, error_union_type), + }); + }, + .error_set_type => |error_set_type| { + assert(error_set_type.names_map == .none); + assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan)); + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, error_set_type.names); + const names_len = @intCast(u32, error_set_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_set, + .data = ip.addExtraAssumeCapacity(ErrorSet{ + .names_len = names_len, + .names_map = names_map, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, error_set_type.names)); + }, + .inferred_error_set_type => |ies_index| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_inferred_error_set, + .data = @enumToInt(ies_index), + }); + }, + .simple_type => |simple_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .simple_type, + .data = @enumToInt(simple_type), + }); + }, + .simple_value => |simple_value| { + ip.items.appendAssumeCapacity(.{ + .tag = .simple_value, + .data = @enumToInt(simple_value), + }); + }, + .undef => |ty| { + assert(ty != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .undef, + .data = @enumToInt(ty), + }); + }, + .runtime_value => |runtime_value| { + assert(runtime_value.ty == ip.typeOf(runtime_value.val)); + ip.items.appendAssumeCapacity(.{ + .tag = .runtime_value, + .data = try ip.addExtra(gpa, runtime_value), + }); + }, + + .struct_type => |struct_type| { + ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ + .tag = .type_struct, + .data = @enumToInt(i), + } else if (struct_type.namespace.unwrap()) |i| .{ + .tag = .type_struct_ns, + .data = @enumToInt(i), + } else .{ + .tag = .type_struct, + .data = @enumToInt(Module.Struct.OptionalIndex.none), + }); + }, + + .anon_struct_type => |anon_struct_type| { + assert(anon_struct_type.types.len == anon_struct_type.values.len); + for (anon_struct_type.types) |elem| assert(elem != .none); + + const fields_len = @intCast(u32, anon_struct_type.types.len); + if (anon_struct_type.names.len == 0) { + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 2), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .type_tuple_anon, + .data = ip.addExtraAssumeCapacity(TypeStructAnon{ + .fields_len = fields_len, 
+ }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); + return @intToEnum(Index, ip.items.len - 1); + } + + assert(anon_struct_type.names.len == anon_struct_type.types.len); + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .type_struct_anon, + .data = ip.addExtraAssumeCapacity(TypeStructAnon{ + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names)); + return @intToEnum(Index, ip.items.len - 1); + }, + + .union_type => |union_type| { + ip.items.appendAssumeCapacity(.{ + .tag = switch (union_type.runtime_tag) { + .none => .type_union_untagged, + .safety => .type_union_safety, + .tagged => .type_union_tagged, + }, + .data = @enumToInt(union_type.index), + }); + }, + + .opaque_type => |opaque_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_opaque, + .data = try ip.addExtra(gpa, opaque_type), + }); + }, + + .enum_type => |enum_type| { + assert(enum_type.tag_ty == .noreturn_type or ip.isIntegerType(enum_type.tag_ty)); + for (enum_type.values) |value| assert(ip.typeOf(value) == enum_type.tag_ty); + assert(enum_type.names_map == .none); + assert(enum_type.values_map == .none); + + switch (enum_type.tag_mode) { + .auto => { + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, enum_type.names); + + const fields_len = @intCast(u32, enum_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + + fields_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_enum_auto, + .data = ip.addExtraAssumeCapacity(EnumAuto{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .names_map = names_map, + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); + return @intToEnum(Index, ip.items.len - 1); + }, + .explicit => return finishGetEnum(ip, gpa, enum_type, .type_enum_explicit), + .nonexhaustive => return finishGetEnum(ip, gpa, enum_type, .type_enum_nonexhaustive), + } + }, + + .func_type => |func_type| { + assert(func_type.return_type != .none); + for (func_type.param_types) |param_type| assert(param_type != .none); + + const params_len = @intCast(u32, func_type.param_types.len); + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len + + params_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = ip.addExtraAssumeCapacity(TypeFunction{ + .params_len = params_len, + .return_type = func_type.return_type, + .comptime_bits = func_type.comptime_bits, + .noalias_bits = func_type.noalias_bits, + .flags = .{ + .alignment = func_type.alignment, + .cc = func_type.cc, + .is_var_args = func_type.is_var_args, + .is_generic = func_type.is_generic, + .is_noinline = func_type.is_noinline, + .align_is_generic = func_type.align_is_generic, + .cc_is_generic = func_type.cc_is_generic, + .section_is_generic = func_type.section_is_generic, + .addrspace_is_generic = func_type.addrspace_is_generic, + }, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); + }, + + .variable => 
|variable| { + const has_init = variable.init != .none; + if (has_init) assert(variable.ty == ip.typeOf(variable.init)); + ip.items.appendAssumeCapacity(.{ + .tag = .variable, + .data = try ip.addExtra(gpa, Tag.Variable{ + .ty = variable.ty, + .init = variable.init, + .decl = variable.decl, + .lib_name = variable.lib_name, + .flags = .{ + .is_extern = variable.is_extern, + .is_const = variable.is_const, + .is_threadlocal = variable.is_threadlocal, + .is_weak_linkage = variable.is_weak_linkage, + }, + }), + }); + }, + + .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{ + .tag = .extern_func, + .data = try ip.addExtra(gpa, @as(Tag.ExternFunc, extern_func)), + }), + + .func => |func| ip.items.appendAssumeCapacity(.{ + .tag = .func, + .data = try ip.addExtra(gpa, @as(Tag.Func, func)), + }), + + .ptr => |ptr| { + const ptr_type = ip.indexToKey(ptr.ty).ptr_type; + switch (ptr.len) { + .none => { + assert(ptr_type.flags.size != .Slice); + switch (ptr.addr) { + .decl => |decl| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_decl, + .data = try ip.addExtra(gpa, PtrDecl{ + .ty = ptr.ty, + .decl = decl, + }), + }), + .mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_mut_decl, + .data = try ip.addExtra(gpa, PtrMutDecl{ + .ty = ptr.ty, + .decl = mut_decl.decl, + .runtime_index = mut_decl.runtime_index, + }), + }), + .comptime_field => |field_val| { + assert(field_val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_comptime_field, + .data = try ip.addExtra(gpa, PtrComptimeField{ + .ty = ptr.ty, + .field_val = field_val, + }), + }); + }, + .int, .eu_payload, .opt_payload => |base| { + switch (ptr.addr) { + .int => assert(ip.typeOf(base) == .usize_type), + .eu_payload => assert(ip.indexToKey( + ip.indexToKey(ip.typeOf(base)).ptr_type.child, + ) == .error_union_type), + .opt_payload => assert(ip.indexToKey( + ip.indexToKey(ip.typeOf(base)).ptr_type.child, + ) == .opt_type), + else => unreachable, + } + ip.items.appendAssumeCapacity(.{ + .tag = switch (ptr.addr) { + .int => .ptr_int, + .eu_payload => .ptr_eu_payload, + .opt_payload => .ptr_opt_payload, + else => unreachable, + }, + .data = try ip.addExtra(gpa, PtrBase{ + .ty = ptr.ty, + .base = base, + }), + }); + }, + .elem, .field => |base_index| { + const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; + switch (ptr.addr) { + .elem => assert(base_ptr_type.flags.size == .Many), + .field => { + assert(base_ptr_type.flags.size == .One); + switch (ip.indexToKey(base_ptr_type.child)) { + .anon_struct_type => |anon_struct_type| { + assert(ptr.addr == .field); + assert(base_index.index < anon_struct_type.types.len); + }, + .struct_type => |struct_type| { + assert(ptr.addr == .field); + assert(base_index.index < ip.structPtrUnwrapConst(struct_type.index).?.fields.count()); + }, + .union_type => |union_type| { + assert(ptr.addr == .field); + assert(base_index.index < ip.unionPtrConst(union_type.index).fields.count()); + }, + .ptr_type => |slice_type| { + assert(ptr.addr == .field); + assert(slice_type.flags.size == .Slice); + assert(base_index.index < 2); + }, + else => unreachable, + } + }, + else => unreachable, + } + _ = ip.map.pop(); + const index_index = try ip.get(gpa, .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = base_index.index }, + } }); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = switch (ptr.addr) { + .elem => .ptr_elem, + .field => .ptr_field, + else => 
unreachable, + }, + .data = try ip.addExtra(gpa, PtrBaseIndex{ + .ty = ptr.ty, + .base = base_index.base, + .index = index_index, + }), + }); + }, + } + }, + else => { + // TODO: change Key.Ptr for slices to reference the manyptr value + // rather than having an addr field directly. Then we can avoid + // these problematic calls to pop(), get(), and getOrPutAdapted(). + assert(ptr_type.flags.size == .Slice); + _ = ip.map.pop(); + var new_key = key; + new_key.ptr.ty = ip.slicePtrType(ptr.ty); + new_key.ptr.len = .none; + assert(ip.indexToKey(new_key.ptr.ty).ptr_type.flags.size == .Many); + const ptr_index = try ip.get(gpa, new_key); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_slice, + .data = try ip.addExtra(gpa, PtrSlice{ + .ty = ptr.ty, + .ptr = ptr_index, + .len = ptr.len, + }), + }); + }, + } + assert(ptr.ty == ip.indexToKey(@intToEnum(Index, ip.items.len - 1)).ptr.ty); + }, + + .opt => |opt| { + assert(ip.isOptionalType(opt.ty)); + assert(opt.val == .none or ip.indexToKey(opt.ty).opt_type == ip.typeOf(opt.val)); + ip.items.appendAssumeCapacity(if (opt.val == .none) .{ + .tag = .opt_null, + .data = @enumToInt(opt.ty), + } else .{ + .tag = .opt_payload, + .data = try ip.addExtra(gpa, Tag.TypeValue{ + .ty = opt.ty, + .val = opt.val, + }), + }); + }, + + .int => |int| b: { + assert(ip.isIntegerType(int.ty)); + switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| { + ip.items.appendAssumeCapacity(.{ + .tag = switch (int.storage) { + else => unreachable, + .lazy_align => .int_lazy_align, + .lazy_size => .int_lazy_size, + }, + .data = try ip.addExtra(gpa, IntLazy{ + .ty = int.ty, + .lazy_ty = lazy_ty, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + }, + } + switch (int.ty) { + .u8_type => switch (int.storage) { + .big_int => |big_int| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u8, + .data = big_int.to(u8) catch unreachable, + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u8, + .data = @intCast(u8, x), + }); + break :b; + }, + .lazy_align, .lazy_size => unreachable, + }, + .u16_type => switch (int.storage) { + .big_int => |big_int| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u16, + .data = big_int.to(u16) catch unreachable, + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u16, + .data = @intCast(u16, x), + }); + break :b; + }, + .lazy_align, .lazy_size => unreachable, + }, + .u32_type => switch (int.storage) { + .big_int => |big_int| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u32, + .data = big_int.to(u32) catch unreachable, + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u32, + .data = @intCast(u32, x), + }); + break :b; + }, + .lazy_align, .lazy_size => unreachable, + }, + .i32_type => switch (int.storage) { + .big_int => |big_int| { + const casted = big_int.to(i32) catch unreachable; + ip.items.appendAssumeCapacity(.{ + .tag = .int_i32, + .data = @bitCast(u32, casted), + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_i32, + .data = @bitCast(u32, @intCast(i32, x)), + }); + break :b; + }, + .lazy_align, .lazy_size => unreachable, + }, + .usize_type => switch (int.storage) { + .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_usize, + 
.data = casted, + }); + break :b; + } else |_| {} + }, + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_usize, + .data = casted, + }); + break :b; + } + }, + .lazy_align, .lazy_size => unreachable, + }, + .comptime_int_type => switch (int.storage) { + .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_u32, + .data = casted, + }); + break :b; + } else |_| {} + if (big_int.to(i32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_i32, + .data = @bitCast(u32, casted), + }); + break :b; + } else |_| {} + }, + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_u32, + .data = casted, + }); + break :b; + } + if (std.math.cast(i32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_i32, + .data = @bitCast(u32, casted), + }); + break :b; + } + }, + .lazy_align, .lazy_size => unreachable, + }, + else => {}, + } + switch (int.storage) { + .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small, + .data = try ip.addExtra(gpa, IntSmall{ + .ty = int.ty, + .value = casted, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } else |_| {} + + const tag: Tag = if (big_int.positive) .int_positive else .int_negative; + try addInt(ip, gpa, int.ty, tag, big_int.limbs); + }, + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small, + .data = try ip.addExtra(gpa, IntSmall{ + .ty = int.ty, + .value = casted, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + var buf: [2]Limb = undefined; + const big_int = BigIntMutable.init(&buf, x).toConst(); + const tag: Tag = if (big_int.positive) .int_positive else .int_negative; + try addInt(ip, gpa, int.ty, tag, big_int.limbs); + }, + .lazy_align, .lazy_size => unreachable, + } + }, + + .err => |err| { + assert(ip.isErrorSetType(err.ty)); + ip.items.appendAssumeCapacity(.{ + .tag = .error_set_error, + .data = try ip.addExtra(gpa, err), + }); + }, + + .error_union => |error_union| { + assert(ip.isErrorUnionType(error_union.ty)); + ip.items.appendAssumeCapacity(switch (error_union.val) { + .err_name => |err_name| .{ + .tag = .error_union_error, + .data = try ip.addExtra(gpa, Key.Error{ + .ty = error_union.ty, + .name = err_name, + }), + }, + .payload => |payload| .{ + .tag = .error_union_payload, + .data = try ip.addExtra(gpa, Tag.TypeValue{ + .ty = error_union.ty, + .val = payload, + }), + }, + }); + }, + + .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{ + .tag = .enum_literal, + .data = @enumToInt(enum_literal), + }), + + .enum_tag => |enum_tag| { + assert(ip.isEnumType(enum_tag.ty)); + switch (ip.indexToKey(enum_tag.ty)) { + .simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))), + .enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty), + else => unreachable, + } + ip.items.appendAssumeCapacity(.{ + .tag = .enum_tag, + .data = try ip.addExtra(gpa, enum_tag), + }); + }, + + .empty_enum_value => |enum_or_union_ty| ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(enum_or_union_ty), + }), + + .float => |float| { + switch (float.ty) { + .f16_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f16, + .data = @bitCast(u16, float.storage.f16), + }), + .f32_type => 
ip.items.appendAssumeCapacity(.{ + .tag = .float_f32, + .data = @bitCast(u32, float.storage.f32), + }), + .f64_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f64, + .data = try ip.addExtra(gpa, Float64.pack(float.storage.f64)), + }), + .f80_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f80, + .data = try ip.addExtra(gpa, Float80.pack(float.storage.f80)), + }), + .f128_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f128, + .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + }), + .c_longdouble_type => switch (float.storage) { + .f80 => |x| ip.items.appendAssumeCapacity(.{ + .tag = .float_c_longdouble_f80, + .data = try ip.addExtra(gpa, Float80.pack(x)), + }), + inline .f16, .f32, .f64, .f128 => |x| ip.items.appendAssumeCapacity(.{ + .tag = .float_c_longdouble_f128, + .data = try ip.addExtra(gpa, Float128.pack(x)), + }), + }, + .comptime_float_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_comptime_float, + .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + }), + else => unreachable, + } + }, + + .aggregate => |aggregate| { + const ty_key = ip.indexToKey(aggregate.ty); + const len = ip.aggregateTypeLen(aggregate.ty); + const child = switch (ty_key) { + .array_type => |array_type| array_type.child, + .vector_type => |vector_type| vector_type.child, + .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + const sentinel = switch (ty_key) { + .array_type => |array_type| array_type.sentinel, + .vector_type, .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + const len_including_sentinel = len + @boolToInt(sentinel != .none); + switch (aggregate.storage) { + .bytes => |bytes| { + assert(child == .u8_type); + if (bytes.len != len) { + assert(bytes.len == len_including_sentinel); + assert(bytes[@intCast(usize, len)] == ip.indexToKey(sentinel).int.storage.u64); + } + }, + .elems => |elems| { + if (elems.len != len) { + assert(elems.len == len_including_sentinel); + assert(elems[@intCast(usize, len)] == sentinel); + } + }, + .repeated_elem => |elem| { + assert(sentinel == .none or elem == sentinel); + }, + } + switch (ty_key) { + .array_type, .vector_type => { + for (aggregate.storage.values()) |elem| { + assert(ip.typeOf(elem) == child); + } + }, + .struct_type => |struct_type| { + for ( + aggregate.storage.values(), + ip.structPtrUnwrapConst(struct_type.index).?.fields.values(), + ) |elem, field| { + assert(ip.typeOf(elem) == field.ty.toIntern()); + } + }, + .anon_struct_type => |anon_struct_type| { + for (aggregate.storage.values(), anon_struct_type.types) |elem, ty| { + assert(ip.typeOf(elem) == ty); + } + }, + else => unreachable, + } + + if (len == 0) { + ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(aggregate.ty), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + switch (ty_key) { + .anon_struct_type => |anon_struct_type| opv: { + switch (aggregate.storage) { + .bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| { + if (value != ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = byte }, + } })) break :opv; + }, + .elems => |elems| if (!std.mem.eql( + Index, + anon_struct_type.values, + elems, + )) break :opv, + .repeated_elem => |elem| for (anon_struct_type.values) |value| { + if (value != elem) break :opv; + }, + } + // This encoding works thanks to the fact that, as we just verified, + // the type itself contains a slice of values that can be provided + // in the aggregate fields. 
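+                    // For example, if `.{ @as(u32, 1), @as(u32, 2) }` matches the
+                    // tuple type's own field values, storing just the type's
+                    // `Index` is enough for `indexToKey` to rebuild the elements.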
+ ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(aggregate.ty), + }); + return @intToEnum(Index, ip.items.len - 1); + }, + else => {}, + } + + repeated: { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes[1..@intCast(usize, len)]) |byte| + if (byte != bytes[0]) break :repeated, + .elems => |elems| for (elems[1..@intCast(usize, len)]) |elem| + if (elem != elems[0]) break :repeated, + .repeated_elem => {}, + } + const elem = switch (aggregate.storage) { + .bytes => |bytes| elem: { + _ = ip.map.pop(); + const elem = try ip.get(gpa, .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[0] }, + } }); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + break :elem elem; + }, + .elems => |elems| elems[0], + .repeated_elem => |elem| elem, + }; + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Repeated).Struct.fields.len, + ); + ip.items.appendAssumeCapacity(.{ + .tag = .repeated, + .data = ip.addExtraAssumeCapacity(Repeated{ + .ty = aggregate.ty, + .elem_val = elem, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + if (child == .u8_type) bytes: { + const string_bytes_index = ip.string_bytes.items.len; + try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(usize, len_including_sentinel + 1)); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); + switch (aggregate.storage) { + .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), + .elems => |elems| for (elems) |elem| switch (ip.indexToKey(elem)) { + .undef => { + ip.string_bytes.shrinkRetainingCapacity(string_bytes_index); + break :bytes; + }, + .int => |int| ip.string_bytes.appendAssumeCapacity( + @intCast(u8, int.storage.u64), + ), + else => unreachable, + }, + .repeated_elem => |elem| switch (ip.indexToKey(elem)) { + .undef => break :bytes, + .int => |int| @memset( + ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(usize, len)), + @intCast(u8, int.storage.u64), + ), + else => unreachable, + }, + } + const has_internal_null = + std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null; + if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( + @intCast(u8, ip.indexToKey(sentinel).int.storage.u64), + ); + const string = if (has_internal_null) + @intToEnum(String, string_bytes_index) + else + (try ip.getOrPutTrailingString(gpa, @intCast(usize, len_including_sentinel))).toString(); + ip.items.appendAssumeCapacity(.{ + .tag = .bytes, + .data = ip.addExtraAssumeCapacity(Bytes{ + .ty = aggregate.ty, + .bytes = string, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Tag.Aggregate).Struct.fields.len + @intCast(usize, len_including_sentinel), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .aggregate, + .data = ip.addExtraAssumeCapacity(Tag.Aggregate{ + .ty = aggregate.ty, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems)); + if (sentinel != .none) ip.extra.appendAssumeCapacity(@enumToInt(sentinel)); + }, + + .un => |un| { + assert(un.ty != .none); + assert(un.tag != .none); + assert(un.val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .union_value, + .data = try ip.addExtra(gpa, un), + }); + }, + + .memoized_call => |memoized_call| { + for (memoized_call.arg_values) |arg| assert(arg != .none); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + + 
memoized_call.arg_values.len); + ip.items.appendAssumeCapacity(.{ + .tag = .memoized_call, + .data = ip.addExtraAssumeCapacity(MemoizedCall{ + .func = memoized_call.func, + .args_len = @intCast(u32, memoized_call.arg_values.len), + .result = memoized_call.result, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, memoized_call.arg_values)); + }, } return @intToEnum(Index, ip.items.len - 1); } +/// Provides API for completing an enum type after calling `getIncompleteEnum`. +pub const IncompleteEnumType = struct { + index: Index, + tag_ty_index: u32, + names_map: MapIndex, + names_start: u32, + values_map: OptionalMapIndex, + values_start: u32, + + pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void { + assert(tag_ty == .noreturn_type or ip.isIntegerType(tag_ty)); + ip.extra.items[self.tag_ty_index] = @enumToInt(tag_ty); + } + + /// Returns the already-existing field with the same name, if any. + pub fn addFieldName( + self: @This(), + ip: *InternPool, + gpa: Allocator, + name: NullTerminatedString, + ) Allocator.Error!?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map)]; + const field_index = map.count(); + const strings = ip.extra.items[self.names_start..][0..field_index]; + const adapter: NullTerminatedString.Adapter = .{ + .strings = @ptrCast([]const NullTerminatedString, strings), + }; + const gop = try map.getOrPutAdapted(gpa, name, adapter); + if (gop.found_existing) return @intCast(u32, gop.index); + ip.extra.items[self.names_start + field_index] = @enumToInt(name); + return null; + } + + /// Returns the already-existing field with the same value, if any. + /// Make sure the type of the value has the integer tag type of the enum. + pub fn addFieldValue( + self: @This(), + ip: *InternPool, + gpa: Allocator, + value: Index, + ) Allocator.Error!?u32 { + assert(ip.typeOf(value) == @intToEnum(Index, ip.extra.items[self.tag_ty_index])); + const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)]; + const field_index = map.count(); + const indexes = ip.extra.items[self.values_start..][0..field_index]; + const adapter: Index.Adapter = .{ + .indexes = @ptrCast([]const Index, indexes), + }; + const gop = try map.getOrPutAdapted(gpa, value, adapter); + if (gop.found_existing) return @intCast(u32, gop.index); + ip.extra.items[self.values_start + field_index] = @enumToInt(value); + return null; + } +}; + +/// This is used to create an enum type in the `InternPool`, with the ability +/// to update the tag type, field names, and field values later. +pub fn getIncompleteEnum( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, +) Allocator.Error!IncompleteEnumType { + switch (enum_type.tag_mode) { + .auto => return getIncompleteEnumAuto(ip, gpa, enum_type), + .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit), + .nonexhaustive => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_nonexhaustive), + } +} + +fn getIncompleteEnumAuto( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, +) Allocator.Error!IncompleteEnumType { + const int_tag_type = if (enum_type.tag_ty != .none) + enum_type.tag_ty + else + try ip.get(gpa, .{ .int_type = .{ + .bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len), + .signedness = .unsigned, + } }); + + // We must keep the map in sync with `items`. The hash and equality functions + // for enum types only look at the decl field, which is present even in + // an `IncompleteEnumType`. 
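+ // (Illustration, inferred from the comment above: since hashing ignores + // everything except `decl`, addFieldName/addFieldValue can fill in fields + // later without invalidating the map entry created here.)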
+ const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter); + assert(!gop.found_existing); + + const names_map = try ip.addMap(gpa); + + const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len; + try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len); + try ip.items.ensureUnusedCapacity(gpa, 1); + + const extra_index = ip.addExtraAssumeCapacity(EnumAuto{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = int_tag_type, + .names_map = names_map, + .fields_len = enum_type.fields_len, + }); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_enum_auto, + .data = extra_index, + }); + ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), enum_type.fields_len); + return .{ + .index = @intToEnum(Index, ip.items.len - 1), + .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, + .names_map = names_map, + .names_start = extra_index + extra_fields_len, + .values_map = .none, + .values_start = undefined, + }; +} + +fn getIncompleteEnumExplicit( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, + tag: Tag, +) Allocator.Error!IncompleteEnumType { + // We must keep the map in sync with `items`. The hash and equality functions + // for enum types only look at the decl field, which is present even in + // an `IncompleteEnumType`. + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter); + assert(!gop.found_existing); + + const names_map = try ip.addMap(gpa); + const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: { + const values_map = try ip.addMap(gpa); + break :m values_map.toOptional(); + }; + + const reserved_len = enum_type.fields_len + + if (enum_type.has_values) enum_type.fields_len else 0; + + const extra_fields_len: u32 = @typeInfo(EnumExplicit).Struct.fields.len; + try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + reserved_len); + try ip.items.ensureUnusedCapacity(gpa, 1); + + const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .fields_len = enum_type.fields_len, + .names_map = names_map, + .values_map = values_map, + }); + + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = extra_index, + }); + // This is both fields and values (if present). 
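+ // (Worked example, for illustration only: an explicit enum with 3 fields and + // explicit values has reserved_len = 3 + 3, so 6 trailing `Index.none` slots + // are appended below and filled in later by addFieldName/addFieldValue.)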
+ ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), reserved_len); + return .{ + .index = @intToEnum(Index, ip.items.len - 1), + .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?, + .names_map = names_map, + .names_start = extra_index + extra_fields_len, + .values_map = values_map, + .values_start = extra_index + extra_fields_len + enum_type.fields_len, + }; +} + +pub fn finishGetEnum( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.EnumType, + tag: Tag, +) Allocator.Error!Index { + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, enum_type.names); + + const values_map: OptionalMapIndex = if (enum_type.values.len == 0) .none else m: { + const values_map = try ip.addMap(gpa); + try addIndexesToMap(ip, gpa, values_map, enum_type.values); + break :m values_map.toOptional(); + }; + const fields_len = @intCast(u32, enum_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + + fields_len); + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = ip.addExtraAssumeCapacity(EnumExplicit{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .fields_len = fields_len, + .names_map = names_map, + .values_map = values_map, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values)); + return @intToEnum(Index, ip.items.len - 1); +} + +pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const index = ip.map.getIndexAdapted(key, adapter) orelse return null; + return @intToEnum(Index, index); +} + +pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { + return ip.getIfExists(key).?; +} + +fn addStringsToMap( + ip: *InternPool, + gpa: Allocator, + map_index: MapIndex, + strings: []const NullTerminatedString, +) Allocator.Error!void { + const map = &ip.maps.items[@enumToInt(map_index)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = strings }; + for (strings) |string| { + const gop = try map.getOrPutAdapted(gpa, string, adapter); + assert(!gop.found_existing); + } +} + +fn addIndexesToMap( + ip: *InternPool, + gpa: Allocator, + map_index: MapIndex, + indexes: []const Index, +) Allocator.Error!void { + const map = &ip.maps.items[@enumToInt(map_index)]; + const adapter: Index.Adapter = .{ .indexes = indexes }; + for (indexes) |index| { + const gop = try map.getOrPutAdapted(gpa, index, adapter); + assert(!gop.found_existing); + } +} + +fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { + const ptr = try ip.maps.addOne(gpa); + ptr.* = .{}; + return @intToEnum(MapIndex, ip.maps.items.len - 1); +} + +/// This operation only happens under compile error conditions. +/// Leak the index until the next garbage collection. +/// TODO: this is a bit problematic to implement, can we get away without it? 
+pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead"); + +fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { + const limbs_len = @intCast(u32, limbs.len); + try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = ip.addLimbsExtraAssumeCapacity(Int{ + .ty = ty, + .limbs_len = limbs_len, + }), + }); + ip.addLimbsAssumeCapacity(limbs); +} + fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { - const fields = std.meta.fields(@TypeOf(extra)); + const fields = @typeInfo(@TypeOf(extra)).Struct.fields; try ip.extra.ensureUnusedCapacity(gpa, fields.len); return ip.addExtraAssumeCapacity(extra); } fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { - const fields = std.meta.fields(@TypeOf(extra)); const result = @intCast(u32, ip.extra.items.len); - inline for (fields) |field| { + inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Index => @enumToInt(@field(extra, field.name)), + Module.Decl.Index => @enumToInt(@field(extra, field.name)), + Module.Namespace.Index => @enumToInt(@field(extra, field.name)), + Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)), + Module.Fn.Index => @enumToInt(@field(extra, field.name)), + MapIndex => @enumToInt(@field(extra, field.name)), + OptionalMapIndex => @enumToInt(@field(extra, field.name)), + RuntimeIndex => @enumToInt(@field(extra, field.name)), + String => @enumToInt(@field(extra, field.name)), + NullTerminatedString => @enumToInt(@field(extra, field.name)), + OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - else => @compileError("bad field type"), + Tag.TypePointer.Flags => @bitCast(u32, @field(extra, field.name)), + TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), + Tag.TypePointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), + Tag.TypePointer.VectorIndex => @enumToInt(@field(extra, field.name)), + Tag.Variable.Flags => @bitCast(u32, @field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } return result; } -fn extraData(ip: InternPool, comptime T: type, index: usize) T { - const fields = std.meta.fields(T); - var i: usize = index; - var result: T = undefined; - inline for (fields) |field| { - @field(result, field.name) = switch (field.type) { - u32 => ip.extra.items[i], - Index => @intToEnum(Index, ip.extra.items[i]), - i32 => @bitCast(i32, ip.extra.items[i]), - else => @compileError("bad field type"), +fn reserveLimbs(ip: *InternPool, gpa: Allocator, n: usize) !void { + switch (@sizeOf(Limb)) { + @sizeOf(u32) => try ip.extra.ensureUnusedCapacity(gpa, n), + @sizeOf(u64) => try ip.limbs.ensureUnusedCapacity(gpa, n), + else => @compileError("unsupported host"), + } +} + +fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { + switch (@sizeOf(Limb)) { + @sizeOf(u32) => return addExtraAssumeCapacity(ip, extra), + @sizeOf(u64) => {}, + else => @compileError("unsupported host"), + } + const result = @intCast(u32, ip.limbs.items.len); + inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) 
|field, i| { + const new: u32 = switch (field.type) { + u32 => @field(extra, field.name), + Index => @enumToInt(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; - i += 1; + if (i % 2 == 0) { + ip.limbs.appendAssumeCapacity(new); + } else { + ip.limbs.items[ip.limbs.items.len - 1] |= @as(u64, new) << 32; + } } return result; } +fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { + switch (@sizeOf(Limb)) { + @sizeOf(u32) => ip.extra.appendSliceAssumeCapacity(limbs), + @sizeOf(u64) => ip.limbs.appendSliceAssumeCapacity(limbs), + else => @compileError("unsupported host"), + } +} + +fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { + var result: T = undefined; + const fields = @typeInfo(T).Struct.fields; + inline for (fields, 0..) |field, i| { + const int32 = ip.extra.items[i + index]; + @field(result, field.name) = switch (field.type) { + u32 => int32, + Index => @intToEnum(Index, int32), + Module.Decl.Index => @intToEnum(Module.Decl.Index, int32), + Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32), + Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32), + Module.Fn.Index => @intToEnum(Module.Fn.Index, int32), + MapIndex => @intToEnum(MapIndex, int32), + OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), + RuntimeIndex => @intToEnum(RuntimeIndex, int32), + String => @intToEnum(String, int32), + NullTerminatedString => @intToEnum(NullTerminatedString, int32), + OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32), + i32 => @bitCast(i32, int32), + Tag.TypePointer.Flags => @bitCast(Tag.TypePointer.Flags, int32), + TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), + Tag.TypePointer.PackedOffset => @bitCast(Tag.TypePointer.PackedOffset, int32), + Tag.TypePointer.VectorIndex => @intToEnum(Tag.TypePointer.VectorIndex, int32), + Tag.Variable.Flags => @bitCast(Tag.Variable.Flags, int32), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + } + return .{ + .data = result, + .end = index + fields.len, + }; +} + +fn extraData(ip: *const InternPool, comptime T: type, index: usize) T { + return extraDataTrail(ip, T, index).data; +} + +/// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. +fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { + switch (@sizeOf(Limb)) { + @sizeOf(u32) => return extraData(ip, T, index), + @sizeOf(u64) => {}, + else => @compileError("unsupported host"), + } + var result: T = undefined; + inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { + const host_int = ip.limbs.items[index + i / 2]; + const int32 = if (i % 2 == 0) + @truncate(u32, host_int) + else + @truncate(u32, host_int >> 32); + + @field(result, field.name) = switch (field.type) { + u32 => int32, + Index => @intToEnum(Index, int32), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + } + return result; +} + +/// This function returns the Limb slice that is trailing data after a payload. 
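+/// (Illustrative layout note: on a 64-bit host a payload of two u32 fields, +/// such as `Int`, packs into a single u64 limb, so its trailing limbs begin at +/// `limb_index + 1`.)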
+fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { + const field_count = @typeInfo(S).Struct.fields.len; + switch (@sizeOf(Limb)) { + @sizeOf(u32) => { + const start = limb_index + field_count; + return ip.extra.items[start..][0..len]; + }, + @sizeOf(u64) => { + const start = limb_index + @divExact(field_count, 2); + return ip.limbs.items[start..][0..len]; + }, + else => @compileError("unsupported host"), + } +} + +const LimbsAsIndexes = struct { + start: u32, + len: u32, +}; + +fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes { + const host_slice = switch (@sizeOf(Limb)) { + @sizeOf(u32) => ip.extra.items, + @sizeOf(u64) => ip.limbs.items, + else => @compileError("unsupported host"), + }; + // TODO: https://github.com/ziglang/zig/issues/1738 + return .{ + .start = @intCast(u32, @divExact(@ptrToInt(limbs.ptr) - @ptrToInt(host_slice.ptr), @sizeOf(Limb))), + .len = @intCast(u32, limbs.len), + }; +} + +/// This function converts Limb array indexes to a primitive slice type. +fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb { + return switch (@sizeOf(Limb)) { + @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], + @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], + else => @compileError("unsupported host"), + }; +} + test "basic usage" { const gpa = std.testing.allocator; @@ -314,3 +4518,1275 @@ test "basic usage" { } }); try std.testing.expect(another_array_i32 == array_i32); } + +pub fn childType(ip: *const InternPool, i: Index) Index { + return switch (ip.indexToKey(i)) { + .ptr_type => |ptr_type| ptr_type.child, + .vector_type => |vector_type| vector_type.child, + .array_type => |array_type| array_type.child, + .opt_type, .anyframe_type => |child| child, + else => unreachable, + }; +} + +/// Given a slice type, returns the type of the ptr field. +pub fn slicePtrType(ip: *const InternPool, i: Index) Index { + switch (i) { + .slice_const_u8_type => return .manyptr_const_u8_type, + .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, + else => {}, + } + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .type_slice => return @intToEnum(Index, item.data), + else => unreachable, // not a slice type + } +} + +/// Given a slice value, returns the value of the ptr field. +pub fn slicePtr(ip: *const InternPool, i: Index) Index { + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, + else => unreachable, // not a slice value + } +} + +/// Given a slice value, returns the value of the len field. +pub fn sliceLen(ip: *const InternPool, i: Index) Index { + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .ptr_slice => return ip.extraData(PtrSlice, item.data).len, + else => unreachable, // not a slice value + } +} + +/// Given an existing value, returns the same value but with the supplied type. 
+/// Only some combinations are allowed: +/// * identity coercion +/// * undef => any +/// * int <=> int +/// * int <=> enum +/// * enum_literal => enum +/// * ptr <=> ptr +/// * opt ptr <=> ptr +/// * opt ptr <=> opt ptr +/// * int <=> ptr +/// * null_value => opt +/// * payload => opt +/// * error set <=> error set +/// * error union <=> error union +/// * error set => error union +/// * payload => error union +/// * fn <=> fn +pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { + const old_ty = ip.typeOf(val); + if (old_ty == new_ty) return val; + switch (val) { + .undef => return ip.get(gpa, .{ .undef = new_ty }), + .null_value => if (ip.isOptionalType(new_ty)) + return ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = .none, + } }) + else if (ip.isPointerType(new_ty)) + return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = .zero_usize }, + .len = switch (ip.indexToKey(new_ty).ptr_type.flags.size) { + .One, .Many, .C => .none, + .Slice => try ip.get(gpa, .{ .undef = .usize_type }), + }, + } }), + else => switch (ip.indexToKey(val)) { + .undef => return ip.get(gpa, .{ .undef = new_ty }), + .extern_func => |extern_func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .extern_func = .{ + .ty = new_ty, + .decl = extern_func.decl, + .lib_name = extern_func.lib_name, + } }), + .func => |func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .func = .{ + .ty = new_ty, + .index = func.index, + } }), + .int => |int| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = try ip.getCoerced(gpa, val, enum_type.tag_ty), + } }), + .ptr_type => return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) }, + } }), + else => if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, int, new_ty), + }, + .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), + .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| { + const index = enum_type.nameIndex(ip, enum_literal).?; + return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = if (enum_type.values.len != 0) + enum_type.values[index] + else + try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = index }, + } }), + } }); + }, + else => {}, + }, + .ptr => |ptr| if (ip.isPointerType(new_ty)) + return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = ptr.addr, + .len = ptr.len, + } }) + else if (ip.isIntegerType(new_ty)) + switch (ptr.addr) { + .int => |int| return ip.getCoerced(gpa, int, new_ty), + else => {}, + }, + .opt => |opt| switch (ip.indexToKey(new_ty)) { + .ptr_type => |ptr_type| return switch (opt.val) { + .none => try ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = .zero_usize }, + .len = switch (ptr_type.flags.size) { + .One, .Many, .C => .none, + .Slice => try ip.get(gpa, .{ .undef = .usize_type }), + }, + } }), + else => |payload| try ip.getCoerced(gpa, payload, new_ty), + }, + .opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = switch (opt.val) { + .none => .none, + else => try ip.getCoerced(gpa, opt.val, child_type), + }, + } }), + else => {}, + }, + .err => |err| if (ip.isErrorSetType(new_ty)) + return ip.get(gpa, .{ .err = .{ + .ty = new_ty, + .name = err.name, + } }) + else if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty 
= new_ty, + .val = .{ .err_name = err.name }, + } }), + .error_union => |error_union| if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = error_union.val, + } }), + else => {}, + }, + } + switch (ip.indexToKey(new_ty)) { + .opt_type => |child_type| switch (val) { + .null_value => return ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = .none, + } }), + else => return ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = try ip.getCoerced(gpa, val, child_type), + } }), + }, + .error_union_type => |error_union_type| return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = .{ .payload = try ip.getCoerced(gpa, val, error_union_type.payload_type) }, + } }), + else => {}, + } + if (std.debug.runtime_safety) { + std.debug.panic("InternPool.getCoerced of {s} not implemented from {s} to {s}", .{ + @tagName(ip.indexToKey(val)), + @tagName(ip.indexToKey(old_ty)), + @tagName(ip.indexToKey(new_ty)), + }); + } + unreachable; +} + +/// Asserts `val` has an integer type. +/// Assumes `new_ty` is an integer type. +pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index { + // The key cannot be passed directly to `get`, otherwise in the case of + // big_int storage, the limbs would be invalidated before they are read. + // Here we pre-reserve the limbs to ensure that the logic in `addInt` will + // not use an invalidated limbs pointer. + const new_storage: Key.Int.Storage = switch (int.storage) { + .u64, .i64, .lazy_align, .lazy_size => int.storage, + .big_int => |big_int| storage: { + const positive = big_int.positive; + const limbs = ip.limbsSliceToIndex(big_int.limbs); + // This line invalidates the limbs slice, but the indexes computed in the + // previous line are still correct. 
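+ // (Sketch of the invariant, restated for illustration: the {start, len} pair + // survives reallocation of ip.limbs, so `limbsIndexToSlice` below yields a + // valid slice even though the original `big_int.limbs` pointer is stale.)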
+ try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); + break :storage .{ .big_int = .{ + .limbs = ip.limbsIndexToSlice(limbs), + .positive = positive, + } }; + }, + }; + return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = new_storage, + } }); +} + +pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .type_struct) return .none; + const datas = ip.items.items(.data); + return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); +} + +pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + switch (tags[@enumToInt(val)]) { + .type_union_tagged, .type_union_untagged, .type_union_safety => {}, + else => return .none, + } + const datas = ip.items.items(.data); + return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); +} + +pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { + assert(val != .none); + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + switch (tags[@enumToInt(val)]) { + .type_function => return indexToKeyFuncType(ip, datas[@enumToInt(val)]), + else => return null, + } +} + +pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .func) return .none; + const datas = ip.items.items(.data); + return ip.extraData(Tag.Func, datas[@enumToInt(val)]).index.toOptional(); +} + +pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .type_inferred_error_set) return .none; + const datas = ip.items.items(.data); + return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional(); +} + +/// includes .comptime_int_type +pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { + return switch (ty) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .comptime_int_type, + => true, + else => ip.indexToKey(ty) == .int_type, + }; +} + +/// does not include .enum_literal_type +pub fn isEnumType(ip: *const InternPool, ty: Index) bool { + return switch (ty) { + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + => true, + else => ip.indexToKey(ty) == .enum_type, + }; +} + +pub fn isFunctionType(ip: *const InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .func_type; +} + +pub fn isPointerType(ip: *const InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .ptr_type; +} + +pub fn isOptionalType(ip: *const InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .opt_type; +} + +/// includes .inferred_error_set_type +pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool { + return ty == .anyerror_type or switch (ip.indexToKey(ty)) { + .error_set_type, .inferred_error_set_type => true, + else => false, + }; +} + +pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .inferred_error_set_type; +} + +pub fn isErrorUnionType(ip: *const InternPool, 
ty: Index) bool { + return ip.indexToKey(ty) == .error_union_type; +} + +pub fn isAggregateType(ip: *const InternPool, ty: Index) bool { + return switch (ip.indexToKey(ty)) { + .array_type, .vector_type, .anon_struct_type, .struct_type => true, + else => false, + }; +} + +/// This is only legal because the initializer is not part of the hash. +pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { + const item = ip.items.get(@enumToInt(index)); + assert(item.tag == .variable); + ip.extra.items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?] = @enumToInt(init_index); +} + +pub fn dump(ip: *const InternPool) void { + dumpStatsFallible(ip, std.heap.page_allocator) catch return; + dumpAllFallible(ip) catch return; +} + +fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { + const items_size = (1 + 4) * ip.items.len; + const extra_size = 4 * ip.extra.items.len; + const limbs_size = 8 * ip.limbs.items.len; + // TODO: fields size is not taken into account + const structs_size = ip.allocated_structs.len * + (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); + const unions_size = ip.allocated_unions.len * + (@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); + const funcs_size = ip.allocated_funcs.len * + (@sizeOf(Module.Fn) + @sizeOf(Module.Decl)); + + // TODO: map overhead size is not taken into account + const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + + structs_size + unions_size + funcs_size; + + std.debug.print( + \\InternPool size: {d} bytes + \\ {d} items: {d} bytes + \\ {d} extra: {d} bytes + \\ {d} limbs: {d} bytes + \\ {d} structs: {d} bytes + \\ {d} unions: {d} bytes + \\ {d} funcs: {d} bytes + \\ + , .{ + total_size, + ip.items.len, + items_size, + ip.extra.items.len, + extra_size, + ip.limbs.items.len, + limbs_size, + ip.allocated_structs.len, + structs_size, + ip.allocated_unions.len, + unions_size, + ip.allocated_funcs.len, + funcs_size, + }); + + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + const TagStats = struct { + count: usize = 0, + bytes: usize = 0, + }; + var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); + for (tags, datas) |tag, data| { + const gop = try counts.getOrPut(tag); + if (!gop.found_existing) gop.value_ptr.* = .{}; + gop.value_ptr.count += 1; + gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { + .type_int_signed => 0, + .type_int_unsigned => 0, + .type_array_small => @sizeOf(Vector), + .type_array_big => @sizeOf(Array), + .type_vector => @sizeOf(Vector), + .type_pointer => @sizeOf(Tag.TypePointer), + .type_slice => 0, + .type_optional => 0, + .type_anyframe => 0, + .type_error_union => @sizeOf(Key.ErrorUnionType), + .type_error_set => b: { + const info = ip.extraData(ErrorSet, data); + break :b @sizeOf(ErrorSet) + (@sizeOf(u32) * info.names_len); + }, + .type_inferred_error_set => @sizeOf(Module.Fn.InferredErrorSet), + .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), + .type_enum_auto => @sizeOf(EnumAuto), + .type_opaque => @sizeOf(Key.OpaqueType), + .type_struct => b: { + const struct_index = @intToEnum(Module.Struct.Index, data); + const struct_obj = ip.structPtrConst(struct_index); + break :b @sizeOf(Module.Struct) + + @sizeOf(Module.Namespace) + + @sizeOf(Module.Decl) + + (struct_obj.fields.count() * @sizeOf(Module.Struct.Field)); + }, + .type_struct_ns => @sizeOf(Module.Namespace), + .type_struct_anon => b: { + const info = ip.extraData(TypeStructAnon,
data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); + }, + .type_tuple_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); + }, + + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + + .type_function => b: { + const info = ip.extraData(TypeFunction, data); + break :b @sizeOf(TypeFunction) + (@sizeOf(Index) * info.params_len); + }, + + .undef => 0, + .runtime_value => @sizeOf(Tag.TypeValue), + .simple_type => 0, + .simple_value => 0, + .ptr_decl => @sizeOf(PtrDecl), + .ptr_mut_decl => @sizeOf(PtrMutDecl), + .ptr_comptime_field => @sizeOf(PtrComptimeField), + .ptr_int => @sizeOf(PtrBase), + .ptr_eu_payload => @sizeOf(PtrBase), + .ptr_opt_payload => @sizeOf(PtrBase), + .ptr_elem => @sizeOf(PtrBaseIndex), + .ptr_field => @sizeOf(PtrBaseIndex), + .ptr_slice => @sizeOf(PtrSlice), + .opt_null => 0, + .opt_payload => @sizeOf(Tag.TypeValue), + .int_u8 => 0, + .int_u16 => 0, + .int_u32 => 0, + .int_i32 => 0, + .int_usize => 0, + .int_comptime_int_u32 => 0, + .int_comptime_int_i32 => 0, + .int_small => @sizeOf(IntSmall), + + .int_positive, + .int_negative, + => b: { + const int = ip.limbData(Int, data); + break :b @sizeOf(Int) + int.limbs_len * 8; + }, + + .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), + + .error_set_error, .error_union_error => @sizeOf(Key.Error), + .error_union_payload => @sizeOf(Tag.TypeValue), + .enum_literal => 0, + .enum_tag => @sizeOf(Tag.EnumTag), + + .bytes => b: { + const info = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Bytes) + len + + @boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0); + }, + .aggregate => b: { + const info = ip.extraData(Tag.Aggregate, data); + const fields_len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); + }, + .repeated => @sizeOf(Repeated), + + .float_f16 => 0, + .float_f32 => 0, + .float_f64 => @sizeOf(Float64), + .float_f80 => @sizeOf(Float80), + .float_f128 => @sizeOf(Float128), + .float_c_longdouble_f80 => @sizeOf(Float80), + .float_c_longdouble_f128 => @sizeOf(Float128), + .float_comptime_float => @sizeOf(Float128), + .variable => @sizeOf(Tag.Variable) + @sizeOf(Module.Decl), + .extern_func => @sizeOf(Tag.ExternFunc) + @sizeOf(Module.Decl), + .func => @sizeOf(Tag.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), + .only_possible_value => 0, + .union_value => @sizeOf(Key.Union), + + .memoized_call => b: { + const info = ip.extraData(MemoizedCall, data); + break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); + }, + }); + } + const SortContext = struct { + map: *std.AutoArrayHashMap(Tag, TagStats), + pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { + const values = ctx.map.values(); + return values[a_index].bytes > values[b_index].bytes; + //return values[a_index].count > values[b_index].count; + } + }; + counts.sort(SortContext{ .map = &counts }); + const len = @min(50, counts.count()); + std.debug.print(" top 50 tags:\n", .{}); + for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| { + std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ + @tagName(tag), stats.count, stats.bytes, + }); + } +} + +fn dumpAllFallible(ip: *const InternPool) anyerror!void { + const tags = ip.items.items(.tag); + 
const datas = ip.items.items(.data); + var bw = std.io.bufferedWriter(std.io.getStdErr().writer()); + const w = bw.writer(); + for (tags, datas, 0..) |tag, data, i| { + try w.print("${d} = {s}(", .{ i, @tagName(tag) }); + switch (tag) { + .simple_type => try w.print("{s}", .{@tagName(@intToEnum(SimpleType, data))}), + .simple_value => try w.print("{s}", .{@tagName(@intToEnum(SimpleValue, data))}), + + .type_int_signed, + .type_int_unsigned, + .type_array_small, + .type_array_big, + .type_vector, + .type_pointer, + .type_optional, + .type_anyframe, + .type_error_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_explicit, + .type_enum_nonexhaustive, + .type_enum_auto, + .type_opaque, + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + .type_function, + .undef, + .runtime_value, + .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .int_u8, + .int_u16, + .int_u32, + .int_i32, + .int_usize, + .int_comptime_int_u32, + .int_comptime_int_i32, + .int_small, + .int_positive, + .int_negative, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .error_union_payload, + .enum_literal, + .enum_tag, + .bytes, + .aggregate, + .repeated, + .float_f16, + .float_f32, + .float_f64, + .float_f80, + .float_f128, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + .float_comptime_float, + .variable, + .extern_func, + .func, + .union_value, + .memoized_call, + => try w.print("{d}", .{data}), + + .opt_null, + .type_slice, + .only_possible_value, + => try w.print("${d}", .{data}), + } + try w.writeAll(")\n"); + } + try bw.flush(); +} + +pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct { + return ip.allocated_structs.at(@enumToInt(index)); +} + +pub fn structPtrConst(ip: *const InternPool, index: Module.Struct.Index) *const Module.Struct { + return ip.allocated_structs.at(@enumToInt(index)); +} + +pub fn structPtrUnwrapConst(ip: *const InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct { + return structPtrConst(ip, index.unwrap() orelse return null); +} + +pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { + return ip.allocated_unions.at(@enumToInt(index)); +} + +pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Module.Union { + return ip.allocated_unions.at(@enumToInt(index)); +} + +pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn { + return ip.allocated_funcs.at(@enumToInt(index)); +} + +pub fn funcPtrConst(ip: *const InternPool, index: Module.Fn.Index) *const Module.Fn { + return ip.allocated_funcs.at(@enumToInt(index)); +} + +pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet { + return ip.allocated_inferred_error_sets.at(@enumToInt(index)); +} + +pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { + return ip.allocated_inferred_error_sets.at(@enumToInt(index)); +} + +pub fn createStruct( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Struct, +) Allocator.Error!Module.Struct.Index { + if (ip.structs_free_list.popOrNull()) |index| { + ip.allocated_structs.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_structs.addOne(gpa); + ptr.* = initialization; + 
return @intToEnum(Module.Struct.Index, ip.allocated_structs.len - 1); +} + +pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void { + ip.structPtr(index).* = undefined; + ip.structs_free_list.append(gpa, index) catch { + // In order to keep `destroyStruct` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Struct until garbage collection. + }; +} + +pub fn createUnion( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Union, +) Allocator.Error!Module.Union.Index { + if (ip.unions_free_list.popOrNull()) |index| { + ip.allocated_unions.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_unions.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Union.Index, ip.allocated_unions.len - 1); +} + +pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) void { + ip.unionPtr(index).* = undefined; + ip.unions_free_list.append(gpa, index) catch { + // In order to keep `destroyUnion` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Union until garbage collection. + }; +} + +pub fn createFunc( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Fn, +) Allocator.Error!Module.Fn.Index { + if (ip.funcs_free_list.popOrNull()) |index| { + ip.allocated_funcs.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_funcs.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Fn.Index, ip.allocated_funcs.len - 1); +} + +pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void { + ip.funcPtr(index).* = undefined; + ip.funcs_free_list.append(gpa, index) catch { + // In order to keep `destroyFunc` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Fn until garbage collection. + }; +} + +pub fn createInferredErrorSet( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Fn.InferredErrorSet, +) Allocator.Error!Module.Fn.InferredErrorSet.Index { + if (ip.inferred_error_sets_free_list.popOrNull()) |index| { + ip.allocated_inferred_error_sets.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Fn.InferredErrorSet.Index, ip.allocated_inferred_error_sets.len - 1); +} + +pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void { + ip.inferredErrorSetPtr(index).* = undefined; + ip.inferred_error_sets_free_list.append(gpa, index) catch { + // In order to keep `destroyInferredErrorSet` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the InferredErrorSet until garbage collection. 
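+ // (Illustrative note: when the append does succeed, the freed index is + // recycled by the next createInferredErrorSet call via its popOrNull().)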
+ }; +} + +pub fn getOrPutString( + ip: *InternPool, + gpa: Allocator, + s: []const u8, +) Allocator.Error!NullTerminatedString { + try ip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1); + ip.string_bytes.appendSliceAssumeCapacity(s); + ip.string_bytes.appendAssumeCapacity(0); + return ip.getOrPutTrailingString(gpa, s.len + 1); +} + +pub fn getOrPutStringFmt( + ip: *InternPool, + gpa: Allocator, + comptime format: []const u8, + args: anytype, +) Allocator.Error!NullTerminatedString { + // ensure that references to string_bytes in args do not get invalidated + const len = @intCast(usize, std.fmt.count(format, args) + 1); + try ip.string_bytes.ensureUnusedCapacity(gpa, len); + ip.string_bytes.writer(undefined).print(format, args) catch unreachable; + ip.string_bytes.appendAssumeCapacity(0); + return ip.getOrPutTrailingString(gpa, len); +} + +pub fn getOrPutStringOpt( + ip: *InternPool, + gpa: Allocator, + optional_string: ?[]const u8, +) Allocator.Error!OptionalNullTerminatedString { + const s = optional_string orelse return .none; + const interned = try getOrPutString(ip, gpa, s); + return interned.toOptional(); +} + +/// Uses the last len bytes of ip.string_bytes as the key. +pub fn getOrPutTrailingString( + ip: *InternPool, + gpa: Allocator, + len: usize, +) Allocator.Error!NullTerminatedString { + const string_bytes = &ip.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len - len); + if (len > 0 and string_bytes.getLast() == 0) { + _ = string_bytes.pop(); + } else { + try string_bytes.ensureUnusedCapacity(gpa, 1); + } + const key: []const u8 = string_bytes.items[str_index..]; + const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{ + .bytes = string_bytes, + }, std.hash_map.StringIndexContext{ + .bytes = string_bytes, + }); + if (gop.found_existing) { + string_bytes.shrinkRetainingCapacity(str_index); + return @intToEnum(NullTerminatedString, gop.key_ptr.*); + } else { + gop.key_ptr.* = str_index; + string_bytes.appendAssumeCapacity(0); + return @intToEnum(NullTerminatedString, str_index); + } +} + +pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { + if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{ + .bytes = &ip.string_bytes, + })) |index| { + return @intToEnum(NullTerminatedString, index).toOptional(); + } else { + return .none; + } +} + +pub fn stringToSlice(ip: *const InternPool, s: NullTerminatedString) [:0]const u8 { + const string_bytes = ip.string_bytes.items; + const start = @enumToInt(s); + var end: usize = start; + while (string_bytes[end] != 0) end += 1; + return string_bytes[start..end :0]; +} + +pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { + return ip.stringToSlice(s.unwrap() orelse return null); +} + +pub fn stringEqlSlice(ip: *const InternPool, a: NullTerminatedString, b: []const u8) bool { + return std.mem.eql(u8, stringToSlice(ip, a), b); +} + +pub fn typeOf(ip: *const InternPool, index: Index) Index { + // This optimization of static keys is required so that typeOf can be called + // on static keys that haven't been added yet during static key initialization. + // An alternative would be to topologically sort the static keys, but this would + // mean that the range of type indices would not be dense.
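+ // (Example, for illustration: typeOf(.bool_true) is answered by this static + // switch alone and never reads ip.items.)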
+ return switch (index) { + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + .prefetch_options_type, + .export_options_type, + .extern_options_type, + .type_info_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + .anyerror_void_error_union_type, + .generic_poison_type, + .empty_struct_type, + => .type_type, + + .undef => .undefined_type, + .zero, .one, .negative_one => .comptime_int_type, + .zero_usize, .one_usize => .usize_type, + .zero_u8, .one_u8, .four_u8 => .u8_type, + .calling_convention_c, .calling_convention_inline => .calling_convention_type, + .void_value => .void_type, + .unreachable_value => .noreturn_type, + .null_value => .null_type, + .bool_true, .bool_false => .bool_type, + .empty_struct => .empty_struct_type, + .generic_poison => .generic_poison_type, + + // This optimization on tags is needed so that indexToKey can call + // typeOf without being recursive. + _ => switch (ip.items.items(.tag)[@enumToInt(index)]) { + .type_int_signed, + .type_int_unsigned, + .type_array_big, + .type_array_small, + .type_vector, + .type_pointer, + .type_slice, + .type_optional, + .type_anyframe, + .type_error_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + .simple_type, + .type_opaque, + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + .type_function, + => .type_type, + + .undef, + .opt_null, + .only_possible_value, + => @intToEnum(Index, ip.items.items(.data)[@enumToInt(index)]), + + .simple_value => unreachable, // handled via Index above + + inline .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .error_union_payload, + .runtime_value, + .int_small, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .enum_tag, + .variable, + .extern_func, + .func, + .union_value, + .bytes, + .aggregate, + .repeated, + => |t| { + const extra_index = ip.items.items(.data)[@enumToInt(index)]; + const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; + return @intToEnum(Index, ip.extra.items[extra_index + field_index]); + }, + + .int_u8 => .u8_type, + .int_u16 => .u16_type, + .int_u32 => .u32_type, + .int_i32 => .i32_type, + .int_usize => .usize_type, + + .int_comptime_int_u32, + .int_comptime_int_i32, + => .comptime_int_type, + + // Note these are stored in limbs data, not extra data. 
+ .int_positive, + .int_negative, + => ip.limbData(Int, ip.items.items(.data)[@enumToInt(index)]).ty, + + .enum_literal => .enum_literal_type, + .float_f16 => .f16_type, + .float_f32 => .f32_type, + .float_f64 => .f64_type, + .float_f80 => .f80_type, + .float_f128 => .f128_type, + + .float_c_longdouble_f80, + .float_c_longdouble_f128, + => .c_longdouble_type, + + .float_comptime_float => .comptime_float_type, + + .memoized_call => unreachable, + }, + + .var_args_param_type => unreachable, + .none => unreachable, + }; +} + +/// Assumes that the enum's field indexes equal its value tags. +pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { + const int = ip.indexToKey(i).enum_tag.int; + return @intToEnum(E, ip.indexToKey(int).int.storage.u64); +} + +pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { + return switch (ip.indexToKey(ty)) { + .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .array_type => |array_type| array_type.len, + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }; +} + +pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { + return switch (ip.indexToKey(ty)) { + .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .array_type => |array_type| array_type.len + @boolToInt(array_type.sentinel != .none), + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }; +} + +pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { + return switch (ty) { + .noreturn_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + else => false, + }, + }; +} + +/// This is a particularly hot function, so we operate directly on encodings +/// rather than the more straightforward implementation of calling `indexToKey`. 
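+/// (Example, for illustration: zigTypeTagOrPoison(.u8_type) takes the static +/// index path below and returns .Int without decoding a full `Key`.)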
+pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { + return switch (index) { + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => .Int, + + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + => .Float, + + .anyopaque_type => .Opaque, + .bool_type => .Bool, + .void_type => .Void, + .type_type => .Type, + .anyerror_type => .ErrorSet, + .comptime_int_type => .ComptimeInt, + .comptime_float_type => .ComptimeFloat, + .noreturn_type => .NoReturn, + .anyframe_type => .AnyFrame, + .null_type => .Null, + .undefined_type => .Undefined, + .enum_literal_type => .EnumLiteral, + + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + => .Enum, + + .prefetch_options_type, + .export_options_type, + .extern_options_type, + => .Struct, + + .type_info_type => .Union, + + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + => .Pointer, + + .anyerror_void_error_union_type => .ErrorUnion, + .empty_struct_type => .Struct, + + .generic_poison_type => return error.GenericPoison, + + // values, not types + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .zero_u8 => unreachable, + .one => unreachable, + .one_usize => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, + .negative_one => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + .var_args_param_type => unreachable, // special tag + + _ => switch (ip.items.items(.tag)[@enumToInt(index)]) { + .type_int_signed, + .type_int_unsigned, + => .Int, + + .type_array_big, + .type_array_small, + => .Array, + + .type_vector => .Vector, + + .type_pointer, + .type_slice, + => .Pointer, + + .type_optional => .Optional, + .type_anyframe => .AnyFrame, + .type_error_union => .ErrorUnion, + + .type_error_set, + .type_inferred_error_set, + => .ErrorSet, + + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + => .Enum, + + .simple_type => unreachable, // handled via Index tag above + + .type_opaque => .Opaque, + + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + => .Struct, + + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => .Union, + + .type_function => .Fn, + + // values, not types + .undef, + .runtime_value, + .simple_value, + .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .opt_null, + .int_u8, + .int_u16, + .int_u32, + .int_i32, + .int_usize, + .int_comptime_int_u32, + .int_comptime_int_i32, + .int_small, + .int_positive, + .int_negative, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .error_union_payload, + .enum_literal, + 
.enum_tag, + .float_f16, + .float_f32, + .float_f64, + .float_f80, + .float_f128, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + .float_comptime_float, + .variable, + .extern_func, + .func, + .only_possible_value, + .union_value, + .bytes, + .aggregate, + .repeated, + // memoization, not types + .memoized_call, + => unreachable, + }, + .none => unreachable, // special tag + }; +} diff --git a/src/Liveness.zig b/src/Liveness.zig index 59135ef5c8..b12b638208 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -5,15 +5,17 @@ //! Some instructions are special, such as: //! * Conditional Branches //! * Switch Branches -const Liveness = @This(); const std = @import("std"); -const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; -const Air = @import("Air.zig"); const Log2Int = std.math.Log2Int; +const Liveness = @This(); +const trace = @import("tracy.zig").trace; +const Air = @import("Air.zig"); +const InternPool = @import("InternPool.zig"); + pub const Verify = @import("Liveness/Verify.zig"); /// This array is split into sets of 4 bits per AIR instruction. @@ -129,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type { }; } -pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -142,6 +144,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .intern_pool = intern_pool, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -222,6 +225,7 @@ pub fn categorizeOperand( air: Air, inst: Air.Inst.Index, operand: Air.Inst.Index, + ip: *const InternPool, ) OperandCategory { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); @@ -317,9 +321,10 @@ pub fn categorizeOperand( .arg, .alloc, + .inferred_alloc, + .inferred_alloc_comptime, .ret_ptr, - .constant, - .const_ty, + .interned, .trap, .breakpoint, .dbg_stmt, @@ -530,7 +535,7 @@ pub fn categorizeOperand( .aggregate_init => { const ty_pl = air_datas[inst].ty_pl; const aggregate_ty = air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { @@ -621,7 +626,7 @@ pub fn categorizeOperand( var operand_live: bool = true; for (air.extra[cond_extra.end..][0..2]) |cond_inst| { - if (l.categorizeOperand(air, cond_inst, operand) == .tomb) + if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb) operand_live = false; switch (air_tags[cond_inst]) { @@ -818,6 +823,7 @@ pub const BigTomb = struct { const Analysis = struct { gpa: Allocator, air: Air, + intern_pool: *const InternPool, tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), @@ -867,6 +873,7 @@ fn analyzeInst( data: *LivenessPassData(pass), inst: Air.Inst.Index, ) Allocator.Error!void { + const ip = a.intern_pool; const inst_tags = a.air.instructions.items(.tag); const inst_datas = a.air.instructions.items(.data); @@ -967,9 +974,7 @@ fn analyzeInst( .work_group_id, => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }), - .constant, - .const_ty, - => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, 
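+ // (Editorial aside, assumption: `.interned` subsumes the old `.constant` + // and `.const_ty` tags now that constant values live in the InternPool.)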
 .trap,
 .unreach,
@@ -1134,7 +1139,7 @@ fn analyzeInst(
         .aggregate_init => {
             const ty_pl = inst_datas[inst].ty_pl;
             const aggregate_ty = a.air.getRefType(ty_pl.ty);
-            const len = @intCast(usize, aggregate_ty.arrayLen());
+            const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
             const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
 
             if (elements.len <= bpi - 1) {
@@ -1253,19 +1258,17 @@ fn analyzeOperands(
 ) Allocator.Error!void {
     const gpa = a.gpa;
     const inst_tags = a.air.instructions.items(.tag);
+    const ip = a.intern_pool;
 
     switch (pass) {
         .loop_analysis => {
             _ = data.live_set.remove(inst);
 
             for (operands) |op_ref| {
-                const operand = Air.refToIndex(op_ref) orelse continue;
+                const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
 
                 // Don't compute any liveness for constants
-                switch (inst_tags[operand]) {
-                    .constant, .const_ty => continue,
-                    else => {},
-                }
+                if (inst_tags[operand] == .interned) continue;
 
                 _ = try data.live_set.put(gpa, operand, {});
             }
@@ -1288,20 +1291,17 @@ fn analyzeOperands(
             // If our result is unused and the instruction doesn't need to be lowered, backends will
             // skip the lowering of this instruction, so we don't want to record uses of operands.
             // That way, we can mark as many instructions as possible unused.
-            if (!immediate_death or a.air.mustLower(inst)) {
+            if (!immediate_death or a.air.mustLower(inst, ip)) {
                 // Note that it's important we iterate over the operands backwards, so that if a dying
                 // operand is used multiple times we mark its last use as its death.
                 var i = operands.len;
                 while (i > 0) {
                     i -= 1;
                     const op_ref = operands[i];
-                    const operand = Air.refToIndex(op_ref) orelse continue;
+                    const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
 
                     // Don't compute any liveness for constants
-                    switch (inst_tags[operand]) {
-                        .constant, .const_ty => continue,
-                        else => {},
-                    }
+                    if (inst_tags[operand] == .interned) continue;
 
                     const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
@@ -1407,7 +1407,7 @@
 
     // If the block is noreturn, block deaths not only aren't useful, they're impossible to
     // find: there could be more stuff alive after the block than before it!
-    if (!a.air.getRefType(ty_pl.ty).isNoReturn()) {
+    if (!a.intern_pool.isNoReturn(a.air.getRefType(ty_pl.ty).ip_index)) {
         // The block kills the difference in the live sets
         const block_scope = data.block_scopes.get(inst).?;
         const num_deaths = data.live_set.count() - block_scope.live_set.count();
@@ -1819,6 +1819,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
 
         /// Must be called with operands in reverse order.
         fn feed(big: *Self, op_ref: Air.Inst.Ref) !void {
+            const ip = big.a.intern_pool;
             // Note that after this, `operands_remaining` becomes the index of the current operand
             big.operands_remaining -= 1;
@@ -1831,15 +1832,12 @@
 
             // Don't compute any liveness for constants
             const inst_tags = big.a.air.instructions.items(.tag);
-            switch (inst_tags[operand]) {
-                .constant, .const_ty => return,
-                else => {},
-            }
+            if (inst_tags[operand] == .interned) return;
 
             // If our result is unused and the instruction doesn't need to be lowered, backends will
             // skip the lowering of this instruction, so we don't want to record uses of operands.
             // That way, we can mark as many instructions as possible unused.
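+            // `mustLower` takes the intern pool here for the same reason as in
+            // `analyzeOperands` above.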
- if (big.will_die_immediately and !big.a.air.mustLower(big.inst)) return; + if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index a55ebe52a6..a5fc592894 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -5,6 +5,7 @@ air: Air, liveness: Liveness, live: LiveMap = .{}, blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{}, +intern_pool: *const InternPool, pub const Error = error{ LivenessInvalid, OutOfMemory }; @@ -27,10 +28,11 @@ pub fn verify(self: *Verify) Error!void { const LiveMap = std.AutoHashMapUnmanaged(Air.Inst.Index, void); fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { + const ip = self.intern_pool; const tag = self.air.instructions.items(.tag); const data = self.air.instructions.items(.data); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) { // This instruction will not be lowered and should be ignored. continue; } @@ -39,9 +41,10 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { // no operands .arg, .alloc, + .inferred_alloc, + .inferred_alloc_comptime, .ret_ptr, - .constant, - .const_ty, + .interned, .breakpoint, .dbg_stmt, .dbg_inline_begin, @@ -58,10 +61,10 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .work_item_id, .work_group_size, .work_group_id, - => try self.verifyInst(inst, .{ .none, .none, .none }), + => try self.verifyInstOperands(inst, .{ .none, .none, .none }), .trap, .unreach => { - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInstOperands(inst, .{ .none, .none, .none }); // This instruction terminates the function, so everything should be dead if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst}); }, @@ -110,7 +113,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .c_va_copy, => { const ty_op = data[inst].ty_op; - try self.verifyInst(inst, .{ ty_op.operand, .none, .none }); + try self.verifyInstOperands(inst, .{ ty_op.operand, .none, .none }); }, .is_null, .is_non_null, @@ -146,13 +149,13 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .c_va_end, => { const un_op = data[inst].un_op; - try self.verifyInst(inst, .{ un_op, .none, .none }); + try self.verifyInstOperands(inst, .{ un_op, .none, .none }); }, .ret, .ret_load, => { const un_op = data[inst].un_op; - try self.verifyInst(inst, .{ un_op, .none, .none }); + try self.verifyInstOperands(inst, .{ un_op, .none, .none }); // This instruction terminates the function, so everything should be dead if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst}); }, @@ -161,36 +164,36 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .wasm_memory_grow, => { const pl_op = data[inst].pl_op; - try self.verifyInst(inst, .{ pl_op.operand, .none, .none }); + try self.verifyInstOperands(inst, .{ pl_op.operand, .none, .none }); }, .prefetch => { const prefetch = data[inst].prefetch; - try self.verifyInst(inst, .{ prefetch.ptr, .none, .none }); + try self.verifyInstOperands(inst, .{ prefetch.ptr, .none, .none }); }, .reduce, .reduce_optimized, => { const reduce = data[inst].reduce; - try self.verifyInst(inst, .{ 
reduce.operand, .none, .none }); + try self.verifyInstOperands(inst, .{ reduce.operand, .none, .none }); }, .union_init => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.init, .none, .none }); + try self.verifyInstOperands(inst, .{ extra.init, .none, .none }); }, .struct_field_ptr, .struct_field_val => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.struct_operand, .none, .none }); + try self.verifyInstOperands(inst, .{ extra.struct_operand, .none, .none }); }, .field_parent_ptr => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.field_ptr, .none, .none }); + try self.verifyInstOperands(inst, .{ extra.field_ptr, .none, .none }); }, .atomic_load => { const atomic_load = data[inst].atomic_load; - try self.verifyInst(inst, .{ atomic_load.ptr, .none, .none }); + try self.verifyInstOperands(inst, .{ atomic_load.ptr, .none, .none }); }, // binary @@ -260,7 +263,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .memcpy, => { const bin_op = data[inst].bin_op; - try self.verifyInst(inst, .{ bin_op.lhs, bin_op.rhs, .none }); + try self.verifyInstOperands(inst, .{ bin_op.lhs, bin_op.rhs, .none }); }, .add_with_overflow, .sub_with_overflow, @@ -274,62 +277,62 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none }); + try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none }); }, .shuffle => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.a, extra.b, .none }); + try self.verifyInstOperands(inst, .{ extra.a, extra.b, .none }); }, .cmp_vector, .cmp_vector_optimized, => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none }); + try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none }); }, .atomic_rmw => { const pl_op = data[inst].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; - try self.verifyInst(inst, .{ pl_op.operand, extra.operand, .none }); + try self.verifyInstOperands(inst, .{ pl_op.operand, extra.operand, .none }); }, // ternary .select => { const pl_op = data[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - try self.verifyInst(inst, .{ pl_op.operand, extra.lhs, extra.rhs }); + try self.verifyInstOperands(inst, .{ pl_op.operand, extra.lhs, extra.rhs }); }, .mul_add => { const pl_op = data[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - try self.verifyInst(inst, .{ extra.lhs, extra.rhs, pl_op.operand }); + try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, pl_op.operand }); }, .vector_store_elem => { const vector_store_elem = data[inst].vector_store_elem; const extra = self.air.extraData(Air.Bin, vector_store_elem.payload).data; - try self.verifyInst(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs }); + try self.verifyInstOperands(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs }); }, .cmpxchg_strong, .cmpxchg_weak, => { const ty_pl = data[inst].ty_pl; const extra = 
self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.ptr, extra.expected_value, extra.new_value }); + try self.verifyInstOperands(inst, .{ extra.ptr, extra.expected_value, extra.new_value }); }, // big tombs .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); var bt = self.liveness.iterateBigTomb(inst); for (elements) |element| { try self.verifyOperand(inst, element, bt.feed()); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const pl_op = data[inst].pl_op; @@ -344,7 +347,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (args) |arg| { try self.verifyOperand(inst, arg, bt.feed()); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .assembly => { const ty_pl = data[inst].ty_pl; @@ -370,7 +373,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (inputs) |input| { try self.verifyOperand(inst, input, bt.feed()); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, // control flow @@ -394,7 +397,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death); - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .try_ptr => { const ty_pl = data[inst].ty_pl; @@ -416,7 +419,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death); - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .br => { const br = data[inst].br; @@ -428,7 +431,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { } else { gop.value_ptr.* = try self.live.clone(self.gpa); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .block => { const ty_pl = data[inst].ty_pl; @@ -450,7 +453,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (block_liveness.deaths) |death| try self.verifyDeath(inst, death); - if (block_ty.isNoReturn()) { + if (ip.isNoReturn(block_ty.toIntern())) { assert(!self.blocks.contains(inst)); } else { var live = self.blocks.fetchRemove(inst).?.value; @@ -459,7 +462,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { try self.verifyMatchingLiveness(inst, live); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInstOperands(inst, .{ .none, .none, .none }); }, .loop => { const ty_pl = data[inst].ty_pl; @@ -474,7 +477,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { // The same stuff should be alive after the loop as before it try self.verifyMatchingLiveness(inst, live); - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInstOperands(inst, .{ .none, .none, .none }); }, .cond_br => { const pl_op = data[inst].pl_op; @@ -497,7 +500,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (cond_br_liveness.else_deaths) |death| try self.verifyDeath(inst, death); try self.verifyBody(else_body); - try self.verifyInst(inst, .{ .none, 
.none, .none }); + try self.verifyInst(inst); }, .switch_br => { const pl_op = data[inst].pl_op; @@ -541,7 +544,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { try self.verifyBody(else_body); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, } } @@ -552,20 +555,22 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err } fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void { - const operand = Air.refToIndex(op_ref) orelse return; - switch (self.air.instructions.items(.tag)[operand]) { - .constant, .const_ty => {}, - else => { - if (dies) { - if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand }); - } else { - if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand }); - } - }, + const operand = Air.refToIndexAllowNone(op_ref) orelse { + assert(!dies); + return; + }; + if (self.air.instructions.items(.tag)[operand] == .interned) { + assert(!dies); + return; + } + if (dies) { + if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand }); + } else { + if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand }); } } -fn verifyInst( +fn verifyInstOperands( self: *Verify, inst: Air.Inst.Index, operands: [Liveness.bpi - 1]Air.Inst.Ref, @@ -574,16 +579,15 @@ fn verifyInst( const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index)); try self.verifyOperand(inst, operand, dies); } - const tag = self.air.instructions.items(.tag); - switch (tag[inst]) { - .constant, .const_ty => unreachable, - else => { - if (self.liveness.isUnused(inst)) { - assert(!self.live.contains(inst)); - } else { - try self.live.putNoClobber(self.gpa, inst, {}); - } - }, + try self.verifyInst(inst); +} + +fn verifyInst(self: *Verify, inst: Air.Inst.Index) Error!void { + if (self.air.instructions.items(.tag)[inst] == .interned) return; + if (self.liveness.isUnused(inst)) { + assert(!self.live.contains(inst)); + } else { + try self.live.putNoClobber(self.gpa, inst, {}); } } @@ -604,4 +608,5 @@ const log = std.log.scoped(.liveness_verify); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const InternPool = @import("../InternPool.zig"); const Verify = @This(); diff --git a/src/Module.zig b/src/Module.zig index a8f2281c4f..61f39a327a 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -32,6 +32,19 @@ const build_options = @import("build_options"); const Liveness = @import("Liveness.zig"); const isUpDir = @import("introspect.zig").isUpDir; const clang = @import("clang.zig"); +const InternPool = @import("InternPool.zig"); + +comptime { + @setEvalBranchQuota(4000); + for ( + @typeInfo(Zir.Inst.Ref).Enum.fields, + @typeInfo(Air.Inst.Ref).Enum.fields, + @typeInfo(InternPool.Index).Enum.fields, + ) |zir_field, air_field, ip_field| { + assert(mem.eql(u8, zir_field.name, ip_field.name)); + assert(mem.eql(u8, air_field.name, ip_field.name)); + } +} /// General-purpose allocator. Used for both temporary and long-term storage. gpa: Allocator, @@ -72,28 +85,29 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, /// Keys are fully resolved file paths. This table owns the keys and values. 
embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, -/// This is a temporary addition to stage2 in order to match legacy behavior, -/// however the end-game once the lang spec is settled will be to use a global -/// InternPool for comptime memoized objects, making this behavior consistent across all types, -/// not only string literals. Or, we might decide to not guarantee string literals -/// to have equal comptime pointers, in which case this field can be deleted (perhaps -/// the commit that introduced it can simply be reverted). -/// This table uses an optional index so that when a Decl is destroyed, the string literal -/// is still reclaimable by a future Decl. -string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{}, -string_literal_bytes: ArrayListUnmanaged(u8) = .{}, +/// Stores all Type and Value objects; periodically garbage collected. +intern_pool: InternPool = .{}, +/// To be eliminated in a future commit by moving more data into InternPool. +/// Current uses that must be eliminated: +/// * Struct comptime_args +/// * Struct optimized_order +/// * Union fields +/// This memory lives until the Module is destroyed. +tmp_hack_arena: std.heap.ArenaAllocator, + +/// This is currently only used for string literals. +memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, + +monomorphed_func_keys: std.ArrayListUnmanaged(InternPool.Index) = .{}, /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. monomorphed_funcs: MonomorphedFuncsSet = .{}, -/// The set of all comptime function calls that have been cached so that future calls -/// with the same parameters will get the same return value. -memoized_calls: MemoizedCallSet = .{}, /// Contains the values from `@setAlignStack`. A sparse table is used here /// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while /// functions are many. -align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{}, +align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{}, /// We optimize memory usage for a compilation with no compile errors by storing the /// error messages and mapping outside of `Decl`. @@ -120,13 +134,8 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, []CImportError) = .{}, /// contains Decls that need to be deleted if they end up having no references to them. deletion_set: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, -/// Error tags and their values, tag names are duped with mod.gpa. -/// Corresponds with `error_name_list`. -global_error_set: std.StringHashMapUnmanaged(ErrorInt) = .{}, - -/// ErrorInt -> []const u8 for fast lookups for @intToError at comptime -/// Corresponds with `global_error_set`. -error_name_list: ArrayListUnmanaged([]const u8), +/// Key is the error name, index is the error tag value. Index 0 has a length-0 string. +global_error_set: GlobalErrorSet = .{}, /// Incrementing integer used to compare against the corresponding Decl /// field to determine whether a Decl's status applies to an ongoing update, or a @@ -165,6 +174,11 @@ allocated_decls: std.SegmentedList(Decl, 0) = .{}, /// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. decls_free_list: ArrayListUnmanaged(Decl.Index) = .{}, +/// Same pattern as with `allocated_decls`. 
+allocated_namespaces: std.SegmentedList(Namespace, 0) = .{}, +/// Same pattern as with `decls_free_list`. +namespaces_free_list: ArrayListUnmanaged(Namespace.Index) = .{}, + global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{}, reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { @@ -172,6 +186,8 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { src: LazySrcLoc, }) = .{}, +pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + pub const CImportError = struct { offset: u32, line: u32, @@ -187,108 +203,40 @@ pub const CImportError = struct { } }; -pub const StringLiteralContext = struct { - bytes: *ArrayListUnmanaged(u8), +pub const MonomorphedFuncKey = struct { func: Fn.Index, args_index: u32, args_len: u32 }; - pub const Key = struct { - index: u32, - len: u32, - }; +pub const MonomorphedFuncAdaptedKey = struct { func: Fn.Index, args: []const InternPool.Index }; - pub fn eql(self: @This(), a: Key, b: Key) bool { - _ = self; - return a.index == b.index and a.len == b.len; - } - - pub fn hash(self: @This(), x: Key) u64 { - const x_slice = self.bytes.items[x.index..][0..x.len]; - return std.hash_map.hashString(x_slice); - } -}; - -pub const StringLiteralAdapter = struct { - bytes: *ArrayListUnmanaged(u8), - - pub fn eql(self: @This(), a_slice: []const u8, b: StringLiteralContext.Key) bool { - const b_slice = self.bytes.items[b.index..][0..b.len]; - return mem.eql(u8, a_slice, b_slice); - } - - pub fn hash(self: @This(), adapted_key: []const u8) u64 { - _ = self; - return std.hash_map.hashString(adapted_key); - } -}; - -const MonomorphedFuncsSet = std.HashMapUnmanaged( - *Fn, - void, +pub const MonomorphedFuncsSet = std.HashMapUnmanaged( + MonomorphedFuncKey, + InternPool.Index, MonomorphedFuncsContext, std.hash_map.default_max_load_percentage, ); -const MonomorphedFuncsContext = struct { - pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool { - _ = ctx; - return a == b; +pub const MonomorphedFuncsContext = struct { + mod: *Module, + + pub fn eql(_: @This(), a: MonomorphedFuncKey, b: MonomorphedFuncKey) bool { + return std.meta.eql(a, b); } - /// Must match `Sema.GenericCallAdapter.hash`. - pub fn hash(ctx: @This(), key: *Fn) u64 { - _ = ctx; - return key.hash; + pub fn hash(ctx: @This(), key: MonomorphedFuncKey) u64 { + const key_args = ctx.mod.monomorphed_func_keys.items[key.args_index..][0..key.args_len]; + return std.hash.Wyhash.hash(@enumToInt(key.func), std.mem.sliceAsBytes(key_args)); } }; -pub const MemoizedCallSet = std.HashMapUnmanaged( - MemoizedCall.Key, - MemoizedCall.Result, - MemoizedCall, - std.hash_map.default_max_load_percentage, -); +pub const MonomorphedFuncsAdaptedContext = struct { + mod: *Module, -pub const MemoizedCall = struct { - module: *Module, - - pub const Key = struct { - func: *Fn, - args: []TypedValue, - }; - - pub const Result = struct { - val: Value, - arena: std.heap.ArenaAllocator.State, - }; - - pub fn eql(ctx: @This(), a: Key, b: Key) bool { - if (a.func != b.func) return false; - - assert(a.args.len == b.args.len); - for (a.args, 0..) 
|a_arg, arg_i| { - const b_arg = b.args[arg_i]; - if (!a_arg.eql(b_arg, ctx.module)) { - return false; - } - } - - return true; + pub fn eql(ctx: @This(), adapted_key: MonomorphedFuncAdaptedKey, other_key: MonomorphedFuncKey) bool { + const other_key_args = ctx.mod.monomorphed_func_keys.items[other_key.args_index..][0..other_key.args_len]; + return adapted_key.func == other_key.func and std.mem.eql(InternPool.Index, adapted_key.args, other_key_args); } - /// Must match `Sema.GenericCallAdapter.hash`. - pub fn hash(ctx: @This(), key: Key) u64 { - var hasher = std.hash.Wyhash.init(0); - - // The generic function Decl is guaranteed to be the first dependency - // of each of its instantiations. - std.hash.autoHash(&hasher, key.func); - - // This logic must be kept in sync with the logic in `analyzeCall` that - // computes the hash. - for (key.args) |arg| { - arg.hash(&hasher, ctx.module); - } - - return hasher.final(); + pub fn hash(_: @This(), adapted_key: MonomorphedFuncAdaptedKey) u64 { + return std.hash.Wyhash.hash(@enumToInt(adapted_key.func), std.mem.sliceAsBytes(adapted_key.args)); } }; @@ -322,7 +270,7 @@ pub const GlobalEmitH = struct { pub const ErrorInt = u32; pub const Export = struct { - options: std.builtin.ExportOptions, + opts: Options, src: LazySrcLoc, /// The Decl that performs the export. Note that this is *not* the Decl being exported. owner_decl: Decl.Index, @@ -340,10 +288,17 @@ pub const Export = struct { complete, }, + pub const Options = struct { + name: InternPool.NullTerminatedString, + linkage: std.builtin.GlobalLinkage = .Strong, + section: InternPool.OptionalNullTerminatedString = .none, + visibility: std.builtin.SymbolVisibility = .default, + }; + pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc { const src_decl = mod.declPtr(exp.src_decl); return .{ - .file_scope = src_decl.getFileScope(), + .file_scope = src_decl.getFileScope(mod), .parent_decl_node = src_decl.src_node, .lazy = exp.src, }; @@ -351,61 +306,76 @@ pub const Export = struct { }; pub const CaptureScope = struct { + refs: u32, parent: ?*CaptureScope, /// Values from this decl's evaluation that will be closed over in - /// child decls. Values stored in the value_arena of the linked decl. - /// During sema, this map is backed by the gpa. Once sema completes, - /// it is reallocated using the value_arena. - captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, TypedValue) = .{}, + /// child decls. This map is backed by the gpa, and deinited when + /// the refcount reaches 0. 
+ captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Capture) = .{}, - pub fn failed(noalias self: *const @This()) bool { + pub const Capture = union(enum) { + comptime_val: InternPool.Index, // index of value + runtime_val: InternPool.Index, // index of type + }; + + pub fn failed(noalias self: *const CaptureScope) bool { return self.captures.available == 0 and self.captures.size == std.math.maxInt(u32); } - pub fn fail(noalias self: *@This()) void { + pub fn fail(noalias self: *CaptureScope, gpa: Allocator) void { + self.captures.deinit(gpa); self.captures.available = 0; self.captures.size = std.math.maxInt(u32); } + + pub fn incRef(self: *CaptureScope) void { + self.refs += 1; + } + + pub fn decRef(self: *CaptureScope, gpa: Allocator) void { + self.refs -= 1; + if (self.refs > 0) return; + if (self.parent) |p| p.decRef(gpa); + if (!self.failed()) { + self.captures.deinit(gpa); + } + gpa.destroy(self); + } }; pub const WipCaptureScope = struct { scope: *CaptureScope, finalized: bool, gpa: Allocator, - perm_arena: Allocator, - pub fn init(gpa: Allocator, perm_arena: Allocator, parent: ?*CaptureScope) !@This() { - const scope = try perm_arena.create(CaptureScope); - scope.* = .{ .parent = parent }; - return @This(){ + pub fn init(gpa: Allocator, parent: ?*CaptureScope) !WipCaptureScope { + const scope = try gpa.create(CaptureScope); + if (parent) |p| p.incRef(); + scope.* = .{ .refs = 1, .parent = parent }; + return .{ .scope = scope, .finalized = false, .gpa = gpa, - .perm_arena = perm_arena, }; } - pub fn finalize(noalias self: *@This()) !void { - assert(!self.finalized); - // use a temp to avoid unintentional aliasing due to RLS - const tmp = try self.scope.captures.clone(self.perm_arena); - self.scope.captures.deinit(self.gpa); - self.scope.captures = tmp; + pub fn finalize(noalias self: *WipCaptureScope) !void { self.finalized = true; } - pub fn reset(noalias self: *@This(), parent: ?*CaptureScope) !void { - if (!self.finalized) try self.finalize(); - self.scope = try self.perm_arena.create(CaptureScope); - self.scope.* = .{ .parent = parent }; - self.finalized = false; + pub fn reset(noalias self: *WipCaptureScope, parent: ?*CaptureScope) !void { + self.scope.decRef(self.gpa); + self.scope = try self.gpa.create(CaptureScope); + if (parent) |p| p.incRef(); + self.scope.* = .{ .refs = 1, .parent = parent }; } - pub fn deinit(noalias self: *@This()) void { - if (!self.finalized) { - self.scope.captures.deinit(self.gpa); - self.scope.fail(); + pub fn deinit(noalias self: *WipCaptureScope) void { + if (self.finalized) { + self.scope.decRef(self.gpa); + } else { + self.scope.fail(self.gpa); } self.* = undefined; } @@ -452,8 +422,7 @@ const ValueArena = struct { }; pub const Decl = struct { - /// Allocated with Module's allocator; outlives the ZIR code. - name: [*:0]const u8, + name: InternPool.NullTerminatedString, /// The most recent Type of the Decl after a successful semantic analysis. /// Populated when `has_tv`. ty: Type, @@ -461,20 +430,16 @@ pub const Decl = struct { /// Populated when `has_tv`. val: Value, /// Populated when `has_tv`. - /// Points to memory inside value_arena. - @"linksection": ?[*:0]const u8, + @"linksection": InternPool.OptionalNullTerminatedString, /// Populated when `has_tv`. @"align": u32, /// Populated when `has_tv`. @"addrspace": std.builtin.AddressSpace, - /// The memory for ty, val, align, linksection, and captures. - /// If this is `null` then there is no memory management needed. 
- value_arena: ?*ValueArena = null, /// The direct parent namespace of the Decl. /// Reference to externally owned memory. /// In the case of the Decl corresponding to a file, this is /// the namespace of the struct, since there is no parent. - src_namespace: *Namespace, + src_namespace: Namespace.Index, /// The scope which lexically contains this decl. A decl must depend /// on its lexical parent, in order to ensure that this pointer is valid. @@ -624,55 +589,17 @@ pub const Decl = struct { function_body, }; - pub fn clearName(decl: *Decl, gpa: Allocator) void { - gpa.free(mem.sliceTo(decl.name, 0)); - decl.name = undefined; - } - pub fn clearValues(decl: *Decl, mod: *Module) void { - const gpa = mod.gpa; - if (decl.getExternFn()) |extern_fn| { - extern_fn.deinit(gpa); - gpa.destroy(extern_fn); - } - if (decl.getFunction()) |func| { + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); - if (func.comptime_args != null) { - _ = mod.monomorphed_funcs.remove(func); - } - func.deinit(gpa); - gpa.destroy(func); + mod.destroyFunc(func); } - if (decl.getVariable()) |variable| { - variable.deinit(gpa); - gpa.destroy(variable); - } - if (decl.value_arena) |value_arena| { - if (decl.owns_tv) { - if (decl.val.castTag(.str_lit)) |str_lit| { - mod.string_literal_table.getPtrContext(str_lit.data, .{ - .bytes = &mod.string_literal_bytes, - }).?.* = .none; - } - } - value_arena.deinit(gpa); - decl.value_arena = null; - decl.has_tv = false; - decl.owns_tv = false; - } - } - - pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void { - assert(decl.value_arena == null); - const value_arena = try arena.allocator().create(ValueArena); - value_arena.* = .{ .state = arena.state }; - decl.value_arena = value_arena; } /// This name is relative to the containing namespace of the decl. /// The memory is owned by the containing File ZIR. 
- pub fn getName(decl: Decl) ?[:0]const u8 { - const zir = decl.getFileScope().zir; + pub fn getName(decl: Decl, mod: *Module) ?[:0]const u8 { + const zir = decl.getFileScope(mod).zir; return decl.getNameZir(zir); } @@ -683,8 +610,8 @@ pub const Decl = struct { return zir.nullTerminatedString(name_index); } - pub fn contentsHash(decl: Decl) std.zig.SrcHash { - const zir = decl.getFileScope().zir; + pub fn contentsHash(decl: Decl, mod: *Module) std.zig.SrcHash { + const zir = decl.getFileScope(mod).zir; return decl.contentsHashZir(zir); } @@ -695,31 +622,31 @@ pub const Decl = struct { return contents_hash; } - pub fn zirBlockIndex(decl: *const Decl) Zir.Inst.Index { + pub fn zirBlockIndex(decl: *const Decl, mod: *Module) Zir.Inst.Index { assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; return zir.extra[decl.zir_decl_index + 6]; } - pub fn zirAlignRef(decl: Decl) Zir.Inst.Ref { + pub fn zirAlignRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_align) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 8]); } - pub fn zirLinksectionRef(decl: Decl) Zir.Inst.Ref { + pub fn zirLinksectionRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_linksection_or_addrspace) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align); return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); } - pub fn zirAddrspaceRef(decl: Decl) Zir.Inst.Ref { + pub fn zirAddrspaceRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_linksection_or_addrspace) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align) + 1; return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); } @@ -744,154 +671,167 @@ pub const Decl = struct { return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(node_index)); } - pub fn srcLoc(decl: Decl) SrcLoc { - return decl.nodeOffsetSrcLoc(0); + pub fn srcLoc(decl: Decl, mod: *Module) SrcLoc { + return decl.nodeOffsetSrcLoc(0, mod); } - pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32) SrcLoc { + pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32, mod: *Module) SrcLoc { return .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = LazySrcLoc.nodeOffset(node_offset), }; } - pub fn srcToken(decl: Decl) Ast.TokenIndex { - const tree = &decl.getFileScope().tree; + pub fn srcToken(decl: Decl, mod: *Module) Ast.TokenIndex { + const tree = &decl.getFileScope(mod).tree; return tree.firstToken(decl.src_node); } - pub fn srcByteOffset(decl: Decl) u32 { - const tree = &decl.getFileScope().tree; + pub fn srcByteOffset(decl: Decl, mod: *Module) u32 { + const tree = &decl.getFileScope(mod).tree; return tree.tokens.items(.start)[decl.srcToken()]; } pub fn renderFullyQualifiedName(decl: Decl, mod: *Module, writer: anytype) !void { - const unqualified_name = mem.sliceTo(decl.name, 0); if (decl.name_fully_qualified) { - return writer.writeAll(unqualified_name); + try writer.print("{}", .{decl.name.fmt(&mod.intern_pool)}); + } else { + try mod.namespacePtr(decl.src_namespace).renderFullyQualifiedName(mod, decl.name, writer); } - return 
decl.src_namespace.renderFullyQualifiedName(mod, unqualified_name, writer);
     }
 
     pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void {
-        const unqualified_name = mem.sliceTo(decl.name, 0);
-        return decl.src_namespace.renderFullyQualifiedDebugName(mod, unqualified_name, writer);
+        return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, decl.name, writer);
     }
 
-    pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 {
-        var buffer = std.ArrayList(u8).init(mod.gpa);
-        defer buffer.deinit();
-        try decl.renderFullyQualifiedName(mod, buffer.writer());
+    pub fn getFullyQualifiedName(decl: Decl, mod: *Module) !InternPool.NullTerminatedString {
+        if (decl.name_fully_qualified) return decl.name;
+
+        const ip = &mod.intern_pool;
+        const count = count: {
+            var count: usize = ip.stringToSlice(decl.name).len + 1;
+            var ns: Namespace.Index = decl.src_namespace;
+            while (true) {
+                const namespace = mod.namespacePtr(ns);
+                const ns_decl = mod.declPtr(namespace.getDeclIndex(mod));
+                count += ip.stringToSlice(ns_decl.name).len + 1;
+                ns = namespace.parent.unwrap() orelse {
+                    count += namespace.file_scope.sub_file_path.len;
+                    break :count count;
+                };
+            }
+        };
+
+        const gpa = mod.gpa;
+        const start = ip.string_bytes.items.len;
+        // Reserve all needed capacity up front so that writing the name below cannot
+        // reallocate string_bytes while renderFullyQualifiedName is still reading
+        // interned strings out of it.
+        try ip.string_bytes.ensureUnusedCapacity(gpa, count);
+        decl.renderFullyQualifiedName(mod, ip.string_bytes.writer(gpa)) catch unreachable;
 
         // Sanitize the name for nvptx which is more restrictive.
+        // TODO This should be handled by the backend, not the frontend. Have a
+        // look at how the C backend does it for inspiration.
         if (mod.comp.bin_file.options.target.cpu.arch.isNvptx()) {
-            for (buffer.items) |*byte| switch (byte.*) {
+            for (ip.string_bytes.items[start..]) |*byte| switch (byte.*) {
                 '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_',
                 else => {},
             };
         }
 
-        return buffer.toOwnedSliceSentinel(0);
+        return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start);
     }
 
     pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
         if (!decl.has_tv) return error.AnalysisFail;
-        return TypedValue{
-            .ty = decl.ty,
-            .val = decl.val,
-        };
+        return TypedValue{ .ty = decl.ty, .val = decl.val };
     }
 
-    pub fn value(decl: *Decl) error{AnalysisFail}!Value {
-        return (try decl.typedValue()).val;
+    pub fn internValue(decl: *Decl, mod: *Module) Allocator.Error!InternPool.Index {
+        assert(decl.has_tv);
+        const ip_index = try decl.val.intern(decl.ty, mod);
+        decl.val = ip_index.toValue();
+        return ip_index;
     }
 
-    pub fn isFunction(decl: Decl) !bool {
+    pub fn isFunction(decl: Decl, mod: *const Module) !bool {
         const tv = try decl.typedValue();
-        return tv.ty.zigTypeTag() == .Fn;
+        return tv.ty.zigTypeTag(mod) == .Fn;
     }
 
-    /// If the Decl has a value and it is a struct, return it,
+    /// If the Decl owns its value and it is a struct, return it,
     /// otherwise null.
-    pub fn getStruct(decl: *Decl) ?*Struct {
-        if (!decl.owns_tv) return null;
-        const ty = (decl.val.castTag(.ty) orelse return null).data;
-        const struct_obj = (ty.castTag(.@"struct") orelse return null).data;
-        return struct_obj;
+    pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct {
+        return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod));
     }
 
-    /// If the Decl has a value and it is a union, return it,
-    /// otherwise null.
- pub fn getUnion(decl: *Decl) ?*Union { - if (!decl.owns_tv) return null; - const ty = (decl.val.castTag(.ty) orelse return null).data; - const union_obj = (ty.cast(Type.Payload.Union) orelse return null).data; - return union_obj; + pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { + if (!decl.owns_tv) return .none; + if (decl.val.ip_index == .none) return .none; + return mod.intern_pool.indexToStructType(decl.val.toIntern()); } - /// If the Decl has a value and it is a function, return it, + /// If the Decl owns its value and it is a union, return it, /// otherwise null. - pub fn getFunction(decl: *const Decl) ?*Fn { + pub fn getOwnedUnion(decl: Decl, mod: *Module) ?*Union { if (!decl.owns_tv) return null; - const func = (decl.val.castTag(.function) orelse return null).data; - return func; + if (decl.val.ip_index == .none) return null; + return mod.typeToUnion(decl.val.toType()); } - /// If the Decl has a value and it is an extern function, returns it, + /// If the Decl owns its value and it is a function, return it, /// otherwise null. - pub fn getExternFn(decl: *const Decl) ?*ExternFn { - if (!decl.owns_tv) return null; - const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data; - return extern_fn; + pub fn getOwnedFunction(decl: Decl, mod: *Module) ?*Fn { + return mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod)); } - /// If the Decl has a value and it is a variable, returns it, + pub fn getOwnedFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { + return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none; + } + + /// If the Decl owns its value and it is an extern function, returns it, /// otherwise null. - pub fn getVariable(decl: *const Decl) ?*Var { - if (!decl.owns_tv) return null; - const variable = (decl.val.castTag(.variable) orelse return null).data; - return variable; + pub fn getOwnedExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc { + return if (decl.owns_tv) decl.val.getExternFunc(mod) else null; + } + + /// If the Decl owns its value and it is a variable, returns it, + /// otherwise null. + pub fn getOwnedVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable { + return if (decl.owns_tv) decl.val.getVariable(mod) else null; } /// Gets the namespace that this Decl creates by being a struct, union, /// enum, or opaque. /// Only returns it if the Decl is the owner. 
-    pub fn getInnerNamespace(decl: *Decl) ?*Namespace {
-        if (!decl.owns_tv) return null;
-        const ty = (decl.val.castTag(.ty) orelse return null).data;
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                return &struct_obj.namespace;
-            },
-            .enum_full, .enum_nonexhaustive => {
-                const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
-                return &enum_obj.namespace;
-            },
-            .empty_struct => {
-                return ty.castTag(.empty_struct).?.data;
-            },
-            .@"opaque" => {
-                const opaque_obj = ty.cast(Type.Payload.Opaque).?.data;
-                return &opaque_obj.namespace;
-            },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Type.Payload.Union).?.data;
-                return &union_obj.namespace;
+    pub fn getOwnedInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex {
+        if (!decl.owns_tv) return .none;
+        return switch (decl.val.ip_index) {
+            .empty_struct_type => .none,
+            .none => .none,
+            else => switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
+                .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
+                .struct_type => |struct_type| struct_type.namespace,
+                .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(),
+                .enum_type => |enum_type| enum_type.namespace,
+                else => .none,
             },
+        };
+    }
 
-            else => return null,
-        }
+    /// Same as `getOwnedInnerNamespaceIndex` but additionally obtains the pointer.
+    pub fn getOwnedInnerNamespace(decl: Decl, mod: *Module) ?*Namespace {
+        return mod.namespacePtrUnwrap(decl.getOwnedInnerNamespaceIndex(mod));
     }
 
     pub fn dump(decl: *Decl) void {
         const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src);
-        std.debug.print("{s}:{d}:{d} name={s} status={s}", .{
+        std.debug.print("{s}:{d}:{d} name={d} status={s}", .{
             decl.scope.sub_file_path,
             loc.line + 1,
             loc.column + 1,
-            mem.sliceTo(decl.name, 0),
+            @enumToInt(decl.name),
             @tagName(decl.analysis),
         });
         if (decl.has_tv) {
@@ -900,8 +840,8 @@
         std.debug.print("\n", .{});
     }
 
-    pub fn getFileScope(decl: Decl) *File {
-        return decl.src_namespace.file_scope;
+    pub fn getFileScope(decl: Decl, mod: *Module) *File {
+        return mod.namespacePtr(decl.src_namespace).file_scope;
     }
 
     pub fn removeDependant(decl: *Decl, other: Decl.Index) void {
@@ -912,25 +852,29 @@
         assert(decl.dependencies.swapRemove(other));
     }
 
-    pub fn isExtern(decl: Decl) bool {
+    pub fn isExtern(decl: Decl, mod: *Module) bool {
         assert(decl.has_tv);
-        return switch (decl.val.tag()) {
-            .extern_fn => true,
-            .variable => decl.val.castTag(.variable).?.data.init.tag() == .unreachable_value,
+        return switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
+            .variable => |variable| variable.is_extern,
+            .extern_func => true,
             else => false,
         };
     }
 
-    pub fn getAlignment(decl: Decl, target: Target) u32 {
+    pub fn getAlignment(decl: Decl, mod: *Module) u32 {
         assert(decl.has_tv);
         if (decl.@"align" != 0) {
             // Explicit alignment.
             return decl.@"align";
         } else {
             // Natural alignment.
-            return decl.ty.abiAlignment(target);
+            return decl.ty.abiAlignment(mod);
         }
     }
+
+    pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void {
+        decl.val = (try decl.val.intern(decl.ty, mod)).toValue();
+    }
 };
 
 /// This state is attached to every Decl when Module emit_h is non-null.
@@ -938,38 +882,6 @@
 pub const EmitH = struct {
     fwd_decl: ArrayListUnmanaged(u8) = .{},
 };
 
-/// Represents the data that an explicit error set syntax provides.
-pub const ErrorSet = struct {
-    /// The Decl that corresponds to the error set itself.
- owner_decl: Decl.Index, - /// The string bytes are stored in the owner Decl arena. - /// These must be in sorted order. See sortNames. - names: NameMap, - - pub const NameMap = std.StringArrayHashMapUnmanaged(void); - - pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - /// sort the NameMap. This should be called whenever the map is modified. - /// alloc should be the allocator used for the NameMap data. - pub fn sortNames(names: *NameMap) void { - const Context = struct { - keys: [][]const u8, - pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { - return std.mem.lessThan(u8, ctx.keys[a_index], ctx.keys[b_index]); - } - }; - names.sort(Context{ .keys = names.keys() }); - } -}; - pub const PropertyBoolean = enum { no, yes, unknown, wip }; /// Represents the data that a struct declaration provides. @@ -977,7 +889,7 @@ pub const Struct = struct { /// Set of field names in declaration order. fields: Fields, /// Represents the declarations inside this struct. - namespace: Namespace, + namespace: Namespace.Index, /// The Decl that corresponds to the struct itself. owner_decl: Decl.Index, /// Index of the struct_decl ZIR instruction. @@ -989,7 +901,7 @@ pub const Struct = struct { /// If the layout is packed, this is the backing integer type of the packed struct. /// Whether zig chooses this type or the user specifies it, it is stored here. /// This will be set to the noreturn type until status is `have_layout`. - backing_int_ty: Type = Type.initTag(.noreturn), + backing_int_ty: Type = Type.noreturn, status: enum { none, field_types_wip, @@ -1011,15 +923,37 @@ pub const Struct = struct { is_tuple: bool, assumed_runtime_bits: bool = false, - pub const Fields = std.StringArrayHashMapUnmanaged(Field); + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + + pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field); /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl. pub const Field = struct { /// Uses `noreturn` to indicate `anytype`. /// undefined until `status` is >= `have_field_types`. ty: Type, - /// Uses `unreachable_value` to indicate no default. - default_val: Value, + /// Uses `none` to indicate no default. + default_val: InternPool.Index, /// Zero means to use the ABI alignment of the type. abi_align: u32, /// undefined until `status` is `have_layout`. @@ -1030,7 +964,7 @@ pub const Struct = struct { /// Returns the field alignment. If the struct is packed, returns 0. 
pub fn alignment( field: Field, - target: Target, + mod: *Module, layout: std.builtin.Type.ContainerLayout, ) u32 { if (field.abi_align != 0) { @@ -1038,24 +972,26 @@ pub const Struct = struct { return field.abi_align; } + const target = mod.getTarget(); + switch (layout) { .Packed => return 0, .Auto => { if (target.ofmt == .c) { - return alignmentExtern(field, target); + return alignmentExtern(field, mod); } else { - return field.ty.abiAlignment(target); + return field.ty.abiAlignment(mod); } }, - .Extern => return alignmentExtern(field, target), + .Extern => return alignmentExtern(field, mod), } } - pub fn alignmentExtern(field: Field, target: Target) u32 { + pub fn alignmentExtern(field: Field, mod: *Module) u32 { // This logic is duplicated in Type.abiAlignmentAdvanced. - const ty_abi_align = field.ty.abiAlignment(target); + const ty_abi_align = field.ty.abiAlignment(mod); - if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { // The C ABI requires 128 bit integer fields of structs // to be 16-bytes aligned. return @max(ty_abi_align, 16); @@ -1069,39 +1005,12 @@ pub const Struct = struct { /// runtime version of the struct. pub const omitted_field = std.math.maxInt(u32); - pub fn getFullyQualifiedName(s: *Struct, mod: *Module) ![:0]u8 { + pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString { return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } pub fn srcLoc(s: Struct, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(s.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc { - @setCold(true); - const owner_decl = mod.declPtr(s.owner_decl); - const file = owner_decl.getFileScope(); - const tree = file.getTree(mod.gpa) catch |err| { - // In this case we emit a warning + a less precise source location. 
- log.warn("unable to load {s}: {s}", .{ - file.sub_file_path, @errorName(err), - }); - return s.srcLoc(mod); - }; - const node = owner_decl.relativeToNodeIndex(0); - - var buf: [2]Ast.Node.Index = undefined; - if (tree.fullContainerDecl(&buf, node)) |container_decl| { - return queryFieldSrc(tree.*, query, file, container_decl); - } else { - // This struct was generated using @Type - return s.srcLoc(mod); - } + return mod.declPtr(s.owner_decl).srcLoc(mod); } pub fn haveFieldTypes(s: Struct) bool { @@ -1132,7 +1041,7 @@ pub const Struct = struct { }; } - pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 { + pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 { assert(s.layout == .Packed); assert(s.haveLayout()); var bit_sum: u64 = 0; @@ -1140,12 +1049,13 @@ pub const Struct = struct { if (i == index) { return @intCast(u16, bit_sum); } - bit_sum += field.ty.bitSize(target); + bit_sum += field.ty.bitSize(mod); } unreachable; // index out of bounds } pub const RuntimeFieldIterator = struct { + module: *Module, struct_obj: *const Struct, index: u32 = 0, @@ -1155,6 +1065,7 @@ pub const Struct = struct { }; pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex { + const mod = it.module; while (true) { var i = it.index; it.index += 1; @@ -1167,122 +1078,21 @@ pub const Struct = struct { } const field = it.struct_obj.fields.values()[i]; - if (!field.is_comptime and field.ty.hasRuntimeBits()) { + if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) { return FieldAndIndex{ .index = i, .field = field }; } } } }; - pub fn runtimeFieldIterator(s: *const Struct) RuntimeFieldIterator { - return .{ .struct_obj = s }; - } -}; - -/// Represents the data that an enum declaration provides, when the fields -/// are auto-numbered, and there are no declarations. The integer tag type -/// is inferred to be the smallest power of two unsigned int that fits -/// the number of fields. -pub const EnumSimple = struct { - /// The Decl that corresponds to the enum itself. - owner_decl: Decl.Index, - /// Set of field names in declaration order. - fields: NameMap, - - pub const NameMap = EnumFull.NameMap; - - pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); + pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator { return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), + .struct_obj = s, + .module = module, }; } }; -/// Represents the data that an enum declaration provides, when there are no -/// declarations. However an integer tag type is provided, and the enum tag values -/// are explicitly provided. -pub const EnumNumbered = struct { - /// The Decl that corresponds to the enum itself. - owner_decl: Decl.Index, - /// An integer type which is used for the numerical value of the enum. - /// Whether zig chooses this type or the user specifies it, it is stored here. - tag_ty: Type, - /// Set of field names in declaration order. - fields: NameMap, - /// Maps integer tag value to field index. - /// Entries are in declaration order, same as `fields`. - /// If this hash map is empty, it means the enum tags are auto-numbered. 
- values: ValueMap, - - pub const NameMap = EnumFull.NameMap; - pub const ValueMap = EnumFull.ValueMap; - - pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } -}; - -/// Represents the data that an enum declaration provides, when there is -/// at least one tag value explicitly specified, or at least one declaration. -pub const EnumFull = struct { - /// The Decl that corresponds to the enum itself. - owner_decl: Decl.Index, - /// An integer type which is used for the numerical value of the enum. - /// Whether zig chooses this type or the user specifies it, it is stored here. - tag_ty: Type, - /// Set of field names in declaration order. - fields: NameMap, - /// Maps integer tag value to field index. - /// Entries are in declaration order, same as `fields`. - /// If this hash map is empty, it means the enum tags are auto-numbered. - values: ValueMap, - /// Represents the declarations inside this enum. - namespace: Namespace, - /// true if zig inferred this tag type, false if user specified it - tag_ty_inferred: bool, - - pub const NameMap = std.StringArrayHashMapUnmanaged(void); - pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false); - - pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - pub fn fieldSrcLoc(e: EnumFull, mod: *Module, query: FieldSrcQuery) SrcLoc { - @setCold(true); - const owner_decl = mod.declPtr(e.owner_decl); - const file = owner_decl.getFileScope(); - const tree = file.getTree(mod.gpa) catch |err| { - // In this case we emit a warning + a less precise source location. - log.warn("unable to load {s}: {s}", .{ - file.sub_file_path, @errorName(err), - }); - return e.srcLoc(mod); - }; - const node = owner_decl.relativeToNodeIndex(0); - var buf: [2]Ast.Node.Index = undefined; - if (tree.fullContainerDecl(&buf, node)) |container_decl| { - return queryFieldSrc(tree.*, query, file, container_decl); - } else { - // This enum was generated using @Type - return e.srcLoc(mod); - } - } -}; - pub const Union = struct { /// An enum type which is used for the tag of the union. /// This type is created even for untagged unions, even when the memory @@ -1293,7 +1103,7 @@ pub const Union = struct { /// Set of field names in declaration order. fields: Fields, /// Represents the declarations inside this union. - namespace: Namespace, + namespace: Namespace.Index, /// The Decl that corresponds to the union itself. owner_decl: Decl.Index, /// Index of the union_decl ZIR instruction. 
@@ -1314,6 +1124,28 @@ pub const Union = struct { requires_comptime: PropertyBoolean = .unknown, assumed_runtime_bits: bool = false, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + pub const Field = struct { /// undefined until `status` is `have_field_types` or `have_layout`. ty: Type, @@ -1323,52 +1155,30 @@ pub const Union = struct { /// Returns the field alignment, assuming the union is not packed. /// Keep implementation in sync with `Sema.unionFieldAlignment`. /// Prefer to call that function instead of this one during Sema. - pub fn normalAlignment(field: Field, target: Target) u32 { + pub fn normalAlignment(field: Field, mod: *Module) u32 { if (field.abi_align == 0) { - return field.ty.abiAlignment(target); + return field.ty.abiAlignment(mod); } else { return field.abi_align; } } }; - pub const Fields = std.StringArrayHashMapUnmanaged(Field); + pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field); - pub fn getFullyQualifiedName(s: *Union, mod: *Module) ![:0]u8 { + pub fn getFullyQualifiedName(s: *Union, mod: *Module) !InternPool.NullTerminatedString { return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } pub fn srcLoc(self: Union, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; } - pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc { - @setCold(true); - const owner_decl = mod.declPtr(u.owner_decl); - const file = owner_decl.getFileScope(); - const tree = file.getTree(mod.gpa) catch |err| { - // In this case we emit a warning + a less precise source location. - log.warn("unable to load {s}: {s}", .{ - file.sub_file_path, @errorName(err), - }); - return u.srcLoc(mod); - }; - const node = owner_decl.relativeToNodeIndex(0); - - var buf: [2]Ast.Node.Index = undefined; - if (tree.fullContainerDecl(&buf, node)) |container_decl| { - return queryFieldSrc(tree.*, query, file, container_decl); - } else { - // This union was generated using @Type - return u.srcLoc(mod); - } - } - pub fn haveFieldTypes(u: Union) bool { return switch (u.status) { .none, @@ -1383,22 +1193,22 @@ pub const Union = struct { }; } - pub fn hasAllZeroBitFieldTypes(u: Union) bool { + pub fn hasAllZeroBitFieldTypes(u: Union, mod: *Module) bool { assert(u.haveFieldTypes()); for (u.fields.values()) |field| { - if (field.ty.hasRuntimeBits()) return false; + if (field.ty.hasRuntimeBits(mod)) return false; } return true; } - pub fn mostAlignedField(u: Union, target: Target) u32 { + pub fn mostAlignedField(u: Union, mod: *Module) u32 { assert(u.haveFieldTypes()); var most_alignment: u32 = 0; var most_index: usize = undefined; for (u.fields.values(), 0..) 
|field, i| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; - const field_align = field.normalAlignment(target); + const field_align = field.normalAlignment(mod); if (field_align > most_alignment) { most_alignment = field_align; most_index = i; @@ -1408,20 +1218,20 @@ pub const Union = struct { } /// Returns 0 if the union is represented with 0 bits at runtime. - pub fn abiAlignment(u: Union, target: Target, have_tag: bool) u32 { + pub fn abiAlignment(u: Union, mod: *Module, have_tag: bool) u32 { var max_align: u32 = 0; - if (have_tag) max_align = u.tag_ty.abiAlignment(target); + if (have_tag) max_align = u.tag_ty.abiAlignment(mod); for (u.fields.values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; - const field_align = field.normalAlignment(target); + const field_align = field.normalAlignment(mod); max_align = @max(max_align, field_align); } return max_align; } - pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 { - return u.getLayout(target, have_tag).abi_size; + pub fn abiSize(u: Union, mod: *Module, have_tag: bool) u64 { + return u.getLayout(mod, have_tag).abi_size; } pub const Layout = struct { @@ -1451,7 +1261,7 @@ pub const Union = struct { }; } - pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout { + pub fn getLayout(u: Union, mod: *Module, have_tag: bool) Layout { assert(u.haveLayout()); var most_aligned_field: u32 = undefined; var most_aligned_field_size: u64 = undefined; @@ -1460,16 +1270,16 @@ pub const Union = struct { var payload_align: u32 = 0; const fields = u.fields.values(); for (fields, 0..) |field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const field_align = a: { if (field.abi_align == 0) { - break :a field.ty.abiAlignment(target); + break :a field.ty.abiAlignment(mod); } else { break :a field.abi_align; } }; - const field_size = field.ty.abiSize(target); + const field_size = field.ty.abiSize(mod); if (field_size > payload_size) { payload_size = field_size; biggest_field = @intCast(u32, i); @@ -1481,7 +1291,7 @@ pub const Union = struct { } } payload_align = @max(payload_align, 1); - if (!have_tag or !u.tag_ty.hasRuntimeBits()) { + if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { return .{ .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), .abi_align = payload_align, @@ -1497,8 +1307,8 @@ pub const Union = struct { } // Put the tag before or after the payload depending on which one's // alignment is greater. - const tag_size = u.tag_ty.abiSize(target); - const tag_align = @max(1, u.tag_ty.abiAlignment(target)); + const tag_size = u.tag_ty.abiSize(mod); + const tag_align = @max(1, u.tag_ty.abiAlignment(mod)); var size: u64 = 0; var padding: u32 = undefined; if (tag_align >= payload_align) { @@ -1533,26 +1343,6 @@ pub const Union = struct { } }; -pub const Opaque = struct { - /// The Decl that corresponds to the opaque itself. - owner_decl: Decl.Index, - /// Represents the declarations inside this opaque. 
- namespace: Namespace, - - pub fn srcLoc(self: Opaque, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - pub fn getFullyQualifiedName(s: *Opaque, mod: *Module) ![:0]u8 { - return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); - } -}; - /// Some extern function struct memory is owned by the Decl's TypedValue.Managed /// arena allocator. pub const ExternFn = struct { @@ -1630,12 +1420,27 @@ pub const Fn = struct { is_noinline: bool, calls_or_awaits_errorable_fn: bool = false, - /// Any inferred error sets that this function owns, both its own inferred error set and - /// inferred error sets of any inline/comptime functions called. Not to be confused - /// with inferred error sets of generic instantiations of this function, which are - /// *not* tracked here - they are tracked in the new `Fn` object created for the - /// instantiations. - inferred_error_sets: InferredErrorSetList = .{}, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; pub const Analysis = enum { /// This function has not yet undergone analysis, because we have not @@ -1662,16 +1467,16 @@ pub const Fn = struct { /// or comptime functions. pub const InferredErrorSet = struct { /// The function from which this error set originates. - func: *Fn, + func: Fn.Index, /// All currently known errors that this error set contains. This includes /// direct additions via `return error.Foo;`, and possibly also errors that /// are returned from any dependent functions. When the inferred error set is /// fully resolved, this map contains all the errors that the function might return. - errors: ErrorSet.NameMap = .{}, + errors: NameMap = .{}, /// Other inferred error sets which this inferred error set should include. - inferred_error_sets: std.AutoArrayHashMapUnmanaged(*InferredErrorSet, void) = .{}, + inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{}, /// Whether the function returned anyerror. This is true if either of /// the dependent functions returns anyerror. @@ -1681,52 +1486,57 @@ pub const Fn = struct { /// can skip resolving any dependents of this inferred error set. 
is_resolved: bool = false, - pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void { - switch (err_set_ty.tag()) { - .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .error_set_single => { - const name = err_set_ty.castTag(.error_set_single).?.data; - try self.errors.put(gpa, name, {}); - }, - .error_set_inferred => { - const ies = err_set_ty.castTag(.error_set_inferred).?.data; - try self.inferred_error_sets.put(gpa, ies, {}); - }, - .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .anyerror => { + pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { + if (oi == .none) return null; + return @intToEnum(InferredErrorSet.Index, @enumToInt(oi)); + } + }; + + pub fn addErrorSet( + self: *InferredErrorSet, + err_set_ty: Type, + ip: *InternPool, + gpa: Allocator, + ) !void { + switch (err_set_ty.toIntern()) { + .anyerror_type => { self.is_anyerror = true; }, - else => unreachable, + else => switch (ip.indexToKey(err_set_ty.toIntern())) { + .error_set_type => |error_set_type| { + for (error_set_type.names) |name| { + try self.errors.put(gpa, name, {}); + } + }, + .inferred_error_set_type => |ies_index| { + try self.inferred_error_sets.put(gpa, ies_index, {}); + }, + else => unreachable, + }, } } }; - pub const InferredErrorSetList = std.SinglyLinkedList(InferredErrorSet); - pub const InferredErrorSetListNode = InferredErrorSetList.Node; - - pub fn deinit(func: *Fn, gpa: Allocator) void { - var it = func.inferred_error_sets.first; - while (it) |node| { - const next = node.next; - node.data.errors.deinit(gpa); - node.data.inferred_error_sets.deinit(gpa); - gpa.destroy(node); - it = next; - } - } - pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { - const file = mod.declPtr(func.owner_decl).getFileScope(); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); const tags = file.zir.instructions.items(.tag); @@ -1741,7 +1551,7 @@ pub const Fn = struct { } pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 { - const file = mod.declPtr(func.owner_decl).getFileScope(); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); const tags = file.zir.instructions.items(.tag); const data = file.zir.instructions.items(.data); @@ -1764,7 +1574,7 @@ pub const Fn = struct { pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool { const owner_decl = mod.declPtr(func.owner_decl); - const zir = owner_decl.getFileScope().zir; + const zir = owner_decl.getFileScope(mod).zir; const zir_tags = zir.instructions.items(.tag); switch (zir_tags[func.zir_body_inst]) { .func => return false, @@ -1779,46 +1589,24 @@ pub const Fn = struct { } }; -pub const Var = struct { - /// if is_extern == true this is undefined - init: Value, - owner_decl: Decl.Index, - - /// Library name if 
specified. - /// For example `extern "c" var stderrp = ...` would have 'c' as library name. - /// Allocated with Module's allocator; outlives the ZIR code. - lib_name: ?[*:0]const u8, - - is_extern: bool, - is_mutable: bool, - is_threadlocal: bool, - is_weak_linkage: bool, - - pub fn deinit(variable: *Var, gpa: Allocator) void { - if (variable.lib_name) |lib_name| { - gpa.free(mem.sliceTo(lib_name, 0)); - } - } -}; - pub const DeclAdapter = struct { mod: *Module, - pub fn hash(self: @This(), s: []const u8) u32 { + pub fn hash(self: @This(), s: InternPool.NullTerminatedString) u32 { _ = self; - return @truncate(u32, std.hash.Wyhash.hash(0, s)); + return std.hash.uint32(@enumToInt(s)); } - pub fn eql(self: @This(), a: []const u8, b_decl_index: Decl.Index, b_index: usize) bool { + pub fn eql(self: @This(), a: InternPool.NullTerminatedString, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; const b_decl = self.mod.declPtr(b_decl_index); - return mem.eql(u8, a, mem.sliceTo(b_decl.name, 0)); + return a == b_decl.name; } }; /// The container that structs, enums, unions, and opaques have. pub const Namespace = struct { - parent: ?*Namespace, + parent: OptionalIndex, file_scope: *File, /// Will be a struct, enum, union, or opaque. ty: Type, @@ -1836,21 +1624,41 @@ pub const Namespace = struct { /// Value is whether the usingnamespace decl is marked `pub`. usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{}, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + const DeclContext = struct { module: *Module, pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 { const decl = ctx.module.declPtr(decl_index); - return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceTo(decl.name, 0))); + return std.hash.uint32(@enumToInt(decl.name)); } pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; const a_decl = ctx.module.declPtr(a_decl_index); const b_decl = ctx.module.declPtr(b_decl_index); - const a_name = mem.sliceTo(a_decl.name, 0); - const b_name = mem.sliceTo(b_decl.name, 0); - return mem.eql(u8, a_name, b_name); + return a_decl.name == b_decl.name; } }; @@ -1862,8 +1670,6 @@ pub const Namespace = struct { pub fn destroyDecls(ns: *Namespace, mod: *Module) void { const gpa = mod.gpa; - log.debug("destroyDecls {*}", .{ns}); - var decls = ns.decls; ns.decls = .{}; @@ -1889,8 +1695,6 @@ pub const Namespace = struct { ) !void { const gpa = mod.gpa; - log.debug("deleteAllDecls {*}", .{ns}); - var decls = ns.decls; ns.decls = .{}; @@ -1919,46 +1723,38 @@ pub const Namespace = struct { pub fn renderFullyQualifiedName( ns: Namespace, mod: *Module, - name: []const u8, + name: InternPool.NullTerminatedString, writer: anytype, ) @TypeOf(writer).Error!void { - if (ns.parent) |parent| { - const decl_index = ns.getDeclIndex(); - const decl = mod.declPtr(decl_index); - try parent.renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); + if (ns.parent.unwrap()) |parent| { + const decl = mod.declPtr(ns.getDeclIndex(mod)); + try mod.namespacePtr(parent).renderFullyQualifiedName(mod, decl.name, writer); } else { 
try ns.file_scope.renderFullyQualifiedName(writer); } - if (name.len != 0) { - try writer.writeAll("."); - try writer.writeAll(name); - } + if (name != .empty) try writer.print(".{}", .{name.fmt(&mod.intern_pool)}); } /// This renders e.g. "std/fs.zig:Dir.OpenOptions" pub fn renderFullyQualifiedDebugName( ns: Namespace, mod: *Module, - name: []const u8, + name: InternPool.NullTerminatedString, writer: anytype, ) @TypeOf(writer).Error!void { - var separator_char: u8 = '.'; - if (ns.parent) |parent| { - const decl_index = ns.getDeclIndex(); - const decl = mod.declPtr(decl_index); - try parent.renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); - } else { + const separator_char: u8 = if (ns.parent.unwrap()) |parent| sep: { + const decl = mod.declPtr(ns.getDeclIndex(mod)); + try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, decl.name, writer); + break :sep '.'; + } else sep: { try ns.file_scope.renderFullyQualifiedDebugName(writer); - separator_char = ':'; - } - if (name.len != 0) { - try writer.writeByte(separator_char); - try writer.writeAll(name); - } + break :sep ':'; + }; + if (name != .empty) try writer.print("{c}{}", .{ separator_char, name.fmt(&mod.intern_pool) }); } - pub fn getDeclIndex(ns: Namespace) Decl.Index { - return ns.ty.getOwnerDecl(); + pub fn getDeclIndex(ns: Namespace, mod: *Module) Decl.Index { + return ns.ty.getOwnerDecl(mod); } }; @@ -2140,11 +1936,11 @@ pub const File = struct { }; } - pub fn fullyQualifiedNameZ(file: File, gpa: Allocator) ![:0]u8 { - var buf = std.ArrayList(u8).init(gpa); - defer buf.deinit(); - try file.renderFullyQualifiedName(buf.writer()); - return buf.toOwnedSliceSentinel(0); + pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + const start = ip.string_bytes.items.len; + try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa)); + return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start); } /// Returns the full path to this file relative to its package. @@ -2268,7 +2064,7 @@ pub const ErrorMsg = struct { reference_trace: []Trace = &.{}, pub const Trace = struct { - decl: ?[*:0]const u8, + decl: InternPool.OptionalNullTerminatedString, src_loc: SrcLoc, hidden: u32 = 0, }; @@ -2281,7 +2077,7 @@ pub const ErrorMsg = struct { ) !*ErrorMsg { const err_msg = try gpa.create(ErrorMsg); errdefer gpa.destroy(err_msg); - err_msg.* = try init(gpa, src_loc, format, args); + err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args); return err_msg; } @@ -3287,7 +3083,7 @@ pub const LazySrcLoc = union(enum) { } /// Upgrade to a `SrcLoc` based on the `Decl` provided. 
- pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl) SrcLoc { + pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl, mod: *Module) SrcLoc { return switch (lazy) { .unneeded, .entire_file, @@ -3295,7 +3091,7 @@ pub const LazySrcLoc = union(enum) { .token_abs, .node_abs, => .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = 0, .lazy = lazy, }, @@ -3361,7 +3157,7 @@ pub const LazySrcLoc = union(enum) { .for_input, .for_capture_from_input, => .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = lazy, }, @@ -3391,6 +3187,12 @@ pub const CompileError = error{ ComptimeBreak, }; +pub fn init(mod: *Module) !void { + const gpa = mod.gpa; + try mod.intern_pool.init(gpa); + try mod.global_error_set.put(gpa, .empty, {}); +} + pub fn deinit(mod: *Module) void { const gpa = mod.gpa; @@ -3489,42 +3291,29 @@ pub fn deinit(mod: *Module) void { } mod.export_owners.deinit(gpa); - { - var it = mod.global_error_set.keyIterator(); - while (it.next()) |key| { - gpa.free(key.*); - } - mod.global_error_set.deinit(gpa); - } + mod.global_error_set.deinit(gpa); - mod.error_name_list.deinit(gpa); mod.test_functions.deinit(gpa); mod.align_stack_fns.deinit(gpa); mod.monomorphed_funcs.deinit(gpa); - { - var it = mod.memoized_calls.iterator(); - while (it.next()) |entry| { - gpa.free(entry.key_ptr.args); - entry.value_ptr.arena.promote(gpa).deinit(); - } - mod.memoized_calls.deinit(gpa); - } - mod.decls_free_list.deinit(gpa); mod.allocated_decls.deinit(gpa); mod.global_assembly.deinit(gpa); mod.reference_table.deinit(gpa); - mod.string_literal_table.deinit(gpa); - mod.string_literal_bytes.deinit(gpa); + mod.namespaces_free_list.deinit(gpa); + mod.allocated_namespaces.deinit(gpa); + + mod.memoized_decls.deinit(gpa); + mod.intern_pool.deinit(gpa); + mod.tmp_hack_arena.deinit(); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { const gpa = mod.gpa; { const decl = mod.declPtr(decl_index); - log.debug("destroy {*} ({s})", .{ decl, decl.name }); _ = mod.test_functions.swapRemove(decl_index); if (decl.deletion_flag) { assert(mod.deletion_set.swapRemove(decl_index)); @@ -3533,14 +3322,15 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { gpa.free(kv.value); } if (decl.has_tv) { - if (decl.getInnerNamespace()) |namespace| { - namespace.destroyDecls(mod); + if (decl.getOwnedInnerNamespaceIndex(mod).unwrap()) |i| { + mod.namespacePtr(i).destroyDecls(mod); + mod.destroyNamespace(i); } } + if (decl.src_scope) |scope| scope.decRef(gpa); decl.clearValues(mod); decl.dependants.deinit(gpa); decl.dependencies.deinit(gpa); - decl.clearName(gpa); decl.* = undefined; } mod.decls_free_list.append(gpa, decl_index) catch { @@ -3554,24 +3344,55 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } } -pub fn declPtr(mod: *Module, decl_index: Decl.Index) *Decl { - return mod.allocated_decls.at(@enumToInt(decl_index)); +pub fn declPtr(mod: *Module, index: Decl.Index) *Decl { + return mod.allocated_decls.at(@enumToInt(index)); +} + +pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { + return mod.allocated_namespaces.at(@enumToInt(index)); +} + +pub fn unionPtr(mod: *Module, index: Union.Index) *Union { + return mod.intern_pool.unionPtr(index); +} + +pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { + return mod.intern_pool.structPtr(index); +} + +pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn { + return mod.intern_pool.funcPtr(index); +} + +pub fn 
inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet { + return mod.intern_pool.inferredErrorSetPtr(index); +} + +pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace { + return mod.namespacePtr(index.unwrap() orelse return null); +} + +/// This one accepts an index from the InternPool and asserts that it is not +/// the anonymous empty struct type. +pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { + return mod.structPtr(index.unwrap() orelse return null); +} + +pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn { + return mod.funcPtr(index.unwrap() orelse return null); } /// Returns true if and only if the Decl is the top level struct associated with a File. pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { const decl = mod.declPtr(decl_index); - if (decl.src_namespace.parent != null) + const namespace = mod.namespacePtr(decl.src_namespace); + if (namespace.parent != .none) return false; - return decl_index == decl.src_namespace.getDeclIndex(); + return decl_index == namespace.getDeclIndex(mod); } fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void { - for (export_list.items) |exp| { - gpa.free(exp.options.name); - if (exp.options.section) |s| gpa.free(s); - gpa.destroy(exp); - } + for (export_list.items) |exp| gpa.destroy(exp); export_list.deinit(gpa); } @@ -3990,9 +3811,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { if (decl.zir_decl_index != 0) { const old_zir_decl_index = decl.zir_decl_index; const new_zir_decl_index = extra_map.get(old_zir_decl_index) orelse { - log.debug("updateZirRefs {s}: delete {*} ({s})", .{ - file.sub_file_path, decl, decl.name, - }); try file.deleted_decls.append(gpa, decl_index); continue; }; @@ -4000,41 +3818,34 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { decl.zir_decl_index = new_zir_decl_index; const new_hash = decl.contentsHashZir(new_zir); if (!std.zig.srcHashEql(old_hash, new_hash)) { - log.debug("updateZirRefs {s}: outdated {*} ({s}) {d} => {d}", .{ - file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, - }); try file.outdated_decls.append(gpa, decl_index); - } else { - log.debug("updateZirRefs {s}: unchanged {*} ({s}) {d} => {d}", .{ - file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, - }); } } if (!decl.owns_tv) continue; - if (decl.getStruct()) |struct_obj| { + if (decl.getOwnedStruct(mod)) |struct_obj| { struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getUnion()) |union_obj| { + if (decl.getOwnedUnion(mod)) |union_obj| { union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getFunction()) |func| { + if (decl.getOwnedFunction(mod)) |func| { func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getInnerNamespace()) |namespace| { + if (decl.getOwnedInnerNamespace(mod)) |namespace| { for (namespace.decls.keys()) |sub_decl| { try decl_stack.append(gpa, sub_decl); } @@ -4207,14 +4018,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { .complete => return, .outdated => blk: { - log.debug("re-analyzing {*} ({s})", .{ decl, decl.name }); - // The exports this Decl performs will be re-discovered, so we remove them here 
// prior to re-analysis. try mod.deleteDeclExports(decl_index); // Similarly, `@setAlignStack` invocations will be re-discovered. - if (decl.getFunction()) |func| { + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); } @@ -4223,9 +4032,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { const dep = mod.declPtr(dep_index); dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { - log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ - decl, decl.name, dep, dep.name, - }); try mod.markDeclForDeletion(dep_index); } } @@ -4237,7 +4043,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { .unreferenced => false, }; - var decl_prog_node = mod.sema_prog_node.start(mem.sliceTo(decl.name, 0), 0); + var decl_prog_node = mod.sema_prog_node.start("", 0); decl_prog_node.activate(); defer decl_prog_node.end(); @@ -4264,7 +4070,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to analyze: {s}", .{@errorName(e)}, )); @@ -4277,7 +4083,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // Update all dependents which have at least this level of dependency. // If our type remained the same and we're a function, only update // decls which depend on our body; otherwise, update all dependents. - const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag() == .Fn) .function_body else .normal; + const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag(mod) == .Fn) .function_body else .normal; for (decl.dependants.keys(), decl.dependants.values()) |dep_index, dep_type| { if (@enumToInt(dep_type) < @enumToInt(update_level)) continue; @@ -4304,10 +4110,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { } } -pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { +pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4339,7 +4146,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { defer tmp_arena.deinit(); const sema_arena = tmp_arena.allocator(); - var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) { + var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { error.AnalysisFail => { if (func.state == .in_progress) { // If this decl caused the compile error, the analysis field would @@ -4365,17 +4172,14 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_air and !dump_llvm_ir) return; - log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); defer liveness.deinit(gpa); if (dump_air) { const fqn = try decl.getFullyQualifiedName(mod); - defer mod.gpa.free(fqn); - - std.debug.print("# Begin Function AIR: {s}:\n", .{fqn}); + std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(&mod.intern_pool)}); @import("print_air.zig").dump(mod, air, liveness); - std.debug.print("# End Function AIR: {s}\n\n", .{fqn}); + 
std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(&mod.intern_pool)}); } if (std.debug.runtime_safety) { @@ -4383,6 +4187,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { .gpa = gpa, .air = air, .liveness = liveness, + .intern_pool = &mod.intern_pool, }; defer verify.deinit(); @@ -4394,7 +4199,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "invalid liveness: {s}", .{@errorName(err)}, ), @@ -4407,7 +4212,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_llvm_ir) return; - comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) { + comp.bin_file.updateFunc(mod, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { decl.analysis = .codegen_failure; @@ -4417,7 +4222,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -4437,7 +4242,8 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { /// analyzed, and for ensuring it can exist at runtime (see /// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`. -pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { +pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4475,7 +4281,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { // Decl itself is safely analyzed, and body analysis is not yet queued - try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); + try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index }); if (mod.emit_h != null) { // TODO: we ideally only want to do this if the function's type changed // since the last update @@ -4527,42 +4333,54 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (file.root_decl != .none) return; const gpa = mod.gpa; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const ty_ty = comptime Type.initTag(.type); - struct_obj.* = .{ - .owner_decl = undefined, // set below + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. 
+ const new_namespace_index = try mod.createNamespace(.{ + .parent = .none, + .ty = undefined, + .file_scope = file, + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, null); + const new_decl = mod.declPtr(new_decl_index); + errdefer @panic("TODO error handling"); + + const struct_index = try mod.createStruct(.{ + .owner_decl = new_decl_index, .fields = .{}, .zir_index = undefined, // set below .layout = .Auto, .status = .none, .known_non_opv = undefined, .is_tuple = undefined, // set below - .namespace = .{ - .parent = null, - .ty = struct_ty, - .file_scope = file, - }, - }; - const new_decl_index = try mod.allocateNewDecl(&struct_obj.namespace, 0, null); - const new_decl = mod.declPtr(new_decl_index); + .namespace = new_namespace_index, + }); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(struct_ty); + + new_namespace.ty = struct_ty.toType(); file.root_decl = new_decl_index.toOptional(); - struct_obj.owner_decl = new_decl_index; - new_decl.name = try file.fullyQualifiedNameZ(gpa); + + new_decl.name = try file.fullyQualifiedName(mod); new_decl.src_line = 0; new_decl.is_pub = true; new_decl.is_exported = false; new_decl.has_align = false; new_decl.has_linksection_or_addrspace = false; - new_decl.ty = ty_ty; - new_decl.val = struct_val; + new_decl.ty = Type.type; + new_decl.val = struct_ty.toValue(); new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.owns_tv = true; new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive. 
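// ---------------------------------------------------------------------------
// Editor's note: a minimal sketch (with a hypothetical `Node` type) of the
// two-phase initialization `semaFile` performs above: when records refer to
// one another by index rather than by pointer, the slots are created first
// with `undefined` placeholders and the cross-references are patched once
// every index is known.
// ---------------------------------------------------------------------------
const std = @import("std");

const NodeIndex = enum(u32) { _ };

const Node = struct {
    peer: NodeIndex, // set in the second phase
    payload: u32,
};

fn createPair(list: *std.ArrayList(Node)) !struct { a: NodeIndex, b: NodeIndex } {
    // Phase 1: allocate both slots so their indices are known.
    const a = @intToEnum(NodeIndex, @intCast(u32, list.items.len));
    try list.append(.{ .peer = undefined, .payload = 1 });
    const b = @intToEnum(NodeIndex, @intCast(u32, list.items.len));
    try list.append(.{ .peer = undefined, .payload = 2 });
    // Phase 2: both slots exist, so the mutual references can be patched.
    list.items[@enumToInt(a)].peer = b;
    list.items[@enumToInt(b)].peer = a;
    return .{ .a = a, .b = b };
}
// Indices stay valid as the backing list grows, which is one reason the
// patch prefers them to pointers in these structures.
// ---------------------------------------------------------------------------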
@@ -4573,6 +4391,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (file.status == .success_zir) { assert(file.zir_loaded); const main_struct_inst = Zir.main_struct_inst; + const struct_obj = mod.structPtr(struct_index); struct_obj.zir_index = main_struct_inst; const extended = file.zir.instructions.items(.data)[main_struct_inst].extended; const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -4582,25 +4401,34 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { defer sema_arena.deinit(); const sema_arena_allocator = sema_arena.allocator(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = sema_arena_allocator, - .perm_arena = new_decl_arena_allocator, .code = file.zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null); + var wip_captures = try WipCaptureScope.init(gpa, null); defer wip_captures.deinit(); - if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| { + if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| { try wip_captures.finalize(); + for (comptime_mutable_decls.items) |decl_index| { + const decl = mod.declPtr(decl_index); + try decl.intern(mod); + } new_decl.analysis = .complete; } else |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -4632,8 +4460,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { } else { new_decl.analysis = .file_failure; } - - try new_decl.finalizeNewArena(&new_decl_arena); } /// Returns `true` if the Decl type changed. 
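// ---------------------------------------------------------------------------
// Editor's note: `semaFile` above threads a `comptime_mutable_decls` list
// through Sema and interns every recorded decl only after analysis succeeds;
// `semaDecl` and `analyzeFnBody` below repeat the same pattern. A minimal
// sketch of that record-then-finalize shape (all names here hypothetical):
// ---------------------------------------------------------------------------
const std = @import("std");

// Analysis phase: every comptime mutation records the decl it touched.
fn recordMutation(pending: *std.ArrayList(u32), decl_index: u32) !void {
    try pending.append(decl_index);
}

// Finalization phase: runs once, only after the whole body analyzed
// successfully, mirroring the `try decl.intern(mod)` loops in the patch.
fn finalize(pending: []const u32) void {
    for (pending) |decl_index| {
        _ = decl_index; // e.g. intern the decl's final value here
    }
}
// ---------------------------------------------------------------------------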
@@ -4645,68 +4471,52 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { const decl = mod.declPtr(decl_index); - if (decl.getFileScope().status != .success_zir) { + if (decl.getFileScope(mod).status != .success_zir) { return error.AnalysisFail; } const gpa = mod.gpa; - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const zir_datas = zir.instructions.items(.data); decl.analysis = .in_progress; - // We need the memory for the Type to go into the arena for the Decl - var decl_arena = std.heap.ArenaAllocator.init(gpa); - const decl_arena_allocator = decl_arena.allocator(); - const decl_value_arena = blk: { - errdefer decl_arena.deinit(); - const s = try decl_arena_allocator.create(ValueArena); - s.* = .{ .state = undefined }; - break :blk s; - }; - defer { - if (decl.value_arena) |value_arena| { - assert(value_arena.state_acquired == null); - decl_value_arena.prev = value_arena; - } - - decl_value_arena.state = decl_arena.state; - decl.value_arena = decl_value_arena; - } - var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - const analysis_arena_allocator = analysis_arena.allocator(); + + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = analysis_arena_allocator, - .perm_arena = decl_arena_allocator, + .arena = analysis_arena.allocator(), .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); if (mod.declIsRoot(decl_index)) { - log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; - const struct_obj = decl.getStruct().?; + const struct_index = decl.getOwnedStructIndex(mod).unwrap().?; + const struct_obj = mod.structPtr(struct_index); // This might not have gotten set in `semaFile` if the first time had // a ZIR failure, so we set it here in case. 
struct_obj.zir_index = main_struct_inst; - try sema.analyzeStructDecl(decl, main_struct_inst, struct_obj); + try sema.analyzeStructDecl(decl, main_struct_inst, struct_index); decl.analysis = .complete; decl.generation = mod.generation; return false; } - log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block_scope: Sema.Block = .{ @@ -4724,12 +4534,16 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { block_scope.params.deinit(gpa); } - const zir_block_index = decl.zirBlockIndex(); + const zir_block_index = decl.zirBlockIndex(mod); const inst_data = zir_datas[zir_block_index].pl_node; const extra = zir.extraData(Zir.Inst.Block, inst_data.payload_index); const body = zir.extra[extra.end..][0..extra.data.body_len]; const result_ref = (try sema.analyzeBodyBreak(&block_scope, body)).?.operand; try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 }; const section_src: LazySrcLoc = .{ .node_offset_var_decl_section = 0 }; const address_space_src: LazySrcLoc = .{ .node_offset_var_decl_addrspace = 0 }; @@ -4748,16 +4562,15 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl_tv.ty.fmt(mod), }); } - var buffer: Value.ToTypeBuffer = undefined; - const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator); - if (ty.getNamespace() == null) { + const ty = decl_tv.val.toType(); + if (ty.getNamespace(mod) == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } - decl.ty = Type.type; - decl.val = try Value.Tag.ty.create(decl_arena_allocator, ty); + decl.ty = InternPool.Index.type_type.toType(); + decl.val = ty.toValue(); decl.@"align" = 0; - decl.@"linksection" = null; + decl.@"linksection" = .none; decl.has_tv = true; decl.owns_tv = false; decl.analysis = .complete; @@ -4766,8 +4579,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (decl_tv.val.castTag(.function)) |fn_payload| { - const func = fn_payload.data; + if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| { + const func = mod.funcPtr(func_index); const owns_tv = func.owner_decl == decl_index; if (owns_tv) { var prev_type_has_bits = false; @@ -4775,31 +4588,30 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var type_changed = true; if (decl.has_tv) { - prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(); + prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); - if (decl.getFunction()) |prev_func| { + if (decl.getOwnedFunction(mod)) |prev_func| { prev_is_inline = prev_func.state == .inline_only; } } decl.clearValues(mod); - decl.ty = try decl_tv.ty.copy(decl_arena_allocator); - decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.ty = decl_tv.ty; + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); // linksection, align, and addrspace were already set by Sema decl.has_tv = true; decl.owns_tv = owns_tv; decl.analysis = .complete; decl.generation = mod.generation; - const is_inline = decl.ty.fnCallingConvention() == .Inline; + const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = 
@boolToInt(decl.is_pub) }; if (is_inline) { return sema.fail(&block_scope, export_src, "export of inline function", .{}); } // The scope needs to have the decl in it. - const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; - try sema.analyzeExport(&block_scope, export_src, options, decl_index); + try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } return type_changed or is_inline != prev_is_inline; } @@ -4813,64 +4625,57 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.owns_tv = false; var queue_linker_work = false; var is_extern = false; - switch (decl_tv.val.tag()) { - .variable => { - const variable = decl_tv.val.castTag(.variable).?.data; - if (variable.owner_decl == decl_index) { + switch (decl_tv.val.toIntern()) { + .generic_poison => unreachable, + .unreachable_value => unreachable, + else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { + .variable => |variable| if (variable.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; + }, - const copied_init = try variable.init.copy(decl_arena_allocator); - variable.init = copied_init; - } - }, - .extern_fn => { - const extern_fn = decl_tv.val.castTag(.extern_fn).?.data; - if (extern_fn.owner_decl == decl_index) { + .extern_func => |extern_fn| if (extern_fn.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; is_extern = true; - } - }, + }, - .generic_poison => unreachable, - .unreachable_value => unreachable, + .func => {}, - .function => {}, - - else => { - log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); - queue_linker_work = true; + else => { + queue_linker_work = true; + }, }, } - decl.ty = try decl_tv.ty.copy(decl_arena_allocator); - decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.ty = decl_tv.ty; + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); decl.@"align" = blk: { - const align_ref = decl.zirAlignRef(); + const align_ref = decl.zirAlignRef(mod); if (align_ref == .none) break :blk 0; break :blk try sema.resolveAlign(&block_scope, align_src, align_ref); }; decl.@"linksection" = blk: { - const linksection_ref = decl.zirLinksectionRef(); - if (linksection_ref == .none) break :blk null; + const linksection_ref = decl.zirLinksectionRef(mod); + if (linksection_ref == .none) break :blk .none; const bytes = try sema.resolveConstString(&block_scope, section_src, linksection_ref, "linksection must be comptime-known"); if (mem.indexOfScalar(u8, bytes, 0) != null) { return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{}); } else if (bytes.len == 0) { return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); } - break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; + const section = try mod.intern_pool.getOrPutString(gpa, bytes); + break :blk section.toOptional(); }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.tag()) { - .function, .extern_fn => .function, + const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { .variable => .variable, + .extern_func, .func => .function, else => .constant, }; const target = sema.mod.getTarget(); - break :blk switch (decl.zirAddrspaceRef()) { + break :blk switch (decl.zirAddrspaceRef(mod)) { .none => switch (addrspace_ctx) { .function => target_util.defaultAddressSpace(target, .function), .variable => target_util.defaultAddressSpace(target, .global_mutable), 
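// ---------------------------------------------------------------------------
// Editor's note: in `semaDecl` above, ownership of a variable or function
// value is now decided by converting the value's InternPool index to a key
// (a tagged union) and switching on it, rather than by inspecting `Value`
// payload tags. A minimal sketch with a toy key type; `Key` and its fields
// are stand-ins, not the real InternPool API:
// ---------------------------------------------------------------------------
const Key = union(enum) {
    variable: struct { decl: u32 },
    extern_func: struct { decl: u32 },
    func,
    other,
};

fn declOwnsValue(key: Key, decl_index: u32) bool {
    return switch (key) {
        .variable => |v| v.decl == decl_index,
        .extern_func => |e| e.decl == decl_index,
        .func, .other => false,
    };
}
// ---------------------------------------------------------------------------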
@@ -4888,7 +4693,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { (queue_linker_work and try sema.typeHasRuntimeBits(decl.ty)); if (has_runtime_bits) { - log.debug("queue linker work for {*} ({s})", .{ decl, decl.name }); // Needed for codegen_decl which will call updateDecl and then the // codegen backend wants full access to the Decl Type. @@ -4904,8 +4708,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; // The scope needs to have the decl in it. - const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; - try sema.analyzeExport(&block_scope, export_src, options, decl_index); + try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } return type_changed; @@ -4930,10 +4733,6 @@ pub fn declareDeclDependencyType(mod: *Module, depender_index: Decl.Index, depen } } - log.debug("{*} ({s}) depends on {*} ({s})", .{ - depender, depender.name, dependee, dependee.name, - }); - if (dependee.deletion_flag) { dependee.deletion_flag = false; assert(mod.deletion_set.swapRemove(dependee_index)); @@ -5222,7 +5021,7 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void { pub fn scanNamespace( mod: *Module, - namespace: *Namespace, + namespace_index: Namespace.Index, extra_start: usize, decls_len: u32, parent_decl: *Decl, @@ -5231,6 +5030,7 @@ pub fn scanNamespace( defer tracy.end(); const gpa = mod.gpa; + const namespace = mod.namespacePtr(namespace_index); const zir = namespace.file_scope.zir; try mod.comp.work_queue.ensureUnusedCapacity(decls_len); @@ -5243,7 +5043,7 @@ pub fn scanNamespace( var decl_i: u32 = 0; var scan_decl_iter: ScanDeclIter = .{ .module = mod, - .namespace = namespace, + .namespace_index = namespace_index, .parent_decl = parent_decl, }; while (decl_i < decls_len) : (decl_i += 1) { @@ -5266,7 +5066,7 @@ pub fn scanNamespace( const ScanDeclIter = struct { module: *Module, - namespace: *Namespace, + namespace_index: Namespace.Index, parent_decl: *Decl, usingnamespace_index: usize = 0, comptime_index: usize = 0, @@ -5278,9 +5078,11 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err defer tracy.end(); const mod = iter.module; - const namespace = iter.namespace; + const namespace_index = iter.namespace_index; + const namespace = mod.namespacePtr(namespace_index); const gpa = mod.gpa; const zir = namespace.file_scope.zir; + const ip = &mod.intern_pool; // zig fmt: off const is_pub = (flags & 0b0001) != 0; @@ -5300,31 +5102,31 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err // Every Decl needs a name. 
var is_named_test = false; var kind: Decl.Kind = .named; - const decl_name: [:0]const u8 = switch (decl_name_index) { + const decl_name: InternPool.NullTerminatedString = switch (decl_name_index) { 0 => name: { if (export_bit) { const i = iter.usingnamespace_index; iter.usingnamespace_index += 1; kind = .@"usingnamespace"; - break :name try std.fmt.allocPrintZ(gpa, "usingnamespace_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "usingnamespace_{d}", .{i}); } else { const i = iter.comptime_index; iter.comptime_index += 1; kind = .@"comptime"; - break :name try std.fmt.allocPrintZ(gpa, "comptime_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "comptime_{d}", .{i}); } }, 1 => name: { const i = iter.unnamed_test_index; iter.unnamed_test_index += 1; kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "test_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "test_{d}", .{i}); }, 2 => name: { is_named_test = true; const test_name = zir.nullTerminatedString(decl_doccomment_index); kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "decltest.{s}", .{test_name}); + break :name try ip.getOrPutStringFmt(gpa, "decltest.{s}", .{test_name}); }, else => name: { const raw_name = zir.nullTerminatedString(decl_name_index); @@ -5332,14 +5134,12 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err is_named_test = true; const test_name = zir.nullTerminatedString(decl_name_index + 1); kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "test.{s}", .{test_name}); + break :name try ip.getOrPutStringFmt(gpa, "test.{s}", .{test_name}); } else { - break :name try gpa.dupeZ(u8, raw_name); + break :name try ip.getOrPutString(gpa, raw_name); } }, }; - var must_free_decl_name = true; - defer if (must_free_decl_name) gpa.free(decl_name); const is_exported = export_bit and decl_name_index != 0; if (kind == .@"usingnamespace") try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1); @@ -5347,21 +5147,19 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err // We create a Decl for it regardless of analysis status. 
const gop = try namespace.decls.getOrPutContextAdapted( gpa, - @as([]const u8, mem.sliceTo(decl_name, 0)), + decl_name, DeclAdapter{ .mod = mod }, Namespace.DeclContext{ .module = mod }, ); const comp = mod.comp; if (!gop.found_existing) { - const new_decl_index = try mod.allocateNewDecl(namespace, decl_node, iter.parent_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace_index, decl_node, iter.parent_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; - must_free_decl_name = false; if (kind == .@"usingnamespace") { namespace.usingnamespace_set.putAssumeCapacity(new_decl_index, is_pub); } - log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace }); new_decl.src_line = line; gop.key_ptr.* = new_decl_index; // Exported decls, comptime decls, usingnamespace decls, and @@ -5382,7 +5180,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err if (!comp.bin_file.options.is_test) break :blk false; if (decl_pkg != mod.main_pkg) break :blk false; if (comp.test_filter) |test_filter| { - if (mem.indexOf(u8, decl_name, test_filter) == null) { + if (mem.indexOf(u8, ip.stringToSlice(decl_name), test_filter) == null) { break :blk false; } } @@ -5405,16 +5203,13 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err const decl = mod.declPtr(decl_index); if (kind == .@"test") { const src_loc = SrcLoc{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = .{ .token_offset = 1 }, }; - const msg = try ErrorMsg.create( - gpa, - src_loc, - "duplicate test name: {s}", - .{decl_name}, - ); + const msg = try ErrorMsg.create(gpa, src_loc, "duplicate test name: {}", .{ + decl_name.fmt(&mod.intern_pool), + }); errdefer msg.destroy(gpa); try mod.failed_decls.putNoClobber(gpa, decl_index, msg); const other_src_loc = SrcLoc{ @@ -5424,7 +5219,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err }; try mod.errNoteNonLazy(other_src_loc, msg, "other test here", .{}); } - log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace }); // Update the AST node of the decl; even if its contents are unchanged, it may // have been re-ordered. decl.src_node = decl_node; @@ -5436,7 +5230,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @intCast(u32, decl_sub_index); - if (decl.getFunction()) |_| { + if (decl.getOwnedFunctionIndex(mod) != .none) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state @@ -5458,7 +5252,6 @@ pub fn clearDecl( defer tracy.end(); const decl = mod.declPtr(decl_index); - log.debug("clearing {*} ({s})", .{ decl, decl.name }); const gpa = mod.gpa; try mod.deletion_set.ensureUnusedCapacity(gpa, decl.dependencies.count()); @@ -5473,9 +5266,6 @@ pub fn clearDecl( const dep = mod.declPtr(dep_index); dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { - log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ - decl, decl.name, dep, dep.name, - }); // We don't recursively perform a deletion here, because during the update, // another reference to it may turn up. 
dep.deletion_flag = true; @@ -5510,10 +5300,10 @@ pub fn clearDecl( try mod.deleteDeclExports(decl_index); if (decl.has_tv) { - if (decl.ty.isFnOrHasRuntimeBits()) { + if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } - if (decl.getInnerNamespace()) |namespace| { + if (decl.getOwnedInnerNamespace(mod)) |namespace| { try namespace.deleteAllDecls(mod, outdated_decls); } } @@ -5530,10 +5320,9 @@ pub fn clearDecl( /// This function is exclusively called for anonymous decls. pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); - log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); assert(!mod.declIsRoot(decl_index)); - assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); + assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); const dependants = decl.dependants.keys(); for (dependants) |dep| { @@ -5558,10 +5347,9 @@ fn markDeclForDeletion(mod: *Module, decl_index: Decl.Index) !void { /// If other decls depend on this decl, they must be aborted first. pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); - log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name }); assert(!mod.declIsRoot(decl_index)); - assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); + assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); // An aborted decl must not have dependants -- they must have // been aborted first and removed from this list. @@ -5575,6 +5363,17 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { mod.destroyDecl(decl_index); } +/// Finalize the creation of an anon decl. +pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { + // The Decl starts off with alive=false and the codegen backend will set alive=true + // if the Decl is referenced by an instruction or another constant. Otherwise, + // the Decl will be garbage collected by the `codegen_decl` task instead of sent + // to the linker. + if (mod.declPtr(decl_index).ty.isFnOrHasRuntimeBits(mod)) { + try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = decl_index }); + } +} + /// Delete all the Export objects that are caused by this Decl. Re-analysis of /// this Decl will cause them to be re-created (or not). 
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { @@ -5600,51 +5399,53 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void } } if (mod.comp.bin_file.cast(link.File.Elf)) |elf| { - elf.deleteDeclExport(decl_index, exp.options.name); + elf.deleteDeclExport(decl_index, exp.opts.name); } if (mod.comp.bin_file.cast(link.File.MachO)) |macho| { - try macho.deleteDeclExport(decl_index, exp.options.name); + try macho.deleteDeclExport(decl_index, exp.opts.name); } if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| { wasm.deleteDeclExport(decl_index); } if (mod.comp.bin_file.cast(link.File.Coff)) |coff| { - coff.deleteDeclExport(decl_index, exp.options.name); + coff.deleteDeclExport(decl_index, exp.opts.name); } if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| { failed_kv.value.destroy(mod.gpa); } - mod.gpa.free(exp.options.name); mod.gpa.destroy(exp); } export_owners.deinit(mod.gpa); } -pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { +pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); - // Use the Decl's arena for captured values. - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + + const fn_ty = decl.ty; var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = arena, - .perm_arena = decl_arena_allocator, - .code = decl.getFileScope().zir, + .code = decl.getFileScope(mod).zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = func, - .fn_ret_ty = decl.ty.fnReturnType(), + .func_index = func_index.toOptional(), + .fn_ret_ty = mod.typeToFunc(fn_ty).?.return_type.toType(), .owner_func = func, + .owner_func_index = func_index.toOptional(), .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); @@ -5656,7 +5457,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); sema.air_extra.items.len += reserved_count; - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var inner_block: Sema.Block = .{ @@ -5680,9 +5481,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. 
- const fn_ty = decl.ty; - const fn_ty_info = fn_ty.fnInfo(); - const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len); + const runtime_params_len = @intCast(u32, mod.typeToFunc(fn_ty).?.param_types.len); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); @@ -5697,9 +5496,9 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const param_ty = if (func.comptime_args) |comptime_args| t: { const arg_tv = comptime_args[total_param_index]; - const arg_val = if (arg_tv.val.tag() != .generic_poison) + const arg_val = if (!arg_tv.val.isGenericPoison()) arg_tv.val - else if (arg_tv.ty.onePossibleValue()) |opv| + else if (try arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -5708,7 +5507,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { sema.inst_map.putAssumeCapacityNoClobber(inst, arg); total_param_index += 1; continue; - } else fn_ty_info.param_types[runtime_param_index]; + } else mod.typeToFunc(fn_ty).?.param_types[runtime_param_index].toType(); const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, @@ -5740,7 +5539,6 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { } func.state = .in_progress; - log.debug("set {s} to in_progress", .{decl.name}); const last_arg_index = inner_block.instructions.items.len; @@ -5765,7 +5563,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // is unused so it just has to be a no-op. sema.air_instructions.set(ptr_inst.*, .{ .tag = .alloc, - .data = .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int) }, + .data = .{ .ty = Type.single_const_pointer_to_comptime_int }, }); } } @@ -5773,7 +5571,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // If we don't get an error return trace from a caller, create our own. if (func.calls_or_awaits_errorable_fn and mod.comp.bin_file.options.error_return_tracing and - !sema.fn_ret_ty.isError()) + !sema.fn_ret_ty.isError(mod)) { sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) { // TODO make these unreachable instead of @panic @@ -5786,6 +5584,10 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } // Copy the block into place and mark that as the main block. try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + @@ -5797,14 +5599,13 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index; func.state = .success; - log.debug("set {s} to success", .{decl.name}); // Finally we must resolve the return type and parameter types so that backends // have full access to type information. // Crucially, this happens *after* we set the function state to success above, // so that dependencies on the function body will now be satisfied rather than // result in circular dependency errors. 
- sema.resolveFnTypes(fn_ty_info) catch |err| switch (err) { + sema.resolveFnTypes(fn_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -5820,9 +5621,8 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // Similarly, resolve any queued up types that were requested to be resolved for // the backends. - for (sema.types_to_resolve.items) |inst_ref| { - const ty = sema.getTmpAir().getRefType(inst_ref); - sema.resolveTypeFully(ty) catch |err| switch (err) { + for (sema.types_to_resolve.keys()) |ty| { + sema.resolveTypeFully(ty.toType()) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -5840,13 +5640,11 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { return Air{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = try sema.air_extra.toOwnedSlice(gpa), - .values = try sema.air_values.toOwnedSlice(gpa), }; } fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { const decl = mod.declPtr(decl_index); - log.debug("mark outdated {*} ({s})", .{ decl, decl.name }); try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl_index }); if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(mod.gpa); @@ -5854,11 +5652,8 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { if (mod.cimport_errors.fetchSwapRemove(decl_index)) |kv| { for (kv.value) |err| err.deinit(mod.gpa); } - if (decl.has_tv and decl.owns_tv) { - if (decl.val.castTag(.function)) |payload| { - const func = payload.data; - _ = mod.align_stack_fns.remove(func); - } + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { + _ = mod.align_stack_fns.remove(func); } if (mod.emit_h) |emit_h| { if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| { @@ -5869,9 +5664,51 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { decl.analysis = .outdated; } +pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { + if (mod.namespaces_free_list.popOrNull()) |index| { + mod.allocated_namespaces.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try mod.allocated_namespaces.addOne(mod.gpa); + ptr.* = initialization; + return @intToEnum(Namespace.Index, mod.allocated_namespaces.len - 1); +} + +pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { + mod.namespacePtr(index).* = undefined; + mod.namespaces_free_list.append(mod.gpa, index) catch { + // In order to keep `destroyNamespace` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Namespace until garbage collection. 
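+        // The slot in `allocated_namespaces` remains allocated either way;
+        // the free list only lets `createNamespace` reuse it later.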
+ }; +} + +pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index { + return mod.intern_pool.createStruct(mod.gpa, initialization); +} + +pub fn destroyStruct(mod: *Module, index: Struct.Index) void { + return mod.intern_pool.destroyStruct(mod.gpa, index); +} + +pub fn createUnion(mod: *Module, initialization: Union) Allocator.Error!Union.Index { + return mod.intern_pool.createUnion(mod.gpa, initialization); +} + +pub fn destroyUnion(mod: *Module, index: Union.Index) void { + return mod.intern_pool.destroyUnion(mod.gpa, index); +} + +pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index { + return mod.intern_pool.createFunc(mod.gpa, initialization); +} + +pub fn destroyFunc(mod: *Module, index: Fn.Index) void { + return mod.intern_pool.destroyFunc(mod.gpa, index); +} + pub fn allocateNewDecl( mod: *Module, - namespace: *Namespace, + namespace: Namespace.Index, src_node: Ast.Node.Index, src_scope: ?*CaptureScope, ) !Decl.Index { @@ -5896,6 +5733,7 @@ pub fn allocateNewDecl( }; }; + if (src_scope) |scope| scope.incRef(); decl_and_index.new_decl.* = .{ .name = undefined, .src_namespace = namespace, @@ -5906,7 +5744,7 @@ pub fn allocateNewDecl( .ty = undefined, .val = undefined, .@"align" = undefined, - .@"linksection" = undefined, + .@"linksection" = .none, .@"addrspace" = .generic, .analysis = .unreferenced, .deletion_flag = false, @@ -5924,25 +5762,20 @@ pub fn allocateNewDecl( return decl_and_index.decl_index; } -/// Get error value for error tag `name`. -pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged(ErrorInt).KV { +pub fn getErrorValue( + mod: *Module, + name: InternPool.NullTerminatedString, +) Allocator.Error!ErrorInt { const gop = try mod.global_error_set.getOrPut(mod.gpa, name); - if (gop.found_existing) { - return std.StringHashMapUnmanaged(ErrorInt).KV{ - .key = gop.key_ptr.*, - .value = gop.value_ptr.*, - }; - } + return @intCast(ErrorInt, gop.index); +} - errdefer assert(mod.global_error_set.remove(name)); - try mod.error_name_list.ensureUnusedCapacity(mod.gpa, 1); - gop.key_ptr.* = try mod.gpa.dupe(u8, name); - gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len); - mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*); - return std.StringHashMapUnmanaged(ErrorInt).KV{ - .key = gop.key_ptr.*, - .value = gop.value_ptr.*, - }; +pub fn getErrorValueFromSlice( + mod: *Module, + name: []const u8, +) Allocator.Error!ErrorInt { + const interned_name = try mod.intern_pool.getOrPutString(mod.gpa, name); + return getErrorValue(mod, interned_name); } pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index { @@ -5953,29 +5786,28 @@ pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedV pub fn createAnonymousDeclFromDecl( mod: *Module, src_decl: *Decl, - namespace: *Namespace, + namespace: Namespace.Index, src_scope: ?*CaptureScope, tv: TypedValue, ) !Decl.Index { const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope); errdefer mod.destroyDecl(new_decl_index); - const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{ - src_decl.name, @enumToInt(new_decl_index), + const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{ + src_decl.name.fmt(&mod.intern_pool), @enumToInt(new_decl_index), }); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); return new_decl_index; } -/// Takes ownership of `name` even if it returns an error. 
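+/// `name` is an interned string; this function does not take ownership of
+/// any allocation.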
pub fn initNewAnonDecl( mod: *Module, new_decl_index: Decl.Index, src_line: u32, - namespace: *Namespace, + namespace: Namespace.Index, typed_value: TypedValue, - name: [:0]u8, -) !void { - errdefer mod.gpa.free(name); + name: InternPool.NullTerminatedString, +) Allocator.Error!void { + assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern())); const new_decl = mod.declPtr(new_decl_index); @@ -5984,34 +5816,12 @@ pub fn initNewAnonDecl( new_decl.ty = typed_value.ty; new_decl.val = typed_value.val; new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; - try namespace.anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); - - // The Decl starts off with alive=false and the codegen backend will set alive=true - // if the Decl is referenced by an instruction or another constant. Otherwise, - // the Decl will be garbage collected by the `codegen_decl` task instead of sent - // to the linker. - if (typed_value.ty.isFnOrHasRuntimeBits()) { - try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); - } -} - -pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type { - const int_payload = try arena.create(Type.Payload.Bits); - int_payload.* = .{ - .base = .{ - .tag = switch (signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - }, - }, - .data = bits, - }; - return Type.initPayload(&int_payload.base); + try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); } pub fn errNoteNonLazy( @@ -6073,16 +5883,17 @@ pub const SwitchProngSrc = union(enum) { /// the LazySrcLoc in order to emit a compile error. pub fn resolve( prong_src: SwitchProngSrc, - gpa: Allocator, + mod: *Module, decl: *Decl, switch_node_offset: i32, range_expand: RangeExpand, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6166,11 +5977,12 @@ pub const PeerTypeCandidateSrc = union(enum) { pub fn resolve( self: PeerTypeCandidateSrc, - gpa: Allocator, + mod: *Module, decl: *Decl, candidate_i: usize, ) ?LazySrcLoc { @setCold(true); + const gpa = mod.gpa; switch (self) { .none => { @@ -6192,10 +6004,10 @@ pub const PeerTypeCandidateSrc = union(enum) { else => {}, } - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6254,15 +6066,16 @@ fn queryFieldSrc( pub fn paramSrc( func_node_offset: i32, - gpa: Allocator, + mod: *Module, decl: *Decl, param_i: usize, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6284,19 +6097,20 @@ pub fn paramSrc( } pub fn argSrc( + mod: *Module, call_node_offset: i32, - gpa: Allocator, decl: *Decl, start_arg_i: usize, bound_arg_src: ?LazySrcLoc, ) LazySrcLoc { + @setCold(true); + const gpa = mod.gpa; if (start_arg_i == 0 and bound_arg_src != null) return bound_arg_src.?; const arg_i = start_arg_i - @boolToInt(bound_arg_src != null); - @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6310,7 +6124,7 @@ pub fn argSrc( const node_datas = tree.nodes.items(.data); const call_args_node = tree.extra_data[node_datas[node].rhs - 1]; const call_args_offset = decl.nodeIndexToRelative(call_args_node); - return initSrc(call_args_offset, gpa, decl, arg_i); + return mod.initSrc(call_args_offset, decl, arg_i); }, else => unreachable, }; @@ -6318,16 +6132,17 @@ pub fn argSrc( } pub fn initSrc( + mod: *Module, init_node_offset: i32, - gpa: Allocator, decl: *Decl, init_index: usize, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6363,12 +6178,13 @@ pub fn initSrc( } } -pub fn optionsSrc(gpa: Allocator, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { +pub fn optionsSrc(mod: *Module, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6430,11 +6246,13 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { // deletion set at this time. for (file.deleted_decls.items) |decl_index| { const decl = mod.declPtr(decl_index); - log.debug("deleted from source: {*} ({s})", .{ decl, decl.name }); // Remove from the namespace it resides in, preserving declaration order. assert(decl.zir_decl_index != 0); - _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{ .mod = mod }); + _ = mod.namespacePtr(decl.src_namespace).decls.orderedRemoveAdapted( + decl.name, + DeclAdapter{ .mod = mod }, + ); try mod.clearDecl(decl_index, &outdated_decls); mod.destroyDecl(decl_index); @@ -6454,7 +6272,7 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { pub fn processExports(mod: *Module) !void { const gpa = mod.gpa; // Map symbol names to `Export` for name collision detection. 
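+    // Export names are interned, so the map hashes the compact string index
+    // rather than the name bytes.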
- var symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{}; + var symbol_exports: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, *Export) = .{}; defer symbol_exports.deinit(gpa); var it = mod.decl_exports.iterator(); @@ -6462,13 +6280,13 @@ pub fn processExports(mod: *Module) !void { const exported_decl = entry.key_ptr.*; const exports = entry.value_ptr.items; for (exports) |new_export| { - const gop = try symbol_exports.getOrPut(gpa, new_export.options.name); + const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name); if (gop.found_existing) { new_export.status = .failed_retryable; try mod.failed_exports.ensureUnusedCapacity(gpa, 1); const src_loc = new_export.getSrcLoc(mod); - const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{ - new_export.options.name, + const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {}", .{ + new_export.opts.name.fmt(&mod.intern_pool), }); errdefer msg.destroy(gpa); const other_export = gop.value_ptr.*; @@ -6501,11 +6319,16 @@ pub fn populateTestFunctions( main_progress_node: *std.Progress.Node, ) !void { const gpa = mod.gpa; + const ip = &mod.intern_pool; const builtin_pkg = mod.main_pkg.table.get("builtin").?; const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); - const builtin_namespace = root_decl.src_namespace; - const decl_index = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{ .mod = mod }).?; + const builtin_namespace = mod.namespacePtr(root_decl.src_namespace); + const test_functions_str = try ip.getOrPutString(gpa, "test_functions"); + const decl_index = builtin_namespace.decls.getKeyAdapted( + test_functions_str, + DeclAdapter{ .mod = mod }, + ).?; { // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. @@ -6518,90 +6341,117 @@ pub fn populateTestFunctions( try mod.ensureDeclAnalyzed(decl_index); } const decl = mod.declPtr(decl_index); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); + const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod); + const null_usize = try mod.intern(.{ .opt = .{ + .ty = try mod.intern(.{ .opt_type = .usize_type }), + .val = .none, + } }); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions // decl reference it as a slice. - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); - - const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); - const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ - .ty = try Type.Tag.array.create(arena, .{ - .len = test_fn_vals.len, - .elem_type = try tmp_test_fn_ty.copy(arena), - }), - .val = try Value.Tag.aggregate.create(arena, test_fn_vals), - }); - const array_decl = mod.declPtr(array_decl_index); + const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count()); + defer gpa.free(test_fn_vals); // Add a dependency on each test name and function pointer. 
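+        // The dependencies are gathered into a list here and declared via
+        // `declareDeclDependency` once the array decl has been created below.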
- try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); + var array_decl_dependencies = std.ArrayListUnmanaged(Decl.Index){}; + defer array_decl_dependencies.deinit(gpa); + try array_decl_dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); - for (mod.test_functions.keys(), 0..) |test_decl_index, i| { + for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - const test_name_slice = mem.sliceTo(test_decl.name, 0); + // TODO: write something like getCoercedInts to avoid needing to dupe + const test_decl_name = try gpa.dupe(u8, ip.stringToSlice(test_decl.name)); + defer gpa.free(test_decl_name); const test_name_decl_index = n: { - var name_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer name_decl_arena.deinit(); - const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); - const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len), - .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), + const test_name_decl_ty = try mod.arrayType(.{ + .len = test_decl_name.len, + .child = .u8_type, + }); + const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = test_name_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = test_name_decl_ty.toIntern(), + .storage = .{ .bytes = test_decl_name }, + } })).toValue(), }); - try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); break :n test_name_decl_index; }; - array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, .normal); - array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal); + array_decl_dependencies.appendAssumeCapacity(test_decl_index); + array_decl_dependencies.appendAssumeCapacity(test_name_decl_index); try mod.linkerUpdateDecl(test_name_decl_index); - const field_vals = try arena.create([3]Value); - field_vals.* = .{ - try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index), - .len = try Value.Tag.int_u64.create(arena, test_name_slice.len), - }), // name - try Value.Tag.decl_ref.create(arena, test_decl_index), // func - Value.initTag(.null_value), // async_frame_size + const test_fn_fields = .{ + // name + try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = test_name_decl_index }, + .len = try mod.intern(.{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = test_decl_name.len }, + } }), + } }), + // func + try mod.intern(.{ .ptr = .{ + .ty = try mod.intern(.{ .ptr_type = .{ + .child = test_decl.ty.toIntern(), + .flags = .{ + .is_const = true, + }, + } }), + .addr = .{ .decl = test_decl_index }, + } }), + // async_frame_size + null_usize, }; - test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals); + test_fn_val.* = try mod.intern(.{ .aggregate = .{ + .ty = test_fn_ty.toIntern(), + .storage = .{ .elems = &test_fn_fields }, + } }); + } + + const array_decl_ty = try mod.arrayType(.{ + .len = test_fn_vals.len, + .child = test_fn_ty.toIntern(), + .sentinel = .none, + }); + const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = array_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.toIntern(), + .storage = .{ .elems = test_fn_vals }, + } })).toValue(), + }); + for (array_decl_dependencies.items) 
|array_decl_dependency| { + try mod.declareDeclDependency(array_decl_index, array_decl_dependency); } - try array_decl.finalizeNewArena(&new_decl_arena); break :d array_decl_index; }; try mod.linkerUpdateDecl(array_decl_index); { - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); + const new_ty = try mod.ptrType(.{ + .child = test_fn_ty.toIntern(), + .flags = .{ + .is_const = true, + .size = .Slice, + }, + }); + const new_val = decl.val; + const new_init = try mod.intern(.{ .ptr = .{ + .ty = new_ty.toIntern(), + .addr = .{ .decl = array_decl_index }, + .len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(), + } }); + ip.mutateVarInit(decl.val.toIntern(), new_init); - { - // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. - const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); - const new_var = try gpa.create(Var); - errdefer gpa.destroy(new_var); - new_var.* = decl.val.castTag(.variable).?.data.*; - new_var.init = try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index), - .len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()), - }); - const new_val = try Value.Tag.variable.create(arena, new_var); - - // Since we are replacing the Decl's value we must perform cleanup on the - // previous value. - decl.clearValues(mod); - decl.ty = new_ty; - decl.val = new_val; - decl.has_tv = true; - } - - try decl.finalizeNewArena(&new_decl_arena); + // Since we are replacing the Decl's value we must perform cleanup on the + // previous value. + decl.clearValues(mod); + decl.ty = new_ty; + decl.val = new_val; + decl.has_tv = true; } try mod.linkerUpdateDecl(decl_index); } @@ -6631,7 +6481,7 @@ pub fn linkerUpdateDecl(mod: *Module, decl_index: Decl.Index) !void { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -6673,64 +6523,49 @@ fn reportRetryableFileError( gop.value_ptr.* = err_msg; } -pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { - switch (val.tag()) { - .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index), - .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl), - .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl), - .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl), - .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data), - - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data), - - .eu_payload_ptr, - .opt_payload_ptr, - => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr), - - .slice => { - const slice = val.cast(Value.Payload.Slice).?.data; - mod.markReferencedDeclsAlive(slice.ptr); - mod.markReferencedDeclsAlive(slice.len); +pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void { + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| try mod.markDeclIndexAlive(variable.decl), + .extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl), + .func => |func| try 
mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .error_union => |error_union| switch (error_union.val) { + .err_name => {}, + .payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()), }, - - .elem_ptr => { - const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data; - return mod.markReferencedDeclsAlive(elem_ptr.array_ptr); - }, - .field_ptr => { - const field_ptr = val.cast(Value.Payload.FieldPtr).?.data; - return mod.markReferencedDeclsAlive(field_ptr.container_ptr); - }, - .aggregate => { - for (val.castTag(.aggregate).?.data) |field_val| { - mod.markReferencedDeclsAlive(field_val); + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| try mod.markDeclIndexAlive(decl), + .mut_decl => |mut_decl| try mod.markDeclIndexAlive(mut_decl.decl), + .int, .comptime_field => {}, + .eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(parent.toValue()), + .elem, .field => |base_index| try mod.markReferencedDeclsAlive(base_index.base.toValue()), } + if (ptr.len != .none) try mod.markReferencedDeclsAlive(ptr.len.toValue()); }, - .@"union" => { - const data = val.cast(Value.Payload.Union).?.data; - mod.markReferencedDeclsAlive(data.tag); - mod.markReferencedDeclsAlive(data.val); + .opt => |opt| if (opt.val != .none) try mod.markReferencedDeclsAlive(opt.val.toValue()), + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| + try mod.markReferencedDeclsAlive(elem.toValue()), + .un => |un| { + try mod.markReferencedDeclsAlive(un.tag.toValue()); + try mod.markReferencedDeclsAlive(un.val.toValue()); }, - else => {}, } } -pub fn markDeclAlive(mod: *Module, decl: *Decl) void { +pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void { if (decl.alive) return; decl.alive = true; + try decl.intern(mod); + // This is the first time we are marking this Decl alive. We must // therefore recurse into its value and mark any Decl it references // as also alive, so that any Decl referenced does not get garbage collected. - mod.markReferencedDeclsAlive(decl.val); + try mod.markReferencedDeclsAlive(decl.val); } -fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void { +fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { return mod.markDeclAlive(mod.declPtr(decl_index)); } @@ -6779,3 +6614,522 @@ pub fn backendSupportsFeature(mod: Module, feature: Feature) bool { .field_reordering => mod.comp.bin_file.options.use_llvm, }; } + +/// Shortcut for calling `intern_pool.get`. +pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index { + return mod.intern_pool.get(mod.gpa, key); +} + +/// Shortcut for calling `intern_pool.getCoerced`. 
+pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value {
+    return (try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())).toValue();
+}
+
+pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
+    return (try intern(mod, .{ .int_type = .{
+        .signedness = signedness,
+        .bits = bits,
+    } })).toType();
+}
+
+pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type {
+    const i = try intern(mod, .{ .array_type = info });
+    return i.toType();
+}
+
+pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type {
+    const i = try intern(mod, .{ .vector_type = info });
+    return i.toType();
+}
+
+pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type {
+    const i = try intern(mod, .{ .opt_type = child_type });
+    return i.toType();
+}
+
+pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
+    var canon_info = info;
+    const have_elem_layout = info.child.toType().layoutIsResolved(mod);
+
+    if (info.flags.size == .C) canon_info.flags.is_allowzero = true;
+
+    // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
+    // type, we change it to 0 here. If this causes an assertion trip because the
+    // pointee type needs to be resolved more, that needs to be done before calling
+    // this `ptrType` function.
+    if (info.flags.alignment.toByteUnitsOptional()) |info_align| {
+        if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) {
+            canon_info.flags.alignment = .none;
+        }
+    }
+
+    switch (info.flags.vector_index) {
+        // Canonicalize host_size. If it matches the bit size of the pointee type,
+        // we change it to 0 here. If this causes an assertion trip, the pointee type
+        // needs to be resolved before calling this `ptrType` function.
+        .none => if (have_elem_layout and info.packed_offset.host_size != 0) {
+            const elem_bit_size = info.child.toType().bitSize(mod);
+            assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
+            if (info.packed_offset.host_size * 8 == elem_bit_size) {
+                canon_info.packed_offset.host_size = 0;
+            }
+        },
+        .runtime => {},
+        _ => assert(@enumToInt(info.flags.vector_index) < info.packed_offset.host_size),
+    }
+
+    return (try intern(mod, .{ .ptr_type = canon_info })).toType();
+}
+
+pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+    return ptrType(mod, .{ .child = child_type.toIntern() });
+}
+
+pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+    return ptrType(mod, .{
+        .child = child_type.toIntern(),
+        .flags = .{
+            .is_const = true,
+        },
+    });
+}
+
+pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+    return ptrType(mod, .{
+        .child = child_type.toIntern(),
+        .flags = .{
+            .size = .Many,
+            .is_const = true,
+        },
+    });
+}
+
+pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
+    const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern());
+    return mod.ptrType(.{
+        .child = new_child.toIntern(),
+        .sentinel = info.sentinel,
+        .flags = info.flags,
+        .packed_offset = info.packed_offset,
+    });
+}
+
+pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
+    return (try intern(mod, .{ .func_type = info })).toType();
+}
+
+/// Use this for `anyframe->T` only.
+/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly.
+pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type { + return (try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })).toType(); +} + +pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type { + return (try intern(mod, .{ .error_union_type = .{ + .error_set_type = error_set_ty.toIntern(), + .payload_type = payload_ty.toIntern(), + } })).toType(); +} + +pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { + const names: *const [1]InternPool.NullTerminatedString = &name; + return (try mod.intern_pool.get(mod.gpa, .{ .error_set_type = .{ .names = names } })).toType(); +} + +/// Sorts `names` in place. +pub fn errorSetFromUnsortedNames( + mod: *Module, + names: []InternPool.NullTerminatedString, +) Allocator.Error!Type { + std.mem.sort( + InternPool.NullTerminatedString, + names, + {}, + InternPool.NullTerminatedString.indexLessThan, + ); + const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } }); + return new_ty.toType(); +} + +/// Supports optionals in addition to pointers. +pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + return mod.getCoerced(try mod.intValue_u64(Type.usize, x), ty); +} + +/// Supports only pointers. See `ptrIntValue` for pointer-like optional support. +pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + assert(ty.zigTypeTag(mod) == .Pointer); + const i = try intern(mod, .{ .ptr = .{ + .ty = ty.toIntern(), + .addr = .{ .int = try mod.intValue_u64(Type.usize, x) }, + } }); + return i.toValue(); +} + +/// Creates an enum tag value based on the integer tag value. +pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value { + if (std.debug.runtime_safety) { + const tag = ty.zigTypeTag(mod); + assert(tag == .Enum); + } + const i = try intern(mod, .{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = tag_int, + } }); + return i.toValue(); +} + +/// Creates an enum tag value based on the field index according to source code +/// declaration order. +pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value { + const ip = &mod.intern_pool; + const gpa = mod.gpa; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + + if (enum_type.values.len == 0) { + // Auto-numbered fields. 
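+        // With no explicit tag values, the tag integer is simply the field
+        // index, typed as the enum's integer tag type.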
+ return (try ip.get(gpa, .{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = field_index }, + } }), + } })).toValue(); + } + + return (try ip.get(gpa, .{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = enum_type.values[field_index], + } })).toValue(); +} + +pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); + if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); + var limbs_buffer: [4]usize = undefined; + var big_int = BigIntMutable.init(&limbs_buffer, x); + return intValue_big(mod, ty, big_int.toConst()); +} + +pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .big_int = x }, + } }); + return i.toValue(); +} + +pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .u64 = x }, + } }); + return i.toValue(); +} + +pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .i64 = x }, + } }); + return i.toValue(); +} + +pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value { + const i = try intern(mod, .{ .un = .{ + .ty = union_ty.toIntern(), + .tag = tag.toIntern(), + .val = val.toIntern(), + } }); + return i.toValue(); +} + +/// This function casts the float representation down to the representation of the type, potentially +/// losing data if the representation wasn't correct. +pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) { + 16 => .{ .f16 = @floatCast(f16, x) }, + 32 => .{ .f32 = @floatCast(f32, x) }, + 64 => .{ .f64 = @floatCast(f64, x) }, + 80 => .{ .f80 = @floatCast(f80, x) }, + 128 => .{ .f128 = @floatCast(f128, x) }, + else => unreachable, + }; + const i = try intern(mod, .{ .float = .{ + .ty = ty.toIntern(), + .storage = storage, + } }); + return i.toValue(); +} + +pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { + const ip = &mod.intern_pool; + assert(ip.isOptionalType(opt_ty.toIntern())); + const result = try ip.get(mod.gpa, .{ .opt = .{ + .ty = opt_ty.toIntern(), + .val = .none, + } }); + return result.toValue(); +} + +pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { + return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); +} + +/// Returns the smallest possible integer type containing both `min` and +/// `max`. Asserts that neither value is undef. +/// TODO: if #3806 is implemented, this becomes trivial +pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { + assert(!min.isUndef(mod)); + assert(!max.isUndef(mod)); + + if (std.debug.runtime_safety) { + assert(Value.order(min, max, mod).compare(.lte)); + } + + const sign = min.orderAgainstZero(mod) == .lt; + + const min_val_bits = intBitsForValue(mod, min, sign); + const max_val_bits = intBitsForValue(mod, max, sign); + + return mod.intType( + if (sign) .signed else .unsigned, + @max(min_val_bits, max_val_bits), + ); +} + +/// Given a value representing an integer, returns the number of bits necessary to represent +/// this value in an integer. 
If `sign` is true, returns the number of bits necessary in a +/// twos-complement integer; otherwise in an unsigned integer. +/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. +pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { + assert(!val.isUndef(mod)); + + const key = mod.intern_pool.indexToKey(val.toIntern()); + switch (key.int.storage) { + .i64 => |x| { + if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @boolToInt(sign); + assert(sign); + // Protect against overflow in the following negation. + if (x == std.math.minInt(i64)) return 64; + return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; + }, + .u64 => |x| { + return Type.smallestUnsignedBits(x) + @boolToInt(sign); + }, + .big_int => |big| { + if (big.positive) return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); + + // Zero is still a possibility, in which case unsigned is fine + if (big.eqZero()) return 0; + + return @intCast(u16, big.bitCountTwosComp()); + }, + .lazy_align => |lazy_ty| { + return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @boolToInt(sign); + }, + .lazy_size => |lazy_ty| { + return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @boolToInt(sign); + }, + } +} + +pub const AtomicPtrAlignmentError = error{ + FloatTooBig, + IntTooBig, + BadType, + OutOfMemory, +}; + +pub const AtomicPtrAlignmentDiagnostics = struct { + bits: u16 = undefined, + max_bits: u16 = undefined, +}; + +/// If ABI alignment of `ty` is OK for atomic operations, returns 0. +/// Otherwise returns the alignment required on a pointer for the target +/// to perform atomic operations. +// TODO this function does not take into account CPU features, which can affect +// this value. Audit this! 
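+// (The lone exception below is x86_64, which checks the `cx16` feature to
+// allow 128-bit atomics.)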
+pub fn atomicPtrAlignment( + mod: *Module, + ty: Type, + diags: *AtomicPtrAlignmentDiagnostics, +) AtomicPtrAlignmentError!u32 { + const target = mod.getTarget(); + const max_atomic_bits: u16 = switch (target.cpu.arch) { + .avr, + .msp430, + .spu_2, + => 16, + + .arc, + .arm, + .armeb, + .hexagon, + .m68k, + .le32, + .mips, + .mipsel, + .nvptx, + .powerpc, + .powerpcle, + .r600, + .riscv32, + .sparc, + .sparcel, + .tce, + .tcele, + .thumb, + .thumbeb, + .x86, + .xcore, + .amdil, + .hsail, + .spir, + .kalimba, + .lanai, + .shave, + .wasm32, + .renderscript32, + .csky, + .spirv32, + .dxil, + .loongarch32, + .xtensa, + => 32, + + .amdgcn, + .bpfel, + .bpfeb, + .le64, + .mips64, + .mips64el, + .nvptx64, + .powerpc64, + .powerpc64le, + .riscv64, + .sparc64, + .s390x, + .amdil64, + .hsail64, + .spir64, + .wasm64, + .renderscript64, + .ve, + .spirv64, + .loongarch64, + => 64, + + .aarch64, + .aarch64_be, + .aarch64_32, + => 128, + + .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, + }; + + const int_ty = switch (ty.zigTypeTag(mod)) { + .Int => ty, + .Enum => ty.intTagType(mod), + .Float => { + const bit_count = ty.floatBits(target); + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.FloatTooBig; + } + return 0; + }, + .Bool => return 0, + else => { + if (ty.isPtrAtRuntime(mod)) return 0; + return error.BadType; + }, + }; + + const bit_count = int_ty.intInfo(mod).bits; + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.IntTooBig; + } + + return 0; +} + +pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc { + return mod.declPtr(opaque_type.decl).srcLoc(mod); +} + +pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) !InternPool.NullTerminatedString { + return mod.declPtr(opaque_type.decl).getFullyQualifiedName(mod); +} + +pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File { + return mod.declPtr(decl_index).getFileScope(mod); +} + +pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index { + return mod.namespacePtr(namespace_index).getDeclIndex(mod); +} + +/// Returns null in the following cases: +/// * `@TypeOf(.{})` +/// * A struct which has no fields (`struct {}`). +/// * Not a struct. 
+pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { + if (ty.ip_index == .none) return null; + const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null; + return mod.structPtr(struct_index); +} + +pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { + if (ty.ip_index == .none) return null; + const union_index = mod.intern_pool.indexToUnionType(ty.toIntern()).unwrap() orelse return null; + return mod.unionPtr(union_index); +} + +pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { + if (ty.ip_index == .none) return null; + return mod.intern_pool.indexToFuncType(ty.toIntern()); +} + +pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { + const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null; + return mod.inferredErrorSetPtr(index); +} + +pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex { + if (ty.ip_index == .none) return .none; + return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern()); +} + +pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { + @setCold(true); + const owner_decl = mod.declPtr(owner_decl_index); + const file = owner_decl.getFileScope(mod); + const tree = file.getTree(mod.gpa) catch |err| { + // In this case we emit a warning + a less precise source location. + log.warn("unable to load {s}: {s}", .{ + file.sub_file_path, @errorName(err), + }); + return owner_decl.srcLoc(mod); + }; + const node = owner_decl.relativeToNodeIndex(0); + var buf: [2]Ast.Node.Index = undefined; + if (tree.fullContainerDecl(&buf, node)) |container_decl| { + return queryFieldSrc(tree.*, query, file, container_decl); + } else { + // This type was generated using @Type + return owner_decl.srcLoc(mod); + } +} + +pub fn toEnum(mod: *Module, comptime E: type, val: Value) E { + return mod.intern_pool.toEnum(E, val.toIntern()); +} diff --git a/src/RangeSet.zig b/src/RangeSet.zig index aa051ff424..f808322fc7 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -1,18 +1,18 @@ const std = @import("std"); +const assert = std.debug.assert; const Order = std.math.Order; -const RangeSet = @This(); +const InternPool = @import("InternPool.zig"); const Module = @import("Module.zig"); +const RangeSet = @This(); const SwitchProngSrc = @import("Module.zig").SwitchProngSrc; -const Type = @import("type.zig").Type; -const Value = @import("value.zig").Value; ranges: std.ArrayList(Range), module: *Module, pub const Range = struct { - first: Value, - last: Value, + first: InternPool.Index, + last: InternPool.Index, src: SwitchProngSrc, }; @@ -29,18 +29,27 @@ pub fn deinit(self: *RangeSet) void { pub fn add( self: *RangeSet, - first: Value, - last: Value, - ty: Type, + first: InternPool.Index, + last: InternPool.Index, src: SwitchProngSrc, ) !?SwitchProngSrc { + const mod = self.module; + const ip = &mod.intern_pool; + + const ty = ip.typeOf(first); + assert(ty == ip.typeOf(last)); + for (self.ranges.items) |range| { - if (last.compareAll(.gte, range.first, ty, self.module) and - first.compareAll(.lte, range.last, ty, self.module)) + assert(ty == ip.typeOf(range.first)); + assert(ty == ip.typeOf(range.last)); + + if (last.toValue().compareScalar(.gte, range.first.toValue(), ty.toType(), mod) and + first.toValue().compareScalar(.lte, range.last.toValue(), ty.toType(), mod)) { return range.src; // They overlap. 
} } + try self.ranges.append(.{ .first = first, .last = last, @@ -49,45 +58,43 @@ pub fn add( return null; } -const LessThanContext = struct { ty: Type, module: *Module }; - /// Assumes a and b do not overlap -fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool { - return a.first.compareAll(.lt, b.first, ctx.ty, ctx.module); +fn lessThan(mod: *Module, a: Range, b: Range) bool { + const ty = mod.intern_pool.typeOf(a.first).toType(); + return a.first.toValue().compareScalar(.lt, b.first.toValue(), ty, mod); } -pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { +pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool { + const mod = self.module; + const ip = &mod.intern_pool; + assert(ip.typeOf(first) == ip.typeOf(last)); + if (self.ranges.items.len == 0) return false; - std.mem.sort(Range, self.ranges.items, LessThanContext{ - .ty = ty, - .module = self.module, - }, lessThan); + std.mem.sort(Range, self.ranges.items, mod, lessThan); - if (!self.ranges.items[0].first.eql(first, ty, self.module) or - !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module)) + if (self.ranges.items[0].first != first or + self.ranges.items[self.ranges.items.len - 1].last != last) { return false; } - var space: Value.BigIntSpace = undefined; + var space: InternPool.Key.Int.Storage.BigIntSpace = undefined; var counter = try std.math.big.int.Managed.init(self.ranges.allocator); defer counter.deinit(); - const target = self.module.getTarget(); - // look for gaps for (self.ranges.items[1..], 0..) |cur, i| { // i starts counting from the second item. const prev = self.ranges.items[i]; // prev.last + 1 == cur.first - try counter.copy(prev.last.toBigInt(&space, target)); + try counter.copy(prev.last.toValue().toBigInt(&space, mod)); try counter.addScalar(&counter, 1); - const cur_start_int = cur.first.toBigInt(&space, target); + const cur_start_int = cur.first.toValue().toBigInt(&space, mod); if (!cur_start_int.eq(counter.toConst())) { return false; } diff --git a/src/Sema.zig b/src/Sema.zig index 9e21bfa83d..aa04c40fd0 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -11,13 +11,9 @@ gpa: Allocator, /// Points to the temporary arena allocator of the Sema. /// This arena will be cleared when the sema is destroyed. arena: Allocator, -/// Points to the arena allocator for the owner_decl. -/// This arena will persist until the decl is invalidated. -perm_arena: Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, -air_values: std.ArrayListUnmanaged(Value) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -28,10 +24,12 @@ owner_decl_index: Decl.Index, /// For an inline or comptime function call, this will be the root parent function /// which contains the callsite. Corresponds to `owner_decl`. owner_func: ?*Module.Fn, +owner_func_index: Module.Fn.OptionalIndex, /// The function this ZIR code is the body of, according to the source code. /// This starts out the same as `owner_func` and then diverges in the case of /// an inline or comptime function call. func: ?*Module.Fn, +func_index: Module.Fn.OptionalIndex, /// Used to restore the error return trace when returning a non-error from a function. 
 error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
 /// When semantic analysis needs to know the return type of the function whose body
@@ -65,12 +63,15 @@ comptime_args_fn_inst: Zir.Inst.Index = 0,
 /// to use this instead of allocating a fresh one. This avoids an unnecessary
 /// extra hash table lookup in the `monomorphed_funcs` set.
 /// Sema will set this to `.none` when it takes ownership.
-preallocated_new_func: ?*Module.Fn = null,
-/// The key is `constant` AIR instructions to types that must be fully resolved
-/// after the current function body analysis is done.
-/// TODO: after upgrading to use InternPool change the key here to be an
-/// InternPool value index.
-types_to_resolve: std.ArrayListUnmanaged(Air.Inst.Ref) = .{},
+preallocated_new_func: Module.Fn.OptionalIndex = .none,
+/// The keys are types that must be fully resolved prior to the machine code
+/// generation pass. Types are added to this set when resolving them
+/// immediately could cause a dependency loop, but they do need to be resolved
+/// before machine code generation passes process the AIR.
+/// It would work fine if this were an array list instead of an array hash map.
+/// I chose array hash map with the intention to save time by omitting
+/// duplicates.
+types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
 /// These are lazily created runtime blocks from block_inline instructions.
 /// They are created when a break_inline passes through a runtime condition, because
 /// Sema must convert comptime control flow to runtime control flow, which means
 /// breaking from a block.
@@ -84,12 +85,22 @@ is_generic_instantiation: bool = false,
 /// function types will emit generic poison instead of a partial type.
 no_partial_func_ty: bool = false,
 
-unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
+/// The temporary arena is used for the memory of the `InferredAlloc` values
+/// here so the values can be dropped without any cleanup.
+unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{},
+
+/// Indices of comptime-mutable decls created by this Sema. These decls' values
+/// should be interned after analysis completes, as they may refer to memory in
+/// the Sema arena.
+/// TODO: this is a workaround for memory bugs triggered by the removal of
+/// Decl.value_arena. A better solution needs to be found. Probably this will
+/// involve transitioning comptime-mutable memory away from using Decls at all.
+comptime_mutable_decls: *std.ArrayList(Decl.Index),
 
 const std = @import("std");
 const math = std.math;
 const mem = std.mem;
-const Allocator = std.mem.Allocator;
+const Allocator = mem.Allocator;
 const assert = std.debug.assert;
 const log = std.log.scoped(.sema);
@@ -114,6 +125,7 @@ const Package = @import("Package.zig");
 const crash_report = @import("crash_report.zig");
 const build_options = @import("build_options");
 const Compilation = @import("Compilation.zig");
+const InternPool = @import("InternPool.zig");
 
 pub const default_branch_quota = 1000;
 pub const default_reference_trace_len = 2;
@@ -226,7 +238,7 @@ pub const Block = struct {
     sema: *Sema,
     /// The namespace to use for lookups from this source block
     /// When analyzing fields, this is different from src_decl.src_namespace.
-    namespace: *Namespace,
+    namespace: Namespace.Index,
     /// The AIR instructions generated for this block.
     instructions: std.ArrayListUnmanaged(Air.Inst.Index),
     // `param` instructions are collected here to be used by the `func` instruction.
@@ -285,6 +297,7 @@ pub const Block = struct { fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void { const parent = msg orelse return; + const mod = sema.mod; const prefix = "expression is evaluated at comptime because "; switch (cr) { .c_import => |ci| { @@ -292,21 +305,21 @@ pub const Block = struct { }, .comptime_ret_ty => |rt| { const src_loc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| blk: { - var src_loc = fn_decl.srcLoc(); + var src_loc = fn_decl.srcLoc(mod); src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 }; break :blk src_loc; } else blk: { - const src_decl = sema.mod.declPtr(rt.block.src_decl); - break :blk rt.func_src.toSrcLoc(src_decl); + const src_decl = mod.declPtr(rt.block.src_decl); + break :blk rt.func_src.toSrcLoc(src_decl, mod); }; - if (rt.return_ty.tag() == .generic_poison) { - return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); + if (rt.return_ty.isGenericPoison()) { + return mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); } - try sema.mod.errNoteNonLazy( + try mod.errNoteNonLazy( src_loc, parent, prefix ++ "the function returns a comptime-only type '{}'", - .{rt.return_ty.fmt(sema.mod)}, + .{rt.return_ty.fmt(mod)}, ); try sema.explainWhyTypeIsComptime(parent, src_loc, rt.return_ty); }, @@ -398,8 +411,8 @@ pub const Block = struct { }; } - pub fn getFileScope(block: *Block) *Module.File { - return block.namespace.file_scope; + pub fn getFileScope(block: *Block, mod: *Module) *Module.File { + return mod.namespacePtr(block.namespace).file_scope; } fn addTy( @@ -584,13 +597,18 @@ pub const Block = struct { } fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref { + const sema = block.sema; + const mod = sema.mod; return block.addInst(.{ .tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector, .data = .{ .ty_pl = .{ - .ty = try block.sema.addType( - try Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool), + .ty = try sema.addType( + try mod.vectorType(.{ + .len = sema.typeOf(lhs).vectorLen(mod), + .child = .bool_type, + }), ), - .payload = try block.sema.addExtra(Air.VectorCmp{ + .payload = try sema.addExtra(Air.VectorCmp{ .lhs = lhs, .rhs = rhs, .op = Air.VectorCmp.encodeOp(cmp_op), @@ -684,29 +702,20 @@ pub const Block = struct { pub fn startAnonDecl(block: *Block) !WipAnonDecl { return WipAnonDecl{ .block = block, - .new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa), .finished = false, }; } pub const WipAnonDecl = struct { block: *Block, - new_decl_arena: std.heap.ArenaAllocator, finished: bool, - pub fn arena(wad: *WipAnonDecl) Allocator { - return wad.new_decl_arena.allocator(); - } - pub fn deinit(wad: *WipAnonDecl) void { - if (!wad.finished) { - wad.new_decl_arena.deinit(); - } wad.* = undefined; } /// `alignment` value of 0 means to use ABI alignment. - pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u32) !Decl.Index { + pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u64) !Decl.Index { const sema = wad.block.sema; // Do this ahead of time because `createAnonymousDecl` depends on calling // `type.hasRuntimeBits()`. 
@@ -716,10 +725,11 @@ pub const Block = struct {
                 .val = val,
             });
             const new_decl = sema.mod.declPtr(new_decl_index);
-            new_decl.@"align" = alignment;
+            // TODO: migrate Decl alignment to use `InternPool.Alignment`
+            new_decl.@"align" = @intCast(u32, alignment);
             errdefer sema.mod.abortAnonDecl(new_decl_index);
-            try new_decl.finalizeNewArena(&wad.new_decl_arena);
             wad.finished = true;
+            try sema.mod.finalizeAnonDecl(new_decl_index);
             return new_decl_index;
         }
     };
@@ -736,11 +746,27 @@ const LabeledBlock = struct {
     }
 };
 
+/// The values stored in the inferred allocation. These go into peer type
+/// resolution. They are stored in a separate list so that the items are
+/// contiguous in memory and thus can be passed to `Module.resolvePeerTypes`.
+const InferredAlloc = struct {
+    prongs: std.MultiArrayList(struct {
+        /// The dummy instruction used as a peer to resolve the type.
+        /// Although its type is redundant with that of `placeholder`, it is
+        /// needed as well because it may be a constant value, which affects
+        /// peer type resolution.
+        stored_inst: Air.Inst.Ref,
+        /// The bitcast instruction used as a placeholder when the
+        /// new result pointer type is not yet known.
+        placeholder: Air.Inst.Index,
+    }) = .{},
+};
+
 pub fn deinit(sema: *Sema) void {
     const gpa = sema.gpa;
     sema.air_instructions.deinit(gpa);
     sema.air_extra.deinit(gpa);
-    sema.air_values.deinit(gpa);
     sema.inst_map.deinit(gpa);
     sema.decl_val_table.deinit(gpa);
     sema.types_to_resolve.deinit(gpa);
@@ -823,7 +849,7 @@ pub fn analyzeBodyBreak(
         else => |e| return e,
     };
     if (block.instructions.items.len != 0 and
-        sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn())
+        sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])))
         return null;
     const break_data = sema.code.instructions.items(.data)[break_inst].@"break";
     const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data;
@@ -858,18 +884,20 @@ fn analyzeBodyInner(
 
     try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body);
 
+    // Most of the time, we don't need to construct a new capture scope for a
+    // block. However, successive iterations of comptime loops can capture
+    // different values for the same Zir.Inst.Index, so in those cases, we will
+    // have to create nested capture scopes; see the `.repeat` case below.
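+    // Creating the fresh scopes as children of the parent keeps earlier
+    // declarations visible while giving each iteration its own capture slots.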
const parent_capture_scope = block.wip_capture_scope; - - var wip_captures = WipCaptureScope{ - .finalized = true, + parent_capture_scope.incRef(); + var wip_captures: WipCaptureScope = .{ .scope = parent_capture_scope, - .perm_arena = sema.perm_arena, .gpa = sema.gpa, + .finalized = true, // don't finalize the parent scope }; - defer if (wip_captures.scope != parent_capture_scope) { - wip_captures.deinit(); - }; + defer wip_captures.deinit(); + const mod = sema.mod; const map = &sema.inst_map; const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); @@ -890,15 +918,15 @@ fn analyzeBodyInner( crash_info.setBodyIndex(i); const inst = body[i]; std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ - sema.mod.declPtr(block.src_decl).src_namespace.file_scope.sub_file_path, inst, + mod.namespacePtr(mod.declPtr(block.src_decl).src_namespace).file_scope.sub_file_path, inst, }); const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_mut)), + .alloc_inferred => try sema.zirAllocInferred(block, inst, true), + .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, false), + .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, true), + .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, false), .alloc_mut => try sema.zirAllocMut(block, inst), .alloc_comptime_mut => try sema.zirAllocComptime(block, inst), .make_ptr_const => try sema.zirMakePtrConst(block, inst), @@ -962,7 +990,7 @@ fn analyzeBodyInner( .int_big => try sema.zirIntBig(block, inst), .float => try sema.zirFloat(block, inst), .float128 => try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), + .int_type => try sema.zirIntType(inst), .is_non_err => try sema.zirIsNonErr(block, inst), .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), .ret_is_non_err => try sema.zirRetIsNonErr(block, inst), @@ -1420,6 +1448,11 @@ fn analyzeBodyInner( const src = LazySrcLoc.nodeOffset(datas[inst].node); try sema.emitBackwardBranch(block, src); if (wip_captures.scope.captures.count() != orig_captures) { + // We need to construct new capture scopes for the next loop iteration so it + // can capture values without clobbering the earlier iteration's captures. + // At first, we reused the parent capture scope as an optimization, but for + // successive scopes we have to create new ones as children of the parent + // scope. try wip_captures.reset(parent_capture_scope); block.wip_capture_scope = wip_captures.scope; orig_captures = 0; @@ -1435,6 +1468,11 @@ fn analyzeBodyInner( const src = LazySrcLoc.nodeOffset(datas[inst].node); try sema.emitBackwardBranch(block, src); if (wip_captures.scope.captures.count() != orig_captures) { + // We need to construct new capture scopes for the next loop iteration so it + // can capture values without clobbering the earlier iteration's captures. + // At first, we reused the parent capture scope as an optimization, but for + // successive scopes we have to create new ones as children of the parent + // scope. 
try wip_captures.reset(parent_capture_scope); block.wip_capture_scope = wip_captures.scope; orig_captures = 0; @@ -1621,18 +1659,18 @@ fn analyzeBodyInner( const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1654,11 +1692,11 @@ fn analyzeBodyInner( const err_union = try sema.analyzeLoad(block, src, operand, operand_src); const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1684,7 +1722,7 @@ fn analyzeBodyInner( const extra = sema.code.extraData(Zir.Inst.DeferErrCode, inst_data.payload_index).data; const defer_body = sema.code.extra[extra.index..][0..extra.len]; const err_code = try sema.resolveInst(inst_data.err_code); - sema.inst_map.putAssumeCapacity(extra.remapped_err_code, err_code); + map.putAssumeCapacity(extra.remapped_err_code, err_code); const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) { error.ComptimeBreak => sema.comptime_break_inst, else => |e| return e, @@ -1693,8 +1731,12 @@ fn analyzeBodyInner( break :blk Air.Inst.Ref.void_value; }, }; - if (sema.typeOf(air_inst).isNoReturn()) + if (sema.isNoReturn(air_inst)) { + // We're going to assume that the body itself is noreturn, so let's ensure that now + assert(block.instructions.items.len > 0); + assert(sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1]))); break always_noreturn; + } map.putAssumeCapacity(inst, air_inst); i += 1; }; @@ -1703,7 +1745,7 @@ fn analyzeBodyInner( const noreturn_inst = block.instructions.popOrNull(); while (dbg_block_begins > 0) { dbg_block_begins -= 1; - if (block.is_comptime or sema.mod.comp.bin_file.options.strip) continue; + if (block.is_comptime or 
mod.comp.bin_file.options.strip) continue; _ = try block.addInst(.{ .tag = .dbg_block_end, @@ -1713,6 +1755,8 @@ fn analyzeBodyInner( if (noreturn_inst) |some| try block.instructions.append(sema.gpa, some); if (!wip_captures.finalized) { + // We've updated the capture scope due to a `repeat` instruction where + // the body had a capture; finalize our child scope and reset try wip_captures.finalize(); block.wip_capture_scope = parent_capture_scope; } @@ -1720,20 +1764,23 @@ fn analyzeBodyInner( return result; } -pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { - var i: usize = @enumToInt(zir_ref); - - // First section of indexes correspond to a set number of constant values. - if (i < Zir.Inst.Ref.typed_value_map.len) { - // We intentionally map the same indexes to the same values between ZIR and AIR. - return zir_ref; +pub fn resolveInstAllowNone(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { + if (zir_ref == .none) { + return .none; + } else { + return resolveInst(sema, zir_ref); } - i -= Zir.Inst.Ref.typed_value_map.len; +} - // Finally, the last section of indexes refers to the map of ZIR=>AIR. - const inst = sema.inst_map.get(@intCast(u32, i)).?; - const ty = sema.typeOf(inst); - if (ty.tag() == .generic_poison) return error.GenericPoison; +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { + assert(zir_ref != .none); + const i = @enumToInt(zir_ref); + // First section of indexes correspond to a set number of constant values. + // We intentionally map the same indexes to the same values between ZIR and AIR. + if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i); + // The last section of indexes refers to the map of ZIR => AIR. + const inst = sema.inst_map.get(i - InternPool.static_len).?; + if (inst == .generic_poison) return error.GenericPoison; return inst; } @@ -1759,18 +1806,31 @@ pub fn resolveConstString( reason: []const u8, ) ![]u8 { const air_inst = try sema.resolveInst(zir_ref); - const wanted_type = Type.initTag(.const_slice_u8); + const wanted_type = Type.slice_const_u8; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, reason); return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); } -pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - assert(zir_ref != .var_args_param); +pub fn resolveConstStringIntern( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + zir_ref: Zir.Inst.Ref, + reason: []const u8, +) !InternPool.NullTerminatedString { const air_inst = try sema.resolveInst(zir_ref); - assert(air_inst != .var_args_param); + const wanted_type = Type.slice_const_u8; + const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); + const val = try sema.resolveConstValue(block, src, coerced_inst, reason); + return val.toIpString(wanted_type, sema.mod); +} + +pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { + const air_inst = try sema.resolveInst(zir_ref); + assert(air_inst != .var_args_param_type); const ty = try sema.analyzeAsType(block, src, air_inst); - if (ty.tag() == .generic_poison) return error.GenericPoison; + if (ty.isGenericPoison()) return error.GenericPoison; return ty; } @@ -1780,45 +1840,48 @@ fn analyzeAsType( src: LazySrcLoc, air_inst: Air.Inst.Ref, ) !Type { - const wanted_type = Type.initTag(.type); + const wanted_type = Type.type; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); 
const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - const ty = val.toType(&buffer); - return ty.copy(sema.arena); + return val.toType(); } pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; + if (!mod.backendSupportsFeature(.error_return_trace)) return; assert(!block.is_comptime); var err_trace_block = block.makeSubBlock(); - defer err_trace_block.instructions.deinit(sema.gpa); + defer err_trace_block.instructions.deinit(gpa); const src: LazySrcLoc = .unneeded; // var addrs: [err_return_trace_addr_count]usize = undefined; const err_return_trace_addr_count = 32; - const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, sema.mod); - const addrs_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, addr_arr_ty)); + const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, mod); + const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty)); // var st: StackTrace = undefined; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const st_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty)); + const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; - const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true); + const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses"); + const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true); try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store); // st.index = 0; - const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "index", src, true); + const index_field_name = try ip.getOrPutString(gpa, "index"); + const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true); try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store); // @errorReturnTrace() = &st; _ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr); - try block.instructions.insertSlice(sema.gpa, last_arg_index, err_trace_block.instructions.items); + try block.instructions.insertSlice(gpa, last_arg_index, err_trace_block.instructions.items); } /// May return Value Tags: `variable`, `undef`. 
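// A standalone sketch (made-up values) of the two-region index scheme the new
// `resolveInst` relies on: refs below `InternPool.static_len` are shared
// constants that map to themselves between ZIR and AIR, and everything past
// that window is looked up in the ZIR => AIR instruction map.
const std = @import("std");

const static_len: u32 = 4; // stand-in for InternPool.static_len

fn resolve(map: []const u32, ref: u32) u32 {
    if (ref < static_len) return ref; // static constants are identity-mapped
    return map[ref - static_len]; // dynamic refs go through the map
}

test "static refs are identity, dynamic refs go through the map" {
    const map = [_]u32{ 100, 200 };
    try std.testing.expectEqual(@as(u32, 3), resolve(&map, 3));
    try std.testing.expectEqual(@as(u32, 200), resolve(&map, 5));
}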
@@ -1832,7 +1895,7 @@ fn resolveValue( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { - if (val.tag() == .generic_poison) return error.GenericPoison; + if (val.isGenericPoison()) return error.GenericPoison; return val; } return sema.failWithNeededComptime(block, src, reason); @@ -1848,10 +1911,12 @@ fn resolveConstMaybeUndefVal( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| { - switch (val.tag()) { - .variable => return sema.failWithNeededComptime(block, src, reason), + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, - else => return val, + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .variable => return sema.failWithNeededComptime(block, src, reason), + else => return val, + }, } } return sema.failWithNeededComptime(block, src, reason); @@ -1867,16 +1932,31 @@ fn resolveConstValue( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { - switch (val.tag()) { - .undef => return sema.failWithUseOfUndef(block, src), - .variable => return sema.failWithNeededComptime(block, src, reason), + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, - else => return val, + .undef => return sema.failWithUseOfUndef(block, src), + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .undef => return sema.failWithUseOfUndef(block, src), + .variable => return sema.failWithNeededComptime(block, src, reason), + else => return val, + }, } } return sema.failWithNeededComptime(block, src, reason); } +/// Will not return Value Tags: `variable`, `undef`. Instead they will emit compile errors. +/// Lazy values are recursively resolved. +fn resolveConstLazyValue( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, + reason: []const u8, +) CompileError!Value { + return sema.resolveLazyValue(try sema.resolveConstValue(block, src, air_ref, reason)); +} + /// Value Tag `variable` causes this function to return `null`. /// Value Tag `undef` causes this function to return a compile error. fn resolveDefinedValue( @@ -1885,8 +1965,9 @@ fn resolveDefinedValue( src: LazySrcLoc, air_ref: Air.Inst.Ref, ) CompileError!?Value { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(air_ref)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { if (block.is_typeof) return null; return sema.failWithUseOfUndef(block, src); } @@ -1903,34 +1984,53 @@ fn resolveMaybeUndefVal( inst: Air.Inst.Ref, ) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; - switch (val.tag()) { - .variable => return null, + switch (val.ip_index) { .generic_poison => return error.GenericPoison, - else => return val, + .none => return val, + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .variable => return null, + else => return val, + }, } } +/// Value Tag `variable` causes this function to return `null`. +/// Value Tag `undef` causes this function to return the Value. +/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. +/// Lazy values are recursively resolved. +fn resolveMaybeUndefLazyVal( + sema: *Sema, + inst: Air.Inst.Ref, +) CompileError!?Value { + return try sema.resolveLazyValue((try sema.resolveMaybeUndefVal(inst)) orelse return null); +} + /// Value Tag `variable` results in `null`. /// Value Tag `undef` results in the Value. 
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. /// Value Tag `decl_ref` and `decl_ref_mut` or any nested such value results in `null`. +/// Lazy values are recursively resolved. fn resolveMaybeUndefValIntable( sema: *Sema, inst: Air.Inst.Ref, ) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; var check = val; - while (true) switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, + while (true) switch (check.ip_index) { .generic_poison => return error.GenericPoison, - else => { - try sema.resolveLazyValue(val); - return val; + .none => break, + else => switch (sema.mod.intern_pool.indexToKey(check.toIntern())) { + .variable => return null, + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => return null, + .int => break, + .eu_payload, .opt_payload => |base| check = base.toValue(), + .elem, .field => |base_index| check = base_index.base.toValue(), + }, + else => break, }, }; + return try sema.resolveLazyValue(val); } /// Returns all Value tags including `variable` and `undef`. @@ -1949,35 +2049,33 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( inst: Air.Inst.Ref, make_runtime: *bool, ) CompileError!?Value { + assert(inst != .none); // First section of indexes correspond to a set number of constant values. - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val; + const int = @enumToInt(inst); + if (int < InternPool.static_len) { + return @intToEnum(InternPool.Index, int).toValue(); } - i -= Air.Inst.Ref.typed_value_map.len; + const i = int - InternPool.static_len; const air_tags = sema.air_instructions.items(.tag); if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| { - if (air_tags[i] == .constant) { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - const val = sema.air_values.items[ty_pl.payload]; - if (val.tag() == .variable) return val; + if (air_tags[i] == .interned) { + const interned = sema.air_instructions.items(.data)[i].interned; + const val = interned.toValue(); + if (val.getVariable(sema.mod) != null) return val; } return opv; } - switch (air_tags[i]) { - .constant => { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - const val = sema.air_values.items[ty_pl.payload]; - if (val.tag() == .runtime_value) make_runtime.* = true; - if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; - return val; - }, - .const_ty => { - return try sema.air_instructions.items(.data)[i].ty.toValue(sema.arena); - }, + const air_datas = sema.air_instructions.items(.data); + const val = switch (air_tags[i]) { + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, + .interned => air_datas[i].interned.toValue(), else => return null, - } + }; + if (val.isRuntimeValue(sema.mod)) make_runtime.* = true; + if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; + return val; } fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc, reason: []const u8) CompileError { @@ -2010,13 +2108,14 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, opt } fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) 
CompileError { + const mod = sema.mod; const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{ - ty.fmt(sema.mod), + ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - if (ty.isSlice()) { - try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2().fmt(sema.mod)}); + if (ty.isSlice(mod)) { + try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)}); } break :msg msg; }; @@ -2042,7 +2141,8 @@ fn failWithErrorSetCodeMissing( } fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError { - if (int_ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (int_ty.zigTypeTag(mod) == .Vector) { const msg = msg: { const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{ int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod), @@ -2059,16 +2159,17 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: } fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError { + const mod = sema.mod; const msg = msg: { const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{}); errdefer msg.destroy(sema.gpa); - const struct_ty = container_ty.castTag(.@"struct") orelse break :msg msg; - const default_value_src = struct_ty.data.fieldSrcLoc(sema.mod, .{ + const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg; + const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{ .index = field_index, .range = .value, }); - try sema.mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{}); + try mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -2083,13 +2184,19 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError return sema.failWithOwnedErrorMsg(msg); } -fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { - const inner_ty = if (object_ty.isSinglePointer()) object_ty.childType() else object_ty; +fn failWithInvalidFieldAccess( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + object_ty: Type, + field_name: InternPool.NullTerminatedString, +) CompileError { + const mod = sema.mod; + const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty; - if (inner_ty.zigTypeTag() == .Optional) opt: { - var buf: Type.Payload.ElemType = undefined; - const child_ty = inner_ty.optionalChild(&buf); - if (!typeSupportsFieldAccess(child_ty, field_name)) break :opt; + if (inner_ty.zigTypeTag(mod) == .Optional) opt: { + const child_ty = inner_ty.optionalChild(mod); + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2097,9 +2204,9 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (inner_ty.zigTypeTag() == .ErrorUnion) err: { - const child_ty = inner_ty.errorUnionPayload(); - if 
(!typeSupportsFieldAccess(child_ty, field_name)) break :err; + } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: { + const child_ty = inner_ty.errorUnionPayload(mod); + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err; const msg = msg: { const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2111,15 +2218,16 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); } -fn typeSupportsFieldAccess(ty: Type, field_name: []const u8) bool { - switch (ty.zigTypeTag()) { - .Array => return mem.eql(u8, field_name, "len"), +fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool { + const ip = &mod.intern_pool; + switch (ty.zigTypeTag(mod)) { + .Array => return ip.stringEqlSlice(field_name, "len"), .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); if (ptr_info.size == .Slice) { - return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); - } else if (ptr_info.pointee_type.zigTypeTag() == .Array) { - return mem.eql(u8, field_name, "len"); + return ip.stringEqlSlice(field_name, "ptr") or ip.stringEqlSlice(field_name, "len"); + } else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { + return ip.stringEqlSlice(field_name, "len"); } else return false; }, .Type, .Struct, .Union => return true, @@ -2139,7 +2247,7 @@ fn errNote( ) error{OutOfMemory}!void { const mod = sema.mod; const src_decl = mod.declPtr(block.src_decl); - return mod.errNoteNonLazy(src.toSrcLoc(src_decl), parent, format, args); + return mod.errNoteNonLazy(src.toSrcLoc(src_decl, mod), parent, format, args); } fn addFieldErrNote( @@ -2152,19 +2260,19 @@ fn addFieldErrNote( ) !void { @setCold(true); const mod = sema.mod; - const decl_index = container_ty.getOwnerDecl(); + const decl_index = container_ty.getOwnerDecl(mod); const decl = mod.declPtr(decl_index); const field_src = blk: { - const tree = decl.getFileScope().getTree(sema.gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(sema.gpa) catch |err| { log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); - break :blk decl.srcLoc(); + break :blk decl.srcLoc(mod); }; const container_node = decl.relativeToNodeIndex(0); const node_tags = tree.nodes.items(.tag); var buf: [2]std.zig.Ast.Node.Index = undefined; - const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(); + const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(mod); var it_index: usize = 0; for (container_decl.ast.members) |member_node| { @@ -2174,7 +2282,7 @@ fn addFieldErrNote( .container_field, => { if (it_index == field_index) { - break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node)); + break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node), mod); } it_index += 1; }, @@ -2195,7 +2303,7 @@ fn errMsg( ) error{OutOfMemory}!*Module.ErrorMsg { const mod = sema.mod; const src_decl = mod.declPtr(block.src_decl); - return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl), format, args); + return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl, mod), format, args); } pub fn fail( @@ -2212,19 +2320,19 @@ pub fn fail( fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { 
@setCold(true); const gpa = sema.gpa; + const mod = sema.mod; - if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) { + if (crash_report.is_enabled and mod.comp.debug_compile_errors) { if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation; var wip_errors: std.zig.ErrorBundle.Wip = undefined; wip_errors.init(gpa) catch unreachable; - Compilation.addModuleErrorMsg(&wip_errors, err_msg.*) catch unreachable; + Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable; std.debug.print("compile error during Sema:\n", .{}); var error_bundle = wip_errors.toOwnedBundle("") catch unreachable; error_bundle.renderToStdErr(.{ .ttyconf = .no_color }); crash_report.compilerPanic("unexpected compile error occurred", null, null); } - const mod = sema.mod; ref: { errdefer err_msg.destroy(gpa); if (err_msg.src_loc.lazy == .unneeded) { @@ -2234,9 +2342,9 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { try mod.failed_files.ensureUnusedCapacity(gpa, 1); const max_references = blk: { - if (sema.mod.comp.reference_trace) |num| break :blk num; + if (mod.comp.reference_trace) |num| break :blk num; // Do not add multiple traces without explicit request. - if (sema.mod.failed_decls.count() != 0) break :ref; + if (mod.failed_decls.count() != 0) break :ref; break :blk default_reference_trace_len; }; @@ -2245,7 +2353,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { defer reference_stack.deinit(); // Avoid infinite loops. - var seen = std.AutoHashMap(Module.Decl.Index, void).init(gpa); + var seen = std.AutoHashMap(Decl.Index, void).init(gpa); defer seen.deinit(); var cur_reference_trace: u32 = 0; @@ -2254,13 +2362,16 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { if (gop.found_existing) break; if (cur_reference_trace < max_references) { const decl = sema.mod.declPtr(ref.referencer); - try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl) }); + try reference_stack.append(.{ + .decl = decl.name.toOptional(), + .src_loc = ref.src.toSrcLoc(decl, mod), + }); } referenced_by = ref.referencer; } if (sema.mod.comp.reference_trace == null and cur_reference_trace > 0) { try reference_stack.append(.{ - .decl = null, + .decl = .none, .src_loc = undefined, .hidden = 0, }); @@ -2352,10 +2463,10 @@ fn analyzeAsInt( dest_ty: Type, reason: []const u8, ) !u64 { + const mod = sema.mod; const coerced = try sema.coerce(block, dest_ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced, reason); - const target = sema.mod.getTarget(); - return (try val.getUnsignedIntAdvanced(target, sema)).?; + return (try val.getUnsignedIntAdvanced(mod, sema)).?; } // Returns a compile error if the value has tag `variable`. 
See `resolveInstValue` for @@ -2396,73 +2507,77 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const pointee_ty = try sema.resolveType(block, src, extra.lhs); const ptr = try sema.resolveInst(extra.rhs); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); if (Air.refToIndex(ptr)) |ptr_inst| { - if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { - .inferred_alloc => { - const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; - // Add the stored instruction to the set we will use to resolve peer types - // for the inferred allocation. - // This instruction will not make it to codegen; it is only to participate - // in the `stored_inst_list` of the `inferred_alloc`. - var trash_block = block.makeSubBlock(); - defer trash_block.instructions.deinit(sema.gpa); - const operand = try trash_block.addBitCast(pointee_ty, .void_value); + switch (sema.air_instructions.items(.tag)[ptr_inst]) { + .inferred_alloc => { + const ia1 = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc; + const ia2 = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; + // Add the stored instruction to the set we will use to resolve peer types + // for the inferred allocation. + // This instruction will not make it to codegen; it is only to participate + // in the `stored_inst_list` of the `inferred_alloc`. + var trash_block = block.makeSubBlock(); + defer trash_block.instructions.deinit(sema.gpa); + const operand = try trash_block.addBitCast(pointee_ty, .void_value); - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = inferred_alloc.alignment, - .@"addrspace" = addr_space, - }); - const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); + const ptr_ty = try mod.ptrType(.{ + .child = pointee_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = addr_space, + }, + }); + const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); - try inferred_alloc.prongs.append(sema.arena, .{ - .stored_inst = operand, - .placeholder = Air.refToIndex(bitcasted_ptr).?, - }); + try ia2.prongs.append(sema.arena, .{ + .stored_inst = operand, + .placeholder = Air.refToIndex(bitcasted_ptr).?, + }); - return bitcasted_ptr; - }, - .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; - // There will be only one coerce_result_ptr because we are running at comptime. - // The alloc will turn into a Decl. 
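// User-level behavior behind the `inferred_alloc_comptime` branch above
// (illustrative only): a mutable allocation in a comptime scope is resolved
// into a single comptime-known constant, which the compiler then stores as an
// anonymous Decl.
const std = @import("std");

test "comptime inferred allocation resolves to a constant" {
    const result = comptime blk: {
        var acc: u32 = 0; // inferred allocation, mutable only at comptime
        for ([_]u32{ 1, 2, 3 }) |n| acc += n;
        break :blk acc;
    };
    try std.testing.expectEqual(@as(u32, 6), result);
}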
- var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - iac.data.decl_index = try anon_decl.finish( - try pointee_ty.copy(anon_decl.arena()), - Value.undef, - iac.data.alignment, - ); - if (iac.data.alignment != 0) { - try sema.resolveTypeLayout(pointee_ty); - } - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = iac.data.alignment, - .@"addrspace" = addr_space, - }); - try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); - return sema.addConstant( - ptr_ty, - try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl_index = iac.data.decl_index, - .runtime_index = block.runtime_index, - }), - ); - }, - else => {}, - } + return bitcasted_ptr; + }, + .inferred_alloc_comptime => { + const alignment = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.alignment; + // There will be only one coerce_result_ptr because we are running at comptime. + // The alloc will turn into a Decl. + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + const decl_index = try anon_decl.finish( + pointee_ty, + (try mod.intern(.{ .undef = pointee_ty.toIntern() })).toValue(), + alignment.toByteUnits(0), + ); + sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.decl_index = decl_index; + if (alignment != .none) { + try sema.resolveTypeLayout(pointee_ty); + } + const ptr_ty = try mod.ptrType(.{ + .child = pointee_ty.toIntern(), + .flags = .{ + .alignment = alignment, + .address_space = addr_space, + }, + }); + try sema.maybeQueueFuncBodyAnalysis(decl_index); + try sema.comptime_mutable_decls.append(decl_index); + return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_ty.toIntern(), + .addr = .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); + }, + else => {}, } } @@ -2487,6 +2602,7 @@ fn coerceResultPtr( dummy_operand: Air.Inst.Ref, trash_block: *Block, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const target = sema.mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); const pointee_ty = sema.typeOf(dummy_operand); @@ -2530,7 +2646,7 @@ fn coerceResultPtr( return sema.addConstant(ptr_ty, ptr_val); } if (pointee_ty.eql(Type.null, sema.mod)) { - const opt_ty = sema.typeOf(new_ptr).childType(); + const opt_ty = sema.typeOf(new_ptr).childType(mod); const null_inst = try sema.addConstant(opt_ty, Value.null); _ = try block.addBinOp(.store, new_ptr, null_inst); return Air.Inst.Ref.void_value; @@ -2563,7 +2679,7 @@ fn coerceResultPtr( .@"addrspace" = addr_space, }); if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| { - new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val); + new_ptr = try sema.addConstant(ptr_operand_ty, try mod.getCoerced(ptr_val, ptr_operand_ty)); } else { new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null); } @@ -2600,8 +2716,10 @@ pub fn analyzeStructDecl( sema: *Sema, new_decl: *Decl, inst: Zir.Inst.Index, - struct_obj: *Module.Struct, + struct_index: Module.Struct.Index, ) SemaError!void { + const mod = sema.mod; + const struct_obj = mod.structPtr(struct_index); const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -2630,7 +2748,7 @@ pub fn analyzeStructDecl( } } - _ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl); + _ = try mod.scanNamespace(struct_obj.namespace, 
extra_index, decls_len, new_decl); } fn zirStructDecl( @@ -2639,28 +2757,35 @@ fn zirStructDecl( extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. - const mod = sema.mod; - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = struct_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - struct_obj.* = .{ + + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const struct_index = try mod.createStruct(.{ .owner_decl = new_decl_index, .fields = .{}, .zir_index = inst, @@ -2668,18 +2793,25 @@ fn zirStructDecl( .status = .none, .known_non_opv = undefined, .is_tuple = small.is_tuple, - .namespace = .{ - .parent = block.namespace, - .ty = struct_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create struct {*} owned by {*} ({s})", .{ - &struct_obj.namespace, new_decl, new_decl.name, + .namespace = new_namespace_index, }); - try sema.analyzeStructDecl(new_decl, inst, struct_obj); - try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(struct_ty); + + new_decl.ty = Type.type; + new_decl.val = struct_ty.toValue(); + new_namespace.ty = struct_ty.toType(); + + try sema.analyzeStructDecl(new_decl, inst, struct_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn createAnonymousDeclTypeNamed( @@ -2692,6 +2824,7 @@ fn createAnonymousDeclTypeNamed( inst: ?Zir.Inst.Index, ) !Decl.Index { const mod = sema.mod; + const gpa = sema.gpa; const namespace = block.namespace; const src_scope = block.wip_capture_scope; const src_decl = mod.declPtr(block.src_decl); @@ -2707,16 +2840,15 @@ fn createAnonymousDeclTypeNamed( // semantically analyzed. // This name is also used as the key in the parent namespace so it cannot be // renamed. 
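// Sketch of the anonymous-decl naming scheme used below; the format string
// matches the code ("{parent}__{prefix}_{index}"), while the buffer-based
// formatting and the concrete values here are just for the demo.
const std = @import("std");

test "anonymous decl name format" {
    var buf: [64]u8 = undefined;
    const name = try std.fmt.bufPrint(&buf, "{s}__{s}_{d}", .{ "Outer", "struct", 42 });
    try std.testing.expectEqualStrings("Outer__struct_42", name);
}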
- const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__{s}_{d}", .{ - src_decl.name, anon_prefix, @enumToInt(new_decl_index), - }); - errdefer sema.gpa.free(name); + + const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ + src_decl.name.fmt(&mod.intern_pool), anon_prefix, @enumToInt(new_decl_index), + }) catch unreachable; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, .parent => { - const name = try sema.gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); - errdefer sema.gpa.free(name); + const name = mod.declPtr(block.src_decl).name; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2724,10 +2856,11 @@ fn createAnonymousDeclTypeNamed( const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst); const zir_tags = sema.code.instructions.items(.tag); - var buf = std.ArrayList(u8).init(sema.gpa); + var buf = std.ArrayList(u8).init(gpa); defer buf.deinit(); - try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); - try buf.appendSlice("("); + + const writer = buf.writer(); + try writer.print("{}(", .{mod.declPtr(block.src_decl).name.fmt(&mod.intern_pool)}); var arg_i: usize = 0; for (fn_info.param_body) |zir_inst| switch (zir_tags[zir_inst]) { @@ -2741,8 +2874,8 @@ fn createAnonymousDeclTypeNamed( const arg_val = sema.resolveConstMaybeUndefVal(block, .unneeded, arg, "") catch return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null); - if (arg_i != 0) try buf.appendSlice(","); - try buf.writer().print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)}); + if (arg_i != 0) try writer.writeByte(','); + try writer.print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)}); arg_i += 1; continue; @@ -2750,9 +2883,8 @@ fn createAnonymousDeclTypeNamed( else => continue, }; - try buf.appendSlice(")"); - const name = try buf.toOwnedSliceSentinel(0); - errdefer sema.gpa.free(name); + try writer.writeByte(')'); + const name = try mod.intern_pool.getOrPutString(gpa, buf.items); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2765,10 +2897,9 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - const name = try std.fmt.allocPrintZ(sema.gpa, "{s}.{s}", .{ - src_decl.name, zir_data[i].str_op.getStr(sema.code), + const name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}.{s}", .{ + src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code), }); - errdefer sema.gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; @@ -2825,53 +2956,28 @@ fn zirEnumDecl( break :blk decls_len; } else 0; + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the enum type gains an + // InternPool index. 
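// A minimal sketch of the "undefined placeholder" pattern the comment above
// describes, with hypothetical types: a field of a mutually-referencing value
// is left undefined at creation and patched once the other value exists.
const std = @import("std");

const Namespace = struct { ty: u32 };

test "placeholder is patched after the cycle is closed" {
    var ns: Namespace = .{ .ty = undefined }; // placeholder
    const enum_ty: u32 = 7; // stands in for the InternPool index
    ns.ty = enum_ty; // patch once the type exists
    try std.testing.expectEqual(@as(u32, 7), ns.ty);
}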
+ var done = false; - - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer if (!done) new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull); - enum_ty_payload.* = .{ - .base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = enum_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "enum", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer if (!done) mod.abortAnonDecl(new_decl_index); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .tag_ty = Type.null, - .tag_ty_inferred = true, - .fields = .{}, - .values = .{}, - .namespace = .{ - .parent = block.namespace, - .ty = enum_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create enum {*} owned by {*} ({s})", .{ - &enum_obj.namespace, new_decl, new_decl.name, + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer if (!done) mod.destroyNamespace(new_namespace_index); - try new_decl.finalizeNewArena(&new_decl_arena); - const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); - done = true; - - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = new_decl.value_arena.?.acquire(gpa, &decl_arena); - defer new_decl.value_arena.?.release(&decl_arena); - - extra_index = try mod.scanNamespace(&enum_obj.namespace, extra_index, decls_len, new_decl); + extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; @@ -2880,7 +2986,34 @@ fn zirEnumDecl( const body_end = extra_index; extra_index += bit_bags_count; - { + const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { + if (bag != 0) break true; + } else false; + + const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{ + .decl = new_decl_index, + .namespace = new_namespace_index.toOptional(), + .fields_len = fields_len, + .has_values = any_values, + .tag_mode = if (small.nonexhaustive) + .nonexhaustive + else if (tag_type_ref == .none) + .auto + else + .explicit, + }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index); + + new_decl.ty = Type.type; + new_decl.val = incomplete_enum.index.toValue(); + new_namespace.ty = incomplete_enum.index.toType(); + + const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + done = true; + + const int_tag_ty = ty: { // We create a block for the field type instructions because they // may need to reference Decls from inside the enum namespace. 
// Within the field type, default value, and alignment expressions, the "owner decl" @@ -2896,21 +3029,27 @@ fn zirEnumDecl( } const prev_owner_func = sema.owner_func; + const prev_owner_func_index = sema.owner_func_index; sema.owner_func = null; + sema.owner_func_index = .none; defer sema.owner_func = prev_owner_func; + defer sema.owner_func_index = prev_owner_func_index; const prev_func = sema.func; + const prev_func_index = sema.func_index; sema.func = null; + sema.func_index = .none; defer sema.func = prev_func; + defer sema.func_index = prev_func_index; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope); defer wip_captures.deinit(); var enum_block: Block = .{ .parent = null, .sema = sema, .src_decl = new_decl_index, - .namespace = &enum_obj.namespace, + .namespace = new_namespace_index, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -2926,43 +3065,29 @@ fn zirEnumDecl( if (tag_type_ref != .none) { const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref); - if (ty.zigTypeTag() != .Int and ty.zigTypeTag() != .ComptimeInt) { + if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); } - enum_obj.tag_ty = try ty.copy(decl_arena_allocator); - enum_obj.tag_ty_inferred = false; + incomplete_enum.setTagType(&mod.intern_pool, ty.toIntern()); + break :ty ty; } else if (fields_len == 0) { - enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, 0); - enum_obj.tag_ty_inferred = true; + break :ty try mod.intType(.unsigned, 0); } else { const bits = std.math.log2_int_ceil(usize, fields_len); - enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, bits); - enum_obj.tag_ty_inferred = true; + break :ty try mod.intType(.unsigned, bits); } - } + }; - if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag() != .ComptimeInt) { - if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(sema.mod.getTarget())) { + if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { + if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } } - try enum_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); - const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { - if (bag != 0) break true; - } else false; - if (any_values) { - try enum_obj.values.ensureTotalCapacityContext(decl_arena_allocator, fields_len, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - } - var bit_bag_index: usize = body_end; var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; var last_tag_val: ?Value = null; - var tag_val_buf: Value.Payload.U64 = undefined; while (field_i < fields_len) : (field_i += 1) { if (field_i % 32 == 0) { cur_bit_bag = sema.code.extra[bit_bag_index]; @@ -2977,15 +3102,12 @@ fn zirEnumDecl( // doc comment extra_index += 1; - // This string needs to outlive the ZIR code. 
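// Sketch of the "bit bag" walk above: one u32 carries a flag per field and is
// refilled every 32 fields. The bag contents here are made up for the demo.
const std = @import("std");

test "unpack one flag per field from u32 bags" {
    const bags = [_]u32{0b1011}; // flags for fields 0..31
    var cur: u32 = undefined;
    var flags: [4]bool = undefined;
    for (&flags, 0..) |*flag, i| {
        if (i % 32 == 0) cur = bags[i / 32]; // refill, like `cur_bit_bag`
        flag.* = @truncate(u1, cur) != 0;
        cur >>= 1;
    }
    try std.testing.expectEqual([4]bool{ true, true, false, true }, flags);
}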
- const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - - const gop_field = enum_obj.fields.getOrPutAssumeCapacity(field_name); - if (gop_field.found_existing) { - const field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_field.index }).lazy; + const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir); + if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| { + const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name}); + const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name_zir}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other field here", .{}); break :msg msg; @@ -2993,13 +3115,13 @@ fn zirEnumDecl( return sema.failWithOwnedErrorMsg(msg); } - if (has_tag_value) { + const tag_overflow = if (has_tag_value) overflow: { const tag_val_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const tag_inst = try sema.resolveInst(tag_val_ref); - const tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { + last_tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { error.NeededSourceLocation => { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, }).lazy; @@ -3008,63 +3130,56 @@ fn zirEnumDecl( }, else => |e| return e, }; - last_tag_val = tag_val; - const copied_tag_val = try tag_val.copy(decl_arena_allocator); - const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - if (gop_val.found_existing) { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; + last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty); + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| { + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_val.index }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{tag_val.fmtValue(enum_obj.tag_ty, sema.mod)}); + const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - } else if (any_values) { - const tag_val = if (last_tag_val) |val| - try sema.intAdd(val, Value.one, enum_obj.tag_ty) + break :overflow false; + } else if (any_values) overflow: { + var overflow: ?usize = null; + last_tag_val = if (last_tag_val) |val| + try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty, &overflow) else - Value.zero; - last_tag_val = tag_val; - const copied_tag_val = try tag_val.copy(decl_arena_allocator); - const gop_val = 
enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - if (gop_val.found_existing) { - const field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_val.index }).lazy; + try mod.intValue(int_tag_ty, 0); + if (overflow != null) break :overflow true; + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| { + const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{tag_val.fmtValue(enum_obj.tag_ty, sema.mod)}); + const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - } else { - tag_val_buf = .{ - .base = .{ .tag = .int_u64 }, - .data = field_i, - }; - last_tag_val = Value.initPayload(&tag_val_buf.base); - } + break :overflow false; + } else overflow: { + last_tag_val = try mod.intValue(Type.comptime_int, field_i); + if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; + last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty); + break :overflow false; + }; - if (!(try sema.intFitsInType(last_tag_val.?, enum_obj.tag_ty, null))) { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + if (tag_overflow) { + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = if (has_tag_value) .value else .name, }).lazy; const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{ - last_tag_val.?.fmtValue(enum_obj.tag_ty, mod), enum_obj.tag_ty.fmt(mod), + last_tag_val.?.fmtValue(int_tag_ty, mod), int_tag_ty.fmt(mod), }); return sema.failWithOwnedErrorMsg(msg); } @@ -3081,6 +3196,8 @@ fn zirUnionDecl( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; + const gpa = sema.gpa; const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); var extra_index: usize = extended.operand; @@ -3100,55 +3217,60 @@ fn zirUnionDecl( break :blk decls_len; } else 0; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the union type gains an + // InternPool index. 
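// Illustrative user-level counterpart of the overflow handling above: every
// tag value, explicit or auto-incremented, must fit the integer tag type, or
// the "enumeration value too large" error fires.
const std = @import("std");

test "tag values must fit the tag type" {
    const E = enum(u2) { a = 2, b }; // `b` auto-increments to 3, still in u2
    try std.testing.expectEqual(@as(u2, 3), @enumToInt(E.b));
}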
- const union_obj = try new_decl_arena_allocator.create(Module.Union); - const type_tag = if (small.has_tag_type or small.auto_enum_tag) - Type.Tag.union_tagged - else if (small.layout != .Auto) - Type.Tag.@"union" - else switch (block.sema.mod.optimizeMode()) { - .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged, - .ReleaseFast, .ReleaseSmall => Type.Tag.@"union", - }; - const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union); - union_payload.* = .{ - .base = .{ .tag = type_tag }, - .data = union_obj, - }; - const union_ty = Type.initPayload(&union_payload.base); - const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty); - const mod = sema.mod; const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = union_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "union", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - union_obj.* = .{ + + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const union_index = try mod.createUnion(.{ .owner_decl = new_decl_index, - .tag_ty = Type.initTag(.null), + .tag_ty = Type.null, .fields = .{}, .zir_index = inst, .layout = small.layout, .status = .none, - .namespace = .{ - .parent = block.namespace, - .ty = union_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create union {*} owned by {*} ({s})", .{ - &union_obj.namespace, new_decl, new_decl.name, + .namespace = new_namespace_index, }); + errdefer mod.destroyUnion(union_index); - _ = try mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl); + const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{ + .index = union_index, + .runtime_tag = if (small.has_tag_type or small.auto_enum_tag) + .tagged + else if (small.layout != .Auto) + .none + else switch (block.sema.mod.optimizeMode()) { + .Debug, .ReleaseSafe => .safety, + .ReleaseFast, .ReleaseSmall => .none, + }, + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(union_ty); - try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + new_decl.ty = Type.type; + new_decl.val = union_ty.toValue(); + new_namespace.ty = union_ty.toType(); + + _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); + + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirOpaqueDecl( @@ -3161,7 +3283,6 @@ fn zirOpaqueDecl( defer tracy.end(); const mod = sema.mod; - const gpa = sema.gpa; const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); var extra_index: usize = extended.operand; @@ -3177,42 +3298,42 @@ fn zirOpaqueDecl( break :blk decls_len; } else 0; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + // Because these three things each reference each other, `undefined` + // placeholders are used in two places before being set after the opaque + // type gains an InternPool index. 
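// User-level sketch of what `zirOpaqueDecl` below produces: an opaque type
// owns a namespace of decls but has no known size or layout.
const std = @import("std");

const O = opaque {
    pub const id: u32 = 1;
};

test "opaque type carries decls but no size" {
    try std.testing.expectEqual(@as(u32, 1), O.id);
    // `@sizeOf(O)` would be a compile error: opaque types have unknown size.
}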
- const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); - const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); - opaque_ty_payload.* = .{ - .base = .{ .tag = .@"opaque" }, - .data = opaque_obj, - }; - const opaque_ty = Type.initPayload(&opaque_ty_payload.base); - const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = opaque_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - opaque_obj.* = .{ - .owner_decl = new_decl_index, - .namespace = .{ - .parent = block.namespace, - .ty = opaque_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create opaque {*} owned by {*} ({s})", .{ - &opaque_obj.namespace, new_decl, new_decl.name, + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); - extra_index = try mod.scanNamespace(&opaque_obj.namespace, extra_index, decls_len, new_decl); + const opaque_ty = try mod.intern(.{ .opaque_type = .{ + .decl = new_decl_index, + .namespace = new_namespace_index, + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(opaque_ty); - try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + new_decl.ty = Type.type; + new_decl.val = opaque_ty.toValue(); + new_namespace.ty = opaque_ty.toType(); + + extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); + + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirErrorSetDecl( @@ -3224,48 +3345,39 @@ fn zirErrorSetDecl( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index); - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const error_set = try new_decl_arena_allocator.create(Module.ErrorSet); - const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set); - const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty); - const mod = sema.mod; - const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = error_set_val, - }, name_strategy, "error", inst); - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); - - var names = Module.ErrorSet.NameMap{}; - try names.ensureUnusedCapacity(new_decl_arena_allocator, extra.data.fields_len); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; + try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len); var extra_index = @intCast(u32, extra.end); const extra_index_end = extra_index + (extra.data.fields_len * 2); while (extra_index < extra_index_end) : (extra_index += 2) { // 
+2 to skip over doc_string const str_index = sema.code.extra[extra_index]; - const kv = try mod.getErrorValue(sema.code.nullTerminatedString(str_index)); - const result = names.getOrPutAssumeCapacity(kv.key); + const name = sema.code.nullTerminatedString(str_index); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + _ = try mod.getErrorValue(name_ip); + const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen } - // names must be sorted. - Module.ErrorSet.sortNames(&names); + const error_set_ty = try mod.errorSetFromUnsortedNames(names.keys()); - error_set.* = .{ - .owner_decl = new_decl_index, - .names = names, - }; - try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ + .ty = Type.type, + .val = error_set_ty.toValue(), + }, name_strategy, "error", inst); + const new_decl = mod.declPtr(new_decl_index); + new_decl.owns_tv = true; + errdefer mod.abortAnonDecl(new_decl_index); + + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { @@ -3319,7 +3431,8 @@ fn ensureResultUsed( ty: Type, src: LazySrcLoc, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return, .ErrorSet, .ErrorUnion => { const msg = msg: { @@ -3347,11 +3460,12 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const src = inst_data.src(); const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion => { const msg = msg: { const msg = try sema.errMsg(block, src, "error is discarded", .{}); @@ -3369,16 +3483,17 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const err_union_ty = if (operand_ty.zigTypeTag() == .Pointer) - operand_ty.childType() + const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer) + operand_ty.childType(mod) else operand_ty; - if (err_union_ty.zigTypeTag() != .ErrorUnion) return; - const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(); + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; + const payload_ty = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod); if (payload_ty != .Void and payload_ty != .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "error union payload is ignored", .{}); @@ -3407,11 +3522,13 @@ fn indexablePtrLen( src: LazySrcLoc, object: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const object_ty = sema.typeOf(object); - const is_pointer_to = object_ty.isSinglePointer(); - const indexable_ty = if (is_pointer_to) object_ty.childType() else object_ty; + const is_pointer_to = object_ty.isSinglePointer(mod); + const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try 
checkIndexable(sema, block, src, indexable_ty); - return sema.fieldVal(block, src, object, "len", src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len"); + return sema.fieldVal(block, src, object, field_name, src); } fn indexablePtrLenOrNone( @@ -3420,10 +3537,12 @@ fn indexablePtrLenOrNone( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); - if (operand_ty.ptrSize() == .Many) return .none; - return sema.fieldVal(block, src, operand, "len", src); + if (operand_ty.ptrSize(mod) == .Many) return .none; + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len"); + return sema.fieldVal(block, src, operand, field_name, src); } fn zirAllocExtended( @@ -3431,6 +3550,7 @@ fn zirAllocExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node }; const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node }; @@ -3451,22 +3571,19 @@ fn zirAllocExtended( break :blk alignment; } else 0; - const inferred_alloc_ty = if (small.is_const) - Type.initTag(.inferred_alloc_const) - else - Type.initTag(.inferred_alloc_mut); - if (block.is_comptime or small.is_comptime) { if (small.has_type) { return sema.analyzeComptimeAlloc(block, var_ty, alignment); } else { - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .inferred_alloc_comptime = .{ .decl_index = undefined, - .alignment = alignment, - }), - ); + .alignment = InternPool.Alignment.fromByteUnits(alignment), + .is_const = small.is_const, + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } } @@ -3484,17 +3601,15 @@ fn zirAllocExtended( return block.addTy(.alloc, ptr_type); } - // `Sema.addConstant` does not add the instruction to the block because it is - // not needed in the case of constant values. However here, we plan to "downgrade" - // to a normal instruction when we hit `resolve_inferred_alloc`. So we append - // to the block even though it is currently a `.constant`. 
- const result = try sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }), - ); - try block.instructions.append(sema.gpa, Air.refToIndex(result).?); - try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {}); - return result; + const result_index = try block.addInstAsIndex(.{ + .tag = .inferred_alloc, + .data = .{ .inferred_alloc = .{ + .alignment = InternPool.Alignment.fromByteUnits(alignment), + .is_const = small.is_const, + } }, + }); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{}); + return Air.indexToRef(result_index); } fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3508,11 +3623,12 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const alloc = try sema.resolveInst(inst_data.operand); const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); const elem_ty = ptr_info.pointee_type; // Detect if all stores to an `.alloc` were comptime-known. @@ -3558,8 +3674,8 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try elem_ty.copy(anon_decl.arena()), - try store_val.copy(anon_decl.arena()), + elem_ty, + store_val, ptr_info.@"align", )); } @@ -3568,15 +3684,16 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); ptr_info.mutable = false; const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); // Detect if a comptime value simply needs to have its type changed. 
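    // Since interned values carry their type, the comptime-known pointer value
    // must be re-interned at the const pointer type via `getCoerced` rather
    // than reused unchanged.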
if (try sema.resolveMaybeUndefVal(alloc)) |val| { - return sema.addConstant(const_ptr_ty, val); + return sema.addConstant(const_ptr_ty, try mod.getCoerced(val, const_ptr_ty)); } return block.addBitCast(const_ptr_ty, alloc); @@ -3585,18 +3702,22 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai fn zirAllocInferredComptime( sema: *Sema, inst: Zir.Inst.Index, - inferred_alloc_ty: Type, + is_const: bool, ) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .inferred_alloc_comptime = .{ .decl_index = undefined, - .alignment = 0, - }), - ); + .alignment = .none, + .is_const = is_const, + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3642,104 +3763,103 @@ fn zirAllocInferred( sema: *Sema, block: *Block, inst: Zir.Inst.Index, - inferred_alloc_ty: Type, + is_const: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; if (block.is_comptime) { - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .inferred_alloc_comptime = .{ .decl_index = undefined, - .alignment = 0, - }), - ); + .alignment = .none, + .is_const = is_const, + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } - // `Sema.addConstant` does not add the instruction to the block because it is - // not needed in the case of constant values. However here, we plan to "downgrade" - // to a normal instruction when we hit `resolve_inferred_alloc`. So we append - // to the block even though it is currently a `.constant`. 
- const result = try sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }), - ); - try block.instructions.append(sema.gpa, Air.refToIndex(result).?); - try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {}); - return result; + const result_index = try block.addInstAsIndex(.{ + .tag = .inferred_alloc, + .data = .{ .inferred_alloc = .{ + .alignment = .none, + .is_const = is_const, + } }, + }); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{}); + return Air.indexToRef(result_index); } fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_inst = Air.refToIndex(ptr).?; - assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); - const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload; - const ptr_val = sema.air_values.items[value_index]; - const var_is_mut = switch (sema.typeOf(ptr).tag()) { - .inferred_alloc_const => false, - .inferred_alloc_mut => true, - else => unreachable, - }; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; - const decl_index = iac.data.decl_index; - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); + const iac = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime; + const decl_index = iac.decl_index; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); - const decl = sema.mod.declPtr(decl_index); - const final_elem_ty = try decl.ty.copy(sema.arena); - const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = iac.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const decl = mod.declPtr(decl_index); + if (iac.is_const) try decl.intern(mod); + const final_elem_ty = decl.ty; + const final_ptr_ty = try mod.ptrType(.{ + .child = final_elem_ty.toIntern(), + .flags = .{ + .is_const = false, + .alignment = iac.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); - const final_ptr_ty_inst = try sema.addType(final_ptr_ty); - sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; try sema.maybeQueueFuncBodyAnalysis(decl_index); - if (var_is_mut) { - sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl_index = decl_index, - .runtime_index = block.runtime_index, - }); - } else { - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl_index); - } + // Change it to an interned. 
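+            // The placeholder instruction is rewritten in place, so later passes
+            // only ever see a finished `interned` constant pointer to the decl.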
+ sema.air_instructions.set(ptr_inst, .{ + .tag = .interned, + .data = .{ .interned = try mod.intern(.{ .ptr = .{ + .ty = final_ptr_ty.toIntern(), + .addr = if (!iac.is_const) .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } } else .{ .decl = decl_index }, + } }) }, + }); }, .inferred_alloc => { - assert(sema.unresolved_inferred_allocs.remove(ptr_inst)); - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - const peer_inst_list = inferred_alloc.data.prongs.items(.stored_inst); + const ia1 = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc; + const ia2 = sema.unresolved_inferred_allocs.fetchRemove(ptr_inst).?.value; + const peer_inst_list = ia2.prongs.items(.stored_inst); const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); - const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = inferred_alloc.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const final_ptr_ty = try mod.ptrType(.{ + .child = final_elem_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); - if (var_is_mut) { + if (!ia1.is_const) { try sema.validateVarType(block, ty_src, final_elem_ty, false); } else ct: { // Detect if the value is comptime-known. In such case, the // last 3 AIR instructions of the block will look like this: // - // %a = constant + // %a = inferred_alloc // %b = bitcast(%a) // %c = store(%b, %d) // @@ -3779,43 +3899,46 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } }; - const const_inst = while (true) { + while (true) { if (search_index == 0) break :ct; search_index -= 1; const candidate = block.instructions.items[search_index]; + if (candidate == ptr_inst) break; switch (air_tags[candidate]) { .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue, - .constant => break candidate, else => break :ct, } - }; + } const store_op = air_datas[store_inst].bin_op; const store_val = (try sema.resolveMaybeUndefVal(store_op.rhs)) orelse break :ct; if (store_op.lhs != Air.indexToRef(bitcast_inst)) break :ct; - if (air_datas[bitcast_inst].ty_op.operand != Air.indexToRef(const_inst)) break :ct; + if (air_datas[bitcast_inst].ty_op.operand != ptr) break :ct; const new_decl_index = d: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const new_decl_index = try anon_decl.finish( - try final_elem_ty.copy(anon_decl.arena()), - try store_val.copy(anon_decl.arena()), - inferred_alloc.data.alignment, + final_elem_ty, + store_val, + ia1.alignment.toByteUnits(0), ); break :d new_decl_index; }; - try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); // Even though we reuse the constant instruction, we still remove it from the // block so that codegen does not see it. block.instructions.shrinkRetainingCapacity(search_index); try sema.maybeQueueFuncBodyAnalysis(new_decl_index); - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index); - // if bitcast ty ref needs to be made const, make_ptr_const - // ZIR handles it later, so we can just use the ty ref here. 
- air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty; + sema.air_instructions.set(ptr_inst, .{ + .tag = .interned, + .data = .{ .interned = try mod.intern(.{ .ptr = .{ + .ty = final_ptr_ty.toIntern(), + .addr = .{ .decl = new_decl_index }, + } }) }, + }); // Unless the block is comptime, `alloc_inferred` always produces // a runtime constant. The final inferred type needs to be @@ -3836,18 +3959,19 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Now we need to go back over all the coerce_result_ptr instructions, which // previously inserted a bitcast as a placeholder, and do the logic as if // the new result ptr type was available. - const placeholders = inferred_alloc.data.prongs.items(.placeholder); + const placeholders = ia2.prongs.items(.placeholder); const gpa = sema.gpa; var trash_block = block.makeSubBlock(); trash_block.is_comptime = false; defer trash_block.instructions.deinit(gpa); - const mut_final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = inferred_alloc.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const mut_final_ptr_ty = try mod.ptrType(.{ + .child = final_elem_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty); const empty_trash_count = trash_block.instructions.items.len; @@ -3855,7 +3979,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com for (peer_inst_list, placeholders) |peer_inst, placeholder_inst| { const sub_ptr_ty = sema.typeOf(Air.indexToRef(placeholder_inst)); - if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) { + if (mut_final_ptr_ty.eql(sub_ptr_ty, mod)) { // New result location type is the same as the old one; nothing // to do here. 
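            // (The placeholder bitcast already produces the resolved pointer type.)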
continue; @@ -3920,27 +4044,28 @@ fn zirArrayBasePtr( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); - switch (elem_ty.zigTypeTag()) { + const elem_ty = sema.typeOf(base_ptr).childType(mod); + switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, - .Struct => if (elem_ty.isTuple()) { + .Struct => if (elem_ty.isTuple(mod)) { // TODO validate element count return base_ptr; }, else => {}, } - return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirFieldBasePtr( @@ -3948,27 +4073,30 @@ fn zirFieldBasePtr( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); - switch (elem_ty.zigTypeTag()) { + const elem_ty = sema.typeOf(base_ptr).childType(mod); + switch (elem_ty.zigTypeTag(mod)) { .Struct, .Union => return base_ptr, else => {}, } - return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.operands_len); @@ -3991,7 +4119,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. - const is_int = switch (object_ty.zigTypeTag()) { + const is_int = switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => true, else => false, }; @@ -4000,7 +4128,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .input_index = i, } }; const arg_len_uncoerced = if (is_int) object else l: { - if (!object_ty.isIndexable()) { + if (!object_ty.isIndexable(mod)) { // Instead of using checkIndexable we customize this error. 
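                // A `for` operand may also be an integer range, so the message
                // must mention both possibilities.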
const msg = msg: { const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)}); @@ -4010,9 +4138,9 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. }; return sema.failWithOwnedErrorMsg(msg); } - if (!object_ty.indexableHasLen()) continue; + if (!object_ty.indexableHasLen(mod)) continue; - break :l try sema.fieldVal(block, arg_src, object, "len", arg_src); + break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len"), arg_src); }; const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); if (len == .none) { @@ -4061,7 +4189,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. - switch (object_ty.zigTypeTag()) { + switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => continue, else => {}, } @@ -4096,15 +4224,16 @@ fn validateArrayInitTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data; const ty = try sema.resolveType(block, ty_src, extra.ty); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Array => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} array elements; found {d}", .{ array_len, extra.init_count, @@ -4113,7 +4242,7 @@ fn validateArrayInitTy( return; }, .Vector => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} vector elements; found {d}", .{ array_len, extra.init_count, @@ -4121,9 +4250,9 @@ fn validateArrayInitTy( } return; }, - .Struct => if (ty.isTuple()) { + .Struct => if (ty.isTuple(mod)) { _ = try sema.resolveTypeFields(ty); - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ array_len, extra.init_count, @@ -4141,11 +4270,12 @@ fn validateStructInitTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct, .Union => return, else => {}, } @@ -4160,6 +4290,7 @@ fn zirValidateStructInit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4167,8 +4298,8 @@ fn zirValidateStructInit( const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); - const agg_ty = sema.typeOf(object_ptr).childType(); - switch (agg_ty.zigTypeTag()) { + const agg_ty = 
sema.typeOf(object_ptr).childType(mod); + switch (agg_ty.zigTypeTag(mod)) { .Struct => return sema.validateStructInit( block, agg_ty, @@ -4194,6 +4325,9 @@ fn validateUnionInit( instrs: []const Zir.Inst.Index, union_ptr: Air.Inst.Ref, ) CompileError!void { + const mod = sema.mod; + const gpa = sema.gpa; + if (instrs.len != 1) { const msg = msg: { const msg = try sema.errMsg( @@ -4202,7 +4336,7 @@ fn validateUnionInit( "cannot initialize multiple union fields at once; unions can only have one active field", .{}, ); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); for (instrs[1..]) |inst| { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -4226,7 +4360,7 @@ fn validateUnionInit( const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_ptr_extra.field_name_start)); // Validate the field access but ignore the index since we want the tag enum field index. _ = try sema.unionFieldIndex(block, union_ty, field_name, field_src); const air_tags = sema.air_instructions.items(.tag); @@ -4291,21 +4425,25 @@ fn validateUnionInit( break; } - const tag_ty = union_ty.unionTagTypeHypothetical(); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); if (init_val) |val| { // Our task is to delete all the `field_ptr` and `store` instructions, and insert // instead a single `store` to the result ptr with a comptime union value. block.instructions.shrinkRetainingCapacity(first_block_index); - var union_val = try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = val, - }); - if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val); - const union_init = try sema.addConstant(union_ty, union_val); + var union_val = try mod.intern(.{ .un = .{ + .ty = union_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val.toIntern(), + } }); + if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{ + .ty = union_ty.toIntern(), + .val = union_val, + } }); + const union_init = try sema.addConstant(union_ty, union_val.toValue()); try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store); return; } else if (try sema.typeRequiresComptime(union_ty)) { @@ -4323,10 +4461,12 @@ fn validateStructInit( init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; // Maps field index to field_ptr index of where it was already initialized. 
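    // An entry of 0 means the field was not explicitly initialized and falls
    // back to its default value below.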
- const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount()); + const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod)); defer gpa.free(found_fields); @memset(found_fields, 0); @@ -4337,8 +4477,11 @@ fn validateStructInit( const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; struct_ptr_zir_ref = field_ptr_extra.lhs; - const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); - const field_index = if (struct_ty.isTuple()) + const field_name = try ip.getOrPutString( + gpa, + sema.code.nullTerminatedString(field_ptr_extra.field_name_start), + ); + const field_index = if (struct_ty.isTuple(mod)) try sema.tupleFieldIndex(block, struct_ty, field_name, field_src) else try sema.structFieldIndex(block, struct_ty, field_name, field_src); @@ -4371,9 +4514,9 @@ fn validateStructInit( for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) continue; - const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { - if (struct_ty.isTuple()) { + const default_val = struct_ty.structFieldDefaultValue(i, mod); + if (default_val.toIntern() == .unreachable_value) { + if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4382,9 +4525,9 @@ fn validateStructInit( } continue; } - const field_name = struct_ty.structFieldName(i); - const template = "missing struct field: {s}"; - const args = .{field_name}; + const field_name = struct_ty.structFieldName(i, mod); + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4394,25 +4537,23 @@ fn validateStructInit( } const field_src = init_src; // TODO better source location - const default_field_ptr = if (struct_ty.isTuple()) + const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); const init = try sema.addConstant(field_ty, default_val); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const mod = sema.mod; - const fqn = try struct_obj.data.getFullyQualifiedName(mod); - defer gpa.free(fqn); + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(mod); try mod.errNoteNonLazy( - struct_obj.data.srcLoc(mod), + struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -4432,14 +4573,14 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount()); + const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); field: for (found_fields, 0..) 
|field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. - const field_ty = struct_ty.structFieldType(i); + const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { - field_values[i] = opv; + field_values[i] = opv.toIntern(); continue; } @@ -4504,7 +4645,7 @@ fn validateStructInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - field_values[i] = val; + field_values[i] = val.toIntern(); } else if (require_comptime) { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known"); @@ -4517,9 +4658,9 @@ fn validateStructInit( continue :field; } - const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { - if (struct_ty.isTuple()) { + const default_val = struct_ty.structFieldDefaultValue(i, mod); + if (default_val.toIntern() == .unreachable_value) { + if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4528,9 +4669,9 @@ fn validateStructInit( } continue; } - const field_name = struct_ty.structFieldName(i); - const template = "missing struct field: {s}"; - const args = .{field_name}; + const field_name = struct_ty.structFieldName(i, mod); + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4538,18 +4679,17 @@ fn validateStructInit( } continue; } - field_values[i] = default_val; + field_values[i] = default_val.toIntern(); } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); - try sema.mod.errNoteNonLazy( - struct_obj.data.srcLoc(sema.mod), + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(mod); + try mod.errNoteNonLazy( + struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -4561,9 +4701,15 @@ fn validateStructInit( // instead a single `store` to the struct_ptr with a comptime struct value. 
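        // `make_runtime` is set when a resolved store operand must still be
        // treated as runtime-known; the interned aggregate is then wrapped in
        // a `runtime_value` of the same type.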
block.instructions.shrinkRetainingCapacity(first_block_index); - var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values); - if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val); - const struct_init = try sema.addConstant(struct_ty, struct_val); + var struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.toIntern(), + .storage = .{ .elems = field_values }, + } }); + if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{ + .ty = struct_ty.toIntern(), + .val = struct_val, + } }); + const struct_init = try sema.addConstant(struct_ty, struct_val.toValue()); try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; } @@ -4574,12 +4720,12 @@ fn validateStructInit( if (field_ptr != 0) continue; const field_src = init_src; // TODO better source location - const default_field_ptr = if (struct_ty.isTuple()) + const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); - const init = try sema.addConstant(field_ty, field_values[i]); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); + const init = try sema.addConstant(field_ty, field_values[i].toValue()); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } } @@ -4589,6 +4735,7 @@ fn zirValidateArrayInit( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4596,18 +4743,18 @@ fn zirValidateArrayInit( const first_elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data; const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr); - const array_ty = sema.typeOf(array_ptr).childType(); - const array_len = array_ty.arrayLen(); + const array_ty = sema.typeOf(array_ptr).childType(mod); + const array_len = array_ty.arrayLen(mod); - if (instrs.len != array_len) switch (array_ty.zigTypeTag()) { + if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) { .Struct => { var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); var i = instrs.len; while (i < array_len) : (i += 1) { - const default_val = array_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { + const default_val = array_ty.structFieldDefaultValue(i, mod); + if (default_val.toIntern() == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4642,39 +4789,41 @@ fn zirValidateArrayInit( // at comptime so we have almost nothing to do here. However, in case of a // sentinel-terminated array, the sentinel will not have been populated by // any ZIR instructions at comptime; we need to do that here. 
- if (array_ty.sentinel()) |sentinel_val| { + if (array_ty.sentinel(mod)) |sentinel_val| { const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len); const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true); - const sentinel = try sema.addConstant(array_ty.childType(), sentinel_val); + const sentinel = try sema.addConstant(array_ty.childType(mod), sentinel_val); try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store); } return; } + // If the array has one possible value, the value is always comptime-known. + if (try sema.typeHasOnePossibleValue(array_ty)) |array_opv| { + const array_init = try sema.addConstant(array_ty, array_opv); + try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); + return; + } + var array_is_comptime = true; var first_block_index = block.instructions.items.len; var make_runtime = false; // Collect the comptime element values in case the array literal ends up // being comptime-known. - const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel()); - const element_vals = try sema.arena.alloc(Value, array_len_s); - const opt_opv = try sema.typeHasOnePossibleValue(array_ty); + const element_vals = try sema.arena.alloc( + InternPool.Index, + try sema.usizeCast(block, init_src, array_len), + ); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); outer: for (instrs, 0..) |elem_ptr, i| { // Determine whether the value stored to this pointer is comptime-known. - if (array_ty.isTuple()) { - if (array_ty.structFieldValueComptime(i)) |opv| { - element_vals[i] = opv; - continue; - } - } else { - // Array has one possible value, so value is always comptime-known - if (opt_opv) |opv| { - element_vals[i] = opv; + if (array_ty.isTuple(mod)) { + if (try array_ty.structFieldValueComptime(mod, i)) |opv| { + element_vals[i] = opv.toIntern(); continue; } } @@ -4735,7 +4884,7 @@ fn zirValidateArrayInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - element_vals[i] = val; + element_vals[i] = val.toIntern(); } else { array_is_comptime = false; } @@ -4747,50 +4896,55 @@ fn zirValidateArrayInit( if (array_is_comptime) { if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| { - if (ptr_val.tag() == .comptime_field_ptr) { - // This store was validated by the individual elem ptrs. - return; + switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .comptime_field => return, // This store was validated by the individual elem ptrs. + else => {}, + }, + else => {}, } } // Our task is to delete all the `elem_ptr` and `store` instructions, and insert // instead a single `store` to the array_ptr with a comptime struct value. - // Also to populate the sentinel value, if any. 
- if (array_ty.sentinel()) |sentinel_val| { - element_vals[instrs.len] = sentinel_val; - } - block.instructions.shrinkRetainingCapacity(first_block_index); - var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals); - if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val); - const array_init = try sema.addConstant(array_ty, array_val); + var array_val = try mod.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } }); + if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{ + .ty = array_ty.toIntern(), + .val = array_val, + } }); + const array_init = try sema.addConstant(array_ty, array_val.toValue()); try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); } } fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - if (operand_ty.zigTypeTag() != .Pointer) { - return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)}); - } else switch (operand_ty.ptrSize()) { + if (operand_ty.zigTypeTag(mod) != .Pointer) { + return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(mod)}); + } else switch (operand_ty.ptrSize(mod)) { .One, .C => {}, - .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(sema.mod)}), - .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(sema.mod)}), + .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(mod)}), + .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(mod)}), } - if ((try sema.typeHasOnePossibleValue(operand_ty.childType())) != null) { + if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) { // No need to validate the actual pointer value, we don't need it! 
return; } - const elem_ty = operand_ty.elemType2(); + const elem_ty = operand_ty.elemType2(mod); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.fail(block, src, "cannot dereference undefined value", .{}); } } else if (!(try sema.validateRunTimeType(elem_ty, false))) { @@ -4799,12 +4953,12 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr block, src, "values of type '{}' must be comptime-known, but operand value is runtime-known", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), elem_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -4816,23 +4970,24 @@ fn failWithBadMemberAccess( block: *Block, agg_ty: Type, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { - const kw_name = switch (agg_ty.zigTypeTag()) { + const mod = sema.mod; + const kw_name = switch (agg_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", .Opaque => "opaque", .Enum => "enum", else => unreachable, }; - if (agg_ty.getOwnerDeclOrNull()) |some| if (sema.mod.declIsRoot(some)) { - return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{ - agg_ty.fmt(sema.mod), field_name, + if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (mod.declIsRoot(some)) { + return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{ + agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool), }); }; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{ - kw_name, agg_ty.fmt(sema.mod), field_name, + const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{}'", .{ + kw_name, agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, agg_ty); @@ -4846,22 +5001,22 @@ fn failWithBadStructFieldAccess( block: *Block, struct_obj: *Module.Struct, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { + const mod = sema.mod; const gpa = sema.gpa; - const fqn = try struct_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = try struct_obj.getFullyQualifiedName(mod); const msg = msg: { const msg = try sema.errMsg( block, field_src, - "no field named '{s}' in struct '{s}'", - .{ field_name, fqn }, + "no field named '{}' in struct '{}'", + .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(struct_obj.srcLoc(sema.mod), msg, "struct declared here", .{}); + try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -4872,30 +5027,31 @@ fn failWithBadUnionFieldAccess( block: *Block, union_obj: *Module.Union, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { + const mod = sema.mod; const gpa = sema.gpa; - const fqn = try union_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = try union_obj.getFullyQualifiedName(mod); const msg = msg: { const msg = try sema.errMsg( block, field_src, - "no field 
named '{s}' in union '{s}'", - .{ field_name, fqn }, + "no field named '{}' in union '{}'", + .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(union_obj.srcLoc(sema.mod), msg, "union declared here", .{}); + try mod.errNoteNonLazy(union_obj.srcLoc(mod), msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { - const src_loc = decl_ty.declSrcLocOrNull(sema.mod) orelse return; - const category = switch (decl_ty.zigTypeTag()) { + const mod = sema.mod; + const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return; + const category = switch (decl_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", .Enum => "enum", @@ -4903,7 +5059,7 @@ fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !vo .ErrorSet => "error set", else => unreachable, }; - try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); + try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); } fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -4919,17 +5075,14 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const src: LazySrcLoc = sema.src; blk: { const ptr_inst = Air.refToIndex(ptr) orelse break :blk; - if (sema.air_instructions.items(.tag)[ptr_inst] != .constant) break :blk; - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + const iac = &sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc); + const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; + return sema.storeToInferredAlloc(block, ptr, operand, ia); }, else => break :blk, } @@ -4947,18 +5100,16 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const ptr = try sema.resolveInst(bin_inst.lhs); const operand = try sema.resolveInst(bin_inst.rhs); const ptr_inst = Air.refToIndex(ptr).?; - assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + const iac = &air_datas[ptr_inst].inferred_alloc_comptime; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc); + const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; + return sema.storeToInferredAlloc(block, ptr, operand, ia); }, else => unreachable, } @@ -4969,14 +5120,14 @@ fn storeToInferredAlloc( block: *Block, ptr: Air.Inst.Ref, operand: Air.Inst.Ref, - inferred_alloc: *Value.Payload.InferredAlloc, + inferred_alloc: *InferredAlloc, ) CompileError!void { // Create a 
store instruction as a placeholder. This will be replaced by a // proper store sequence once we know the stored type. const dummy_store = try block.addBinOp(.store, ptr, operand); // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. - try inferred_alloc.data.prongs.append(sema.arena, .{ + try inferred_alloc.prongs.append(sema.arena, .{ .stored_inst = operand, .placeholder = Air.refToIndex(dummy_store).?, }); @@ -4987,20 +5138,21 @@ fn storeToInferredAllocComptime( block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, - iac: *Value.Payload.InferredAllocComptime, + iac: *Air.Inst.Data.InferredAllocComptime, ) CompileError!void { const operand_ty = sema.typeOf(operand); // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: { - if (operand_val.tag() == .variable) break :store; + if (operand_val.getVariable(sema.mod) != null) break :store; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - iac.data.decl_index = try anon_decl.finish( - try operand_ty.copy(anon_decl.arena()), - try operand_val.copy(anon_decl.arena()), - iac.data.alignment, + iac.decl_index = try anon_decl.finish( + operand_ty, + operand_val, + iac.alignment.toByteUnits(0), ); + try sema.comptime_mutable_decls.append(iac.decl_index); return; } @@ -5028,6 +5180,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const zir_tags = sema.code.instructions.items(.tag); const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].pl_node; @@ -5046,9 +5199,9 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v // %b = store(%a, %c) // Where %c is an error union or error set. In such case we need to add // to the current function's inferred error set, if any. - if (is_ret and (sema.typeOf(operand).zigTypeTag() == .ErrorUnion or - sema.typeOf(operand).zigTypeTag() == .ErrorSet) and - sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) + if (is_ret and (sema.typeOf(operand).zigTypeTag(mod) == .ErrorUnion or + sema.typeOf(operand).zigTypeTag(mod) == .ErrorSet) and + sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(operand); } @@ -5072,47 +5225,30 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins return sema.addStrLit(block, bytes); } -fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air.Inst.Ref { - // `zir_bytes` references memory inside the ZIR module, which can get deallocated - // after semantic analysis is complete, for example in the case of the initialization - // expression of a variable declaration. 
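+// String literals are now deduplicated through `mod.memoized_decls`, keyed on
+// the interned aggregate value, replacing the string_literal_table machinery.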
+fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; - const string_bytes = &mod.string_literal_bytes; - const StringLiteralAdapter = Module.StringLiteralAdapter; - const StringLiteralContext = Module.StringLiteralContext; - try string_bytes.ensureUnusedCapacity(gpa, zir_bytes.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(gpa, zir_bytes, StringLiteralAdapter{ - .bytes = string_bytes, - }, StringLiteralContext{ - .bytes = string_bytes, + // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_bytes = try sema.arena.dupe(u8, bytes); + const ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = duped_bytes }, + } }); + const gop = try mod.memoized_decls.getOrPut(gpa, val); if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, zir_bytes.len), - }; - string_bytes.appendSliceAssumeCapacity(zir_bytes); - gop.value_ptr.* = .none; + const new_decl_index = try mod.createAnonymousDecl(block, .{ + .ty = ty, + .val = val.toValue(), + }); + gop.value_ptr.* = new_decl_index; + try mod.finalizeAnonDecl(new_decl_index); } - const decl_index = gop.value_ptr.unwrap() orelse di: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - - const decl_index = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), gop.key_ptr.len), - try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), - 0, // default alignment - ); - - // Needed so that `Decl.clearValues` will additionally set the corresponding - // string literal table value back to `Decl.OptionalIndex.none`. - mod.declPtr(decl_index).owns_tv = true; - - gop.value_ptr.* = decl_index.toOptional(); - break :di decl_index; - }; - return sema.analyzeDeclRef(decl_index); + return sema.analyzeDeclRef(gop.value_ptr.*); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5121,7 +5257,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins defer tracy.end(); const int = sema.code.instructions.items(.data)[inst].int; - return sema.addIntUnsigned(Type.initTag(.comptime_int), int); + return sema.addIntUnsigned(Type.comptime_int, int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5129,38 +5265,43 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const tracy = trace(@src()); defer tracy.end(); - const arena = sema.arena; + const mod = sema.mod; const int = sema.code.instructions.items(.data)[inst].str; const byte_count = int.len * @sizeOf(std.math.big.Limb); const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count]; - const limbs = try arena.alloc(std.math.big.Limb, int.len); + + // TODO: this allocation and copy is only needed because the limbs may be unaligned. + // If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these + // two lines can be removed. 
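+    // Copying through `sliceAsBytes` gives the limbs proper alignment even
+    // though the bytes in the ZIR string table are not limb-aligned.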
+ const limbs = try sema.arena.alloc(std.math.big.Limb, int.len); @memcpy(mem.sliceAsBytes(limbs), limb_bytes); return sema.addConstant( - Type.initTag(.comptime_int), - try Value.Tag.int_big_positive.create(arena, limbs), + Type.comptime_int, + try mod.intValue_big(Type.comptime_int, .{ + .limbs = limbs, + .positive = true, + }), ); } fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const arena = sema.arena; const number = sema.code.instructions.items(.data)[inst].float; return sema.addConstant( - Type.initTag(.comptime_float), - try Value.Tag.float_64.create(arena, number), + Type.comptime_float, + try sema.mod.floatValue(Type.comptime_float, number), ); } fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); return sema.addConstant( - Type.initTag(.comptime_float), - try Value.Tag.float_128.create(arena, number), + Type.comptime_float, + try sema.mod.floatValue(Type.comptime_float, number), ); } @@ -5179,7 +5320,9 @@ fn zirCompileLog( sema: *Sema, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - var managed = sema.mod.compile_log_text.toManaged(sema.gpa); + const mod = sema.mod; + + var managed = mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -5192,19 +5335,18 @@ fn zirCompileLog( const arg = try sema.resolveInst(arg_ref); const arg_ty = sema.typeOf(arg); - if (try sema.resolveMaybeUndefVal(arg)) |val| { - try sema.resolveLazyValue(val); + if (try sema.resolveMaybeUndefLazyVal(arg)) |val| { try writer.print("@as({}, {})", .{ - arg_ty.fmt(sema.mod), val.fmtValue(arg_ty, sema.mod), + arg_ty.fmt(mod), val.fmtValue(arg_ty, mod), }); } else { - try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(sema.mod)}); + try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(mod)}); } } try writer.print("\n", .{}); const decl_index = if (sema.func) |some| some.owner_decl else sema.owner_decl_index; - const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, decl_index); + const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = src_node; } @@ -5235,6 +5377,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); @@ -5284,7 +5427,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError try sema.analyzeBody(&loop_block, body); const loop_block_len = loop_block.instructions.items.len; - if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn()) { + if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn(mod)) { // If the loop ended with a noreturn terminator, then there is no way for it to loop, // so we can just use the block instead. 
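            // Splicing the body directly into the parent block avoids emitting
            // a `loop` instruction that could never branch back.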
try child_block.instructions.appendSlice(gpa, loop_block.instructions.items); @@ -5311,7 +5454,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr // we check this here to avoid undefined symbols if (!@import("build_options").have_llvm) - return sema.fail(parent_block, src, "cannot do C import on Zig compiler not built with LLVM-extension", .{}); + return sema.fail(parent_block, src, "C import unavailable; Zig compiler built without LLVM extensions", .{}); var c_import_buf = std.ArrayList(u8).init(sema.gpa); defer c_import_buf.deinit(); @@ -5354,7 +5497,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr if (!mod.comp.bin_file.options.link_libc) try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{}); - const gop = try sema.mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index); + const gop = try mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index); if (!gop.found_existing) { var errs = try std.ArrayListUnmanaged(Module.CImportError).initCapacity(sema.gpa, c_import_res.errors.len); errdefer { @@ -5537,7 +5680,7 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); + assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn(mod)); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions @@ -5578,7 +5721,7 @@ fn analyzeBlockBody( try sema.errNote(child_block, runtime_src, msg, "runtime control flow here", .{}); const child_src_decl = mod.declPtr(child_block.src_decl); - try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl), resolved_ty); + try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl, mod), resolved_ty); break :msg msg; }; @@ -5649,15 +5792,16 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const decl_name = sema.code.nullTerminatedString(extra.decl_name); + const decl_name = try mod.intern_pool.getOrPutString(mod.gpa, sema.code.nullTerminatedString(extra.decl_name)); const decl_index = if (extra.namespace != .none) index_blk: { const container_ty = try sema.resolveType(block, operand_src, extra.namespace); - const container_namespace = container_ty.getNamespace().?; + const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?; const maybe_index = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false); break :index_blk maybe_index orelse @@ -5671,10 +5815,10 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void else => |e| return e, }; { - try sema.mod.ensureDeclAnalyzed(decl_index); - const exported_decl = sema.mod.declPtr(decl_index); - if (exported_decl.val.castTag(.function)) |some| { - return sema.analyzeExport(block, src, options, some.data.owner_decl); + try 
mod.ensureDeclAnalyzed(decl_index); + const exported_decl = mod.declPtr(decl_index); + if (exported_decl.val.getFunction(mod)) |function| { + return sema.analyzeExport(block, src, options, function.owner_decl); } } try sema.analyzeExport(block, src, options, decl_index); @@ -5697,17 +5841,14 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }, else => |e| return e, }; - const decl_index = switch (operand.val.tag()) { - .function => operand.val.castTag(.function).?.data.owner_decl, - else => blk: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - break :blk try anon_decl.finish( - try operand.ty.copy(anon_decl.arena()), - try operand.val.copy(anon_decl.arena()), - 0, - ); - }, + const decl_index = if (operand.val.getFunction(sema.mod)) |function| function.owner_decl else blk: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + break :blk try anon_decl.finish( + operand.ty, + operand.val, + 0, + ); }; try sema.analyzeExport(block, src, options, decl_index); } @@ -5716,13 +5857,13 @@ pub fn analyzeExport( sema: *Sema, block: *Block, src: LazySrcLoc, - borrowed_options: std.builtin.ExportOptions, + options: Module.Export.Options, exported_decl_index: Decl.Index, ) !void { const Export = Module.Export; const mod = sema.mod; - if (borrowed_options.linkage == .Internal) { + if (options.linkage == .Internal) { return; } @@ -5731,11 +5872,11 @@ pub fn analyzeExport( if (!try sema.validateExternType(exported_decl.ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), exported_decl.ty, .other); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), exported_decl.ty, .other); try sema.addDeclaredHereNote(msg, exported_decl.ty); break :msg msg; @@ -5744,15 +5885,15 @@ pub fn analyzeExport( } // TODO: some backends might support re-exporting extern decls - if (exported_decl.isExtern()) { + if (exported_decl.isExtern(mod)) { return sema.fail(block, src, "export target cannot be extern", .{}); } // This decl is alive no matter what, since it's being exported - mod.markDeclAlive(exported_decl); + try mod.markDeclAlive(exported_decl); try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); - const gpa = mod.gpa; + const gpa = sema.gpa; try mod.decl_exports.ensureUnusedCapacity(gpa, 1); try mod.export_owners.ensureUnusedCapacity(gpa, 1); @@ -5760,19 +5901,8 @@ pub fn analyzeExport( const new_export = try gpa.create(Export); errdefer gpa.destroy(new_export); - const symbol_name = try gpa.dupe(u8, borrowed_options.name); - errdefer gpa.free(symbol_name); - - const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; - errdefer if (section) |s| gpa.free(s); - new_export.* = .{ - .options = .{ - .name = symbol_name, - .linkage = borrowed_options.linkage, - .section = section, - .visibility = borrowed_options.visibility, - }, + .opts = options, .src = src, .owner_decl = sema.owner_decl_index, .src_decl = block.src_decl, @@ -5798,6 +5928,7 @@ pub fn analyzeExport( } fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { + const mod = sema.mod; 
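As context for `analyzeExport`: the builtin it services is `@export`, and two checks above shape its behavior — `.Internal` linkage returns early (no symbol is emitted) and extern decls are rejected. A minimal user-level example of the call being analyzed (names chosen for illustration):

var counter: u32 = 0;

comptime {
    // Strong linkage emits a `counter` symbol; `.linkage = .Internal`
    // would be a no-op per the early return above.
    @export(counter, .{ .name = "counter", .linkage = .Strong });
}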
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const src = LazySrcLoc.nodeOffset(extra.node); @@ -5807,11 +5938,12 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst alignment, }); } - const func = sema.func orelse + const func_index = sema.func_index.unwrap() orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); + const func = mod.funcPtr(func_index); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); - switch (fn_owner_decl.ty.fnCallingConvention()) { + const fn_owner_decl = mod.declPtr(func.owner_decl); + switch (fn_owner_decl.ty.fnCallingConvention(mod)) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), else => if (block.inlining != null) { @@ -5819,7 +5951,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst }, } - const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func); + const gop = try mod.align_stack_fns.getOrPut(sema.gpa, func_index); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); @@ -5971,10 +6103,11 @@ fn addDbgVar( air_tag: Air.Inst.Tag, name: []const u8, ) CompileError!void { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); switch (air_tag) { .dbg_var_ptr => { - if (!(try sema.typeHasRuntimeBits(operand_ty.childType()))) return; + if (!(try sema.typeHasRuntimeBits(operand_ty.childType(mod)))) return; }, .dbg_var_val => { if (!(try sema.typeHasRuntimeBits(operand_ty))) return; @@ -6003,29 +6136,32 @@ fn addDbgVar( } fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); - const decl_name = inst_data.get(sema.code); + const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); const decl_index = try sema.lookupIdentifier(block, src, decl_name); try sema.addReferencedBy(block, src, decl_index); return sema.analyzeDeclRef(decl_index); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); - const decl_name = inst_data.get(sema.code); + const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); const decl = try sema.lookupIdentifier(block, src, decl_name); return sema.analyzeDeclVal(block, src, decl); } -fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !Decl.Index { +fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !Decl.Index { + const mod = sema.mod; var namespace = block.namespace; while (true) { if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl_index| { return decl_index; } - namespace = namespace.parent orelse break; + namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break; } unreachable; // AstGen detects use of undeclared identifier errors. 
} @@ -6036,21 +6172,22 @@ fn lookupInNamespace( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, - ident_name: []const u8, + namespace_index: Namespace.Index, + ident_name: InternPool.NullTerminatedString, observe_usingnamespace: bool, ) CompileError!?Decl.Index { const mod = sema.mod; - const namespace_decl_index = namespace.getDeclIndex(); - const namespace_decl = sema.mod.declPtr(namespace_decl_index); + const namespace = mod.namespacePtr(namespace_index); + const namespace_decl_index = namespace.getDeclIndex(mod); + const namespace_decl = mod.declPtr(namespace_decl_index); if (namespace_decl.analysis == .file_failure) { try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index); return error.AnalysisFail; } if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) { - const src_file = block.namespace.file_scope; + const src_file = mod.namespacePtr(block.namespace).file_scope; const gpa = sema.gpa; var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, bool) = .{}; @@ -6069,7 +6206,7 @@ fn lookupInNamespace( // Skip decls which are not marked pub, which are in a different // file than the `a.b`/`@hasDecl` syntax. const decl = mod.declPtr(decl_index); - if (decl.is_pub or (src_file == decl.getFileScope() and checked_namespaces.values()[check_i])) { + if (decl.is_pub or (src_file == decl.getFileScope(mod) and checked_namespaces.values()[check_i])) { try candidates.append(gpa, decl_index); } } @@ -6080,15 +6217,15 @@ fn lookupInNamespace( if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue; const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index); const sub_is_pub = entry.value_ptr.*; - if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope()) { + if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope(mod)) { // Skip usingnamespace decls which are not marked pub, which are in // a different file than the `a.b`/`@hasDecl` syntax. continue; } try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index); - const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data; - const sub_ns = ns_ty.getNamespace().?; - try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope()); + const ns_ty = sub_usingnamespace_decl.val.toType(); + const sub_ns = ns_ty.getNamespace(mod).?; + try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod)); } } @@ -6116,7 +6253,7 @@ fn lookupInNamespace( errdefer msg.destroy(gpa); for (candidates.items) |candidate_index| { const candidate = mod.declPtr(candidate_index); - const src_loc = candidate.srcLoc(); + const src_loc = candidate.srcLoc(mod); try mod.errNoteNonLazy(src_loc, msg, "declared here", .{}); } break :msg msg; @@ -6129,9 +6266,6 @@ fn lookupInNamespace( return decl_index; } - log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{ - sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name, - }); // TODO This dependency is too strong. Really, it should only be a dependency // on the non-existence of `ident_name` in the namespace. We can lessen the number of // outdated declarations by making this dependency more sophisticated. 
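One detail of the `usingnamespace` walk above that is easy to miss: `checked_namespaces` serves as both the visited set and the work queue, since an array hash map preserves insertion order and entries appended mid-loop are still reached by index. A reduced sketch of that pattern, with stand-in integer IDs rather than the compiler's `Namespace`:

const std = @import("std");

fn reachableCount(gpa: std.mem.Allocator, root: u32, edges: []const [2]u32) !usize {
    var checked: std.AutoArrayHashMapUnmanaged(u32, void) = .{};
    defer checked.deinit(gpa);
    try checked.put(gpa, root, {});
    var i: usize = 0;
    while (i < checked.count()) : (i += 1) { // count() can grow while we iterate
        const ns = checked.keys()[i];
        for (edges) |edge| {
            // re-putting an already-seen key does not add a new entry
            if (edge[0] == ns) try checked.put(gpa, edge[1], {});
        }
    }
    return checked.count();
}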
@@ -6140,22 +6274,28 @@ fn lookupInNamespace( } fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { + const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; - if (func_val.isUndef()) return null; - const owner_decl_index = switch (func_val.tag()) { - .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl, - .function => func_val.castTag(.function).?.data.owner_decl, - .decl_ref => sema.mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, + if (func_val.isUndef(mod)) return null; + const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.declPtr(decl).val.getFunction(mod).?.owner_decl, + else => return null, + }, else => return null, }; - return sema.mod.declPtr(owner_decl_index); + return mod.declPtr(owner_decl_index); } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const src = sema.src; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return .none; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return .none; + if (!mod.backendSupportsFeature(.error_return_trace)) return .none; + if (!mod.comp.bin_file.options.error_return_tracing) return .none; if (block.is_comptime) return .none; @@ -6168,7 +6308,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, else => |e| return e, }; - const field_index = sema.structFieldIndex(block, stack_trace_ty, "index", src) catch |err| switch (err) { + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, src) catch |err| switch (err) { error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, else => |e| return e, }; @@ -6191,6 +6332,8 @@ fn popErrorReturnTrace( operand: Air.Inst.Ref, saved_error_trace_index: Air.Inst.Ref, ) CompileError!void { + const mod = sema.mod; + const gpa = sema.gpa; var is_non_error: ?bool = null; var is_non_error_inst: Air.Inst.Ref = undefined; if (operand != .none) { @@ -6205,15 +6348,16 @@ fn popErrorReturnTrace( const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true); + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); } else if (is_non_error == null) { // The result might be an error. If it is, we leave the error trace alone. If it isn't, we need // to pop any error trace that may have been propagated from our arguments. 
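Conceptually, what `popErrorReturnTrace` arranges below is an index rollback: the trace's `index` cursor is saved before a call and written back when no error propagates, logically discarding entries appended in between. A toy model of just that mechanism (stand-in types, not AIR):

const Trace = struct { index: usize = 0 };

// Restore the saved cursor on the success path only; on failure the
// appended trace entries remain visible to the caller.
fn restoreUnlessFailed(trace: *Trace, saved_index: usize, failed: bool) void {
    if (!failed) trace.index = saved_index;
}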
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len); const cond_block_inst = try block.addInstAsIndex(.{ .tag = .block, .data = .{ @@ -6225,28 +6369,29 @@ fn popErrorReturnTrace( }); var then_block = block.makeSubBlock(); - defer then_block.instructions.deinit(sema.gpa); + defer then_block.instructions.deinit(gpa); // If non-error, then pop the error return trace by restoring the index. const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true); + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); _ = try then_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); // Otherwise, do nothing var else_block = block.makeSubBlock(); - defer else_block.instructions.deinit(sema.gpa); + defer else_block.instructions.deinit(gpa); _ = try else_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); - try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.CondBr).Struct.fields.len + + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); - try sema.air_instructions.append(sema.gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ + try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = is_non_error_inst, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(u32, then_block.instructions.items.len), @@ -6270,6 +6415,7 @@ fn zirCall( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node }; const call_src = inst_data.src(); @@ -6288,7 +6434,7 @@ fn zirCall( .direct => .{ .direct = try sema.resolveInst(extra.data.callee) }, .field => blk: { const object_ptr = try sema.resolveInst(extra.data.obj_ptr); - const field_name = sema.code.nullTerminatedString(extra.data.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.data.field_name_start)); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; break :blk try sema.fieldCallBind(block, callee_src, object_ptr, field_name, field_name_src); }, @@ -6320,8 +6466,7 @@ fn zirCall( var input_is_error = false; const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); - const func_ty_info = func_ty.fnInfo(); - const fn_params_len = func_ty_info.param_types.len; + const fn_params_len = mod.typeToFunc(func_ty).?.param_types.len; const parent_comptime = 
block.is_comptime; // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument. var extra_index: usize = 0; @@ -6330,32 +6475,33 @@ fn zirCall( extra_index += 1; arg_index += 1; }) { + const func_ty_info = mod.typeToFunc(func_ty).?; const arg_end = sema.code.extra[extra.end + extra_index]; defer arg_start = arg_end; // Generate args to comptime params in comptime block. defer block.is_comptime = parent_comptime; - if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) { + if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { block.is_comptime = true; // TODO set comptime_reason } sema.inst_map.putAssumeCapacity(inst, inst: { if (arg_index >= fn_params_len) - break :inst Air.Inst.Ref.var_args_param; + break :inst Air.Inst.Ref.var_args_param_type; - if (func_ty_info.param_types[arg_index].tag() == .generic_poison) + if (func_ty_info.param_types[arg_index] == .generic_poison_type) break :inst Air.Inst.Ref.generic_poison_type; - break :inst try sema.addType(func_ty_info.param_types[arg_index]); + break :inst try sema.addType(func_ty_info.param_types[arg_index].toType()); }); const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); const resolved_ty = sema.typeOf(resolved); - if (resolved_ty.zigTypeTag() == .NoReturn) { + if (resolved_ty.zigTypeTag(mod) == .NoReturn) { return resolved; } - if (resolved_ty.isError()) { + if (resolved_ty.isError(mod)) { input_is_error = true; } resolved_args[arg_index] = resolved; @@ -6367,7 +6513,7 @@ fn zirCall( // AstGen ensures that a call instruction is always preceded by a dbg_stmt instruction. const call_dbg_node = inst - 1; - if (sema.mod.backendSupportsFeature(.error_return_trace) and sema.mod.comp.bin_file.options.error_return_tracing and + if (mod.backendSupportsFeature(.error_return_trace) and mod.comp.bin_file.options.error_return_tracing and !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace)) { const call_inst: Air.Inst.Ref = if (modifier == .always_tail) undefined else b: { @@ -6375,15 +6521,16 @@ fn zirCall( }; const return_ty = sema.typeOf(call_inst); - if (modifier != .always_tail and return_ty.isNoReturn()) + if (modifier != .always_tail and return_ty.isNoReturn(mod)) return call_inst; // call to "fn(...) noreturn", don't pop // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only // need to clean-up our own trace if we were passed to a non-error-handling expression. 
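The `@min(fn_params_len, 32)` clamp paired with the `u5` cast above reflects the new encoding: per-parameter comptime flags are packed into a 32-bit mask (`comptime_bits`, zeroed later in this patch), so only the first 32 parameters can carry the flag. A sketch of that encoding as inferred from usage here, not quoted from InternPool:

const FuncFlags = struct {
    comptime_bits: u32 = 0,

    fn setParamComptime(self: *FuncFlags, i: u5) void {
        self.comptime_bits |= @as(u32, 1) << i;
    }

    fn paramIsComptime(self: FuncFlags, i: u5) bool {
        return (self.comptime_bits >> i) & 1 != 0;
    }
};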
- if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError())) { + if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) { const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index"); + const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); // Insert a save instruction before the arg resolution + call instructions we just generated const save_inst = try block.insertInst(block_index, .{ @@ -6417,24 +6564,24 @@ fn checkCallArgumentCount( total_args: usize, member_fn: bool, ) !Type { + const mod = sema.mod; const func_ty = func_ty: { - switch (callee_ty.zigTypeTag()) { + switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; - if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { + const ptr_info = callee_ty.ptrInfo(mod); + if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const opt_child = callee_ty.optionalChild(&buf); - if (opt_child.zigTypeTag() == .Fn or (opt_child.isSinglePointer() and - opt_child.childType().zigTypeTag() == .Fn)) + const opt_child = callee_ty.optionalChild(mod); + if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and + opt_child.childType(mod).zigTypeTag(mod) == .Fn)) { const msg = msg: { const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{ - callee_ty.fmt(sema.mod), + callee_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, func_src, msg, "consider using '.?', 'orelse' or 'if'", .{}); @@ -6445,10 +6592,10 @@ fn checkCallArgumentCount( }, else => {}, } - return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)}); + return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const args_len = total_args - @boolToInt(member_fn); if (func_ty_info.is_var_args) { @@ -6475,7 +6622,7 @@ fn checkCallArgumentCount( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6488,22 +6635,23 @@ fn callBuiltin( modifier: std.builtin.CallModifier, args: []const Air.Inst.Ref, ) !void { + const mod = sema.mod; const callee_ty = sema.typeOf(builtin_fn); const func_ty = func_ty: { - switch (callee_ty.zigTypeTag()) { + switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; - if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { + const ptr_info = callee_ty.ptrInfo(mod); + if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, else => {}, } - std.debug.panic("type '{}' is not a function calling builtin 
fn", .{callee_ty.fmt(sema.mod)}); + std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) { std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len }); @@ -6511,76 +6659,6 @@ fn callBuiltin( _ = try sema.analyzeCall(block, builtin_fn, func_ty, sema.src, sema.src, modifier, false, args, null, null); } -const GenericCallAdapter = struct { - generic_fn: *Module.Fn, - precomputed_hash: u64, - func_ty_info: Type.Payload.Function.Data, - args: []const Arg, - module: *Module, - - const Arg = struct { - ty: Type, - val: Value, - is_anytype: bool, - }; - - pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool { - _ = adapted_key; - // Checking for equality may happen on an item that has been inserted - // into the map but is not yet fully initialized. In such case, the - // two initialized fields are `hash` and `generic_owner_decl`. - if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false; - - const other_comptime_args = other_key.comptime_args.?; - for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| { - const this_arg = ctx.args[i]; - const this_is_comptime = this_arg.val.tag() != .generic_poison; - const other_is_comptime = other_arg.val.tag() != .generic_poison; - const this_is_anytype = this_arg.is_anytype; - const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i)); - - if (other_is_anytype != this_is_anytype) return false; - if (other_is_comptime != this_is_comptime) return false; - - if (this_is_anytype) { - // Both are anytype parameters. - if (!this_arg.ty.eql(other_arg.ty, ctx.module)) { - return false; - } - if (this_is_comptime) { - // Both are comptime and anytype parameters with matching types. - if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.module)) { - return false; - } - } - } else if (this_is_comptime) { - // Both are comptime parameters but not anytype parameters. - // We assert no error is possible here because any lazy values must be resolved - // before inserting into the generic function hash map. - const is_eql = Value.eqlAdvanced( - this_arg.val, - this_arg.ty, - other_arg.val, - other_arg.ty, - ctx.module, - null, - ) catch unreachable; - if (!is_eql) { - return false; - } - } - } - return true; - } - - /// The implementation of the hash is in semantic analysis of function calls, so - /// that any errors when computing the hash can be properly reported. 
- pub fn hash(ctx: @This(), adapted_key: void) u64 { - _ = adapted_key; - return ctx.precomputed_hash; - } -}; - fn analyzeCall( sema: *Sema, block: *Block, @@ -6597,7 +6675,7 @@ fn analyzeCall( const mod = sema.mod; const callee_ty = sema.typeOf(func); - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const cc = func_ty_info.cc; if (cc == .Naked) { @@ -6611,7 +6689,7 @@ fn analyzeCall( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6645,7 +6723,7 @@ fn analyzeCall( var comptime_reason_buf: Block.ComptimeReason = undefined; var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_comptime_call) { - if (sema.typeRequiresComptime(func_ty_info.return_type)) |ct| { + if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| { is_comptime_call = ct; if (ct) { // stage1 can't handle doing this directly @@ -6653,7 +6731,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; } @@ -6671,7 +6749,7 @@ fn analyzeCall( func, func_src, call_src, - func_ty_info, + func_ty, ensure_result_used, uncasted_args, call_tag, @@ -6691,7 +6769,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; }, @@ -6708,18 +6786,21 @@ fn analyzeCall( if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; - const module_fn = switch (func_val.tag()) { - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, - .function => func_val.castTag(.function).?.data, - .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), - else => { - assert(callee_ty.isPtrAtRuntime()); - return sema.fail(block, call_src, "{s} call of function pointer", .{ - @as([]const u8, if (is_comptime_call) "comptime" else "inline"), - }); + .func => |function| function.index, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.declPtr(decl).val.getFunctionIndex(mod).unwrap().?, + else => { + assert(callee_ty.isPtrAtRuntime(mod)); + return sema.fail(block, call_src, "{s} call of function pointer", .{ + @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + }); + }, }, + else => unreachable, }; if (func_ty_info.is_var_args) { return sema.fail(block, call_src, "{s} call of variadic function", .{ @@ -6752,8 +6833,9 @@ fn analyzeCall( // In order to save a bit of stack space, directly modify Sema rather // than create a child one. 
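The `typeRequiresComptime` check on the return type above is what forces type-returning functions down the comptime path: if a call's result can only exist at comptime, the entire call is evaluated at comptime. In user terms:

fn Pair(comptime T: type) type {
    return struct { first: T, second: T };
}

// `type` is comptime-only, so this call takes the inline/comptime branch
// being rewritten here; it never lowers to a runtime call.
const PairOfU32 = Pair(u32);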
const parent_zir = sema.code; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - sema.code = fn_owner_decl.getFileScope().zir; + sema.code = fn_owner_decl.getFileScope(mod).zir; defer sema.code = parent_zir; try mod.declareDeclDependencyType(sema.owner_decl_index, module_fn.owner_decl, .function_body); @@ -6767,14 +6849,17 @@ fn analyzeCall( } const parent_func = sema.func; + const parent_func_index = sema.func_index; sema.func = module_fn; + sema.func_index = module_fn_index.toOptional(); defer sema.func = parent_func; + defer sema.func_index = parent_func_index; const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry; sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index; defer sema.error_return_trace_index_on_fn_entry = parent_err_ret_index; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, fn_owner_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, fn_owner_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ @@ -6797,28 +6882,18 @@ fn analyzeCall( defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); - // If it's a comptime function call, we need to memoize it as long as no external - // comptime memory is mutated. - var memoized_call_key: Module.MemoizedCall.Key = undefined; - var delete_memoized_call_key = false; - defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args); - if (is_comptime_call) { - memoized_call_key = .{ - .func = module_fn, - .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len), - }; - delete_memoized_call_key = true; - } - try sema.emitBackwardBranch(block, call_src); - // Whether this call should be memoized, set to false if the call can mutate - // comptime state. + // Whether this call should be memoized, set to false if the call can mutate comptime state. var should_memoize = true; - var new_fn_info = fn_owner_decl.ty.fnInfo(); - new_fn_info.param_types = try sema.arena.alloc(Type, new_fn_info.param_types.len); - new_fn_info.comptime_params = (try sema.arena.alloc(bool, new_fn_info.param_types.len)).ptr; + // If it's a comptime function call, we need to memoize it as long as no external + // comptime memory is mutated. + const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); + + var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?; + new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len); + new_fn_info.comptime_bits = 0; // This will have return instructions analyzed as break instructions to // the block_inst above. 
Here we are performing "comptime/inline semantic analysis" @@ -6837,31 +6912,31 @@ fn analyzeCall( &child_block, .unneeded, inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, &should_memoize, - memoized_call_key, - func_ty_info.param_types, + memoized_arg_values, + mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, ) catch |err| switch (err) { error.NeededSourceLocation => { _ = sema.inst_map.remove(inst); - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); try sema.analyzeInlineCallArg( block, &child_block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src), inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, &should_memoize, - memoized_call_key, - func_ty_info.param_types, + memoized_arg_values, + mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, ); @@ -6897,21 +6972,15 @@ fn analyzeCall( // Create a fresh inferred error set type for inline/comptime calls. const fn_ret_ty = blk: { if (module_fn.hasInferredErrorSet(mod)) { - const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); - node.data = .{ .func = module_fn }; - if (parent_func) |some| { - some.inferred_error_sets.prepend(node); - } - - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); - break :blk try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = bare_return_type, + const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ + .func = module_fn_index, }); + const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); + break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); } break :blk bare_return_type; }; - new_fn_info.return_type = fn_ret_ty; + new_fn_info.return_type = fn_ret_ty.toIntern(); const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; @@ -6920,23 +6989,22 @@ fn analyzeCall( // bug generating invalid LLVM IR. 
const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { - if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| { - const ty_inst = try sema.addType(fn_ret_ty); - try sema.air_values.append(gpa, result.val); - sema.air_instructions.set(block_inst, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, - }); - break :res2 Air.indexToRef(block_inst); + if (mod.intern_pool.getIfExists(.{ .memoized_call = .{ + .func = module_fn_index, + .arg_values = memoized_arg_values, + .result = .none, + } })) |memoized_call_index| { + const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call; + break :res2 try sema.addConstant( + mod.intern_pool.typeOf(memoized_call.result).toType(), + memoized_call.result.toValue(), + ); } } - const new_func_resolved_ty = try Type.Tag.function.create(sema.arena, new_fn_info); + const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { - try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); + try sema.emitDbgInline(block, parent_func_index.unwrap().?, module_fn_index, new_func_resolved_ty, .dbg_inline_begin); const zir_tags = sema.code.instructions.items(.tag); for (fn_info.param_body) |param| switch (zir_tags[param]) { @@ -6968,7 +7036,7 @@ fn analyzeCall( error.ComptimeReturn => break :result inlining.comptime_result, error.AnalysisFail => { const err_msg = sema.err orelse return err; - if (std.mem.eql(u8, err_msg.msg, recursive_msg)) return err; + if (mem.eql(u8, err_msg.msg, recursive_msg)) return err; try sema.errNote(block, call_src, err_msg, "called from here", .{}); err_msg.clearTrace(sema.gpa); return err; @@ -6978,11 +7046,11 @@ fn analyzeCall( break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges); }; - if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag() != .NoReturn) { + if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) { try sema.emitDbgInline( block, - module_fn, - parent_func.?, + module_fn_index, + parent_func_index.unwrap().?, mod.declPtr(parent_func.?.owner_decl).ty, .dbg_inline_end, ); @@ -6993,23 +7061,11 @@ fn analyzeCall( // TODO: check whether any external comptime memory was mutated by the // comptime function call. If so, then do not memoize the call here. - // TODO: re-evaluate whether memoized_calls needs its own arena. I think - // it should be fine to use the Decl arena for the function. 
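After this change, memoization is keyed through the intern pool: a `(func, argument values)` tuple either hits an existing `memoized_call` entry or, once the body has been evaluated, records its result. The caching idea in miniature, using a plain hash map with illustrative names rather than the real InternPool key:

const std = @import("std");

const CallKey = struct { func: u32, arg: i64 };

fn memoizedSquare(cache: *std.AutoHashMap(CallKey, i64), arg: i64) !i64 {
    const gop = try cache.getOrPut(.{ .func = 0, .arg = arg });
    if (!gop.found_existing) gop.value_ptr.* = arg * arg; // evaluate once per distinct key
    return gop.value_ptr.*;
}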
- { - var arena_allocator = std.heap.ArenaAllocator.init(gpa); - errdefer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - - for (memoized_call_key.args) |*arg| { - arg.* = try arg.*.copy(arena); - } - - try mod.memoized_calls.putContext(gpa, memoized_call_key, .{ - .val = try result_val.copy(arena), - .arena = arena_allocator.state, - }, .{ .module = mod }); - delete_memoized_call_key = false; - } + _ = try mod.intern(.{ .memoized_call = .{ + .func = module_fn_index, + .arg_values = memoized_arg_values, + .result = try result_val.intern(fn_ret_ty, mod), + } }); } break :res2 result; @@ -7028,7 +7084,7 @@ fn analyzeCall( .func_inst = func, .param_i = @intCast(u32, i), } }; - const param_ty = func_ty.fnParamType(i); + const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType(); args[i] = sema.analyzeCallArg( block, .unneeded, @@ -7037,10 +7093,10 @@ fn analyzeCall( opts, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.analyzeCallArg( block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), param_ty, uncasted_arg, opts, @@ -7052,11 +7108,11 @@ fn analyzeCall( } else { args[i] = sema.coerceVarArgParam(block, uncasted_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.coerceVarArgParam( block, uncasted_arg, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), ); unreachable; }, @@ -7067,14 +7123,14 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - try sema.queueFullTypeResolution(func_ty_info.return_type); - if (sema.owner_func != null and func_ty_info.return_type.isError()) { + try sema.queueFullTypeResolution(func_ty_info.return_type.toType()); + if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } if (try sema.resolveMaybeUndefVal(func)) |func_val| { - if (func_val.castTag(.function)) |func_obj| { - try sema.mod.ensureFuncBodyAnalysisQueued(func_obj.data); + if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| { + try mod.ensureFuncBodyAnalysisQueued(func_index); } } @@ -7096,23 +7152,24 @@ fn analyzeCall( try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src); } return sema.handleTailCall(block, call_src, func_ty, func_inst); - } else if (block.wantSafety() and func_ty_info.return_type.isNoReturn()) { + } + if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) skip_safety: { // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
- check: { - var func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; - switch (func_val.tag()) { - .function, .decl_ref => { - _ = try block.addNoOp(.unreach); - return Air.Inst.Ref.unreachable_value; + if (try sema.resolveMaybeUndefVal(func)) |func_val| { + switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .func => break :skip_safety, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| if (!mod.declPtr(decl).isExtern(mod)) break :skip_safety, + else => {}, }, - else => break :check, + else => {}, } } - try sema.safetyPanic(block, .noreturn_returned); return Air.Inst.Ref.unreachable_value; - } else if (func_ty_info.return_type.isNoReturn()) { + } + if (func_ty_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7126,17 +7183,18 @@ fn analyzeCall( } fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref { - const target = sema.mod.getTarget(); - const backend = sema.mod.comp.getZigBackend(); + const mod = sema.mod; + const target = mod.getTarget(); + const backend = mod.comp.getZigBackend(); if (!target_util.supportsTailCall(target, backend)) { return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{ @tagName(backend), @tagName(target.cpu.arch), }); } - const func_decl = sema.mod.declPtr(sema.owner_func.?.owner_decl); - if (!func_ty.eql(func_decl.ty, sema.mod)) { + const func_decl = mod.declPtr(sema.owner_func.?.owner_decl); + if (!func_ty.eql(func_decl.ty, mod)) { return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{ - func_ty.fmt(sema.mod), func_decl.ty.fmt(sema.mod), + func_ty.fmt(mod), func_decl.ty.fmt(mod), }); } _ = try block.addUnOp(.ret, result); @@ -7149,16 +7207,17 @@ fn analyzeInlineCallArg( param_block: *Block, arg_src: LazySrcLoc, inst: Zir.Inst.Index, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: *InternPool.Key.FuncType, arg_i: *usize, uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, should_memoize: *bool, - memoized_call_key: Module.MemoizedCall.Key, - raw_param_types: []const Type, + memoized_arg_values: []InternPool.Index, + raw_param_types: []const InternPool.Index, func_inst: Air.Inst.Ref, has_comptime_args: *bool, ) !void { + const mod = sema.mod; const zir_tags = sema.code.instructions.items(.tag); switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime => has_comptime_args.* = true, @@ -7174,13 +7233,14 @@ fn analyzeInlineCallArg( const param_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const param_ty = param_ty: { const raw_param_ty = raw_param_types[arg_i.*]; - if (raw_param_ty.tag() != .generic_poison) break :param_ty raw_param_ty; + if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty; const param_ty_inst = try sema.resolveBody(param_block, param_body, inst); - break :param_ty try sema.analyzeAsType(param_block, param_src, param_ty_inst); + const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst); + break :param_ty param_ty.toIntern(); }; new_fn_info.param_types[arg_i.*] = param_ty; const uncasted_arg = uncasted_args[arg_i.*]; - if (try sema.typeRequiresComptime(param_ty)) { + if (try sema.typeRequiresComptime(param_ty.toType())) { _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, 
"argument to parameter with comptime-only type must be comptime-known") catch |err| { if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; @@ -7188,7 +7248,7 @@ fn analyzeInlineCallArg( } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); } - const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{ + const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ .func_inst = func_inst, .param_i = @intCast(u32, arg_i.*), } }) catch |err| switch (err) { @@ -7202,24 +7262,20 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.tag()) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. return error.GenericPoison; }, - else => { - // Needed so that lazy values do not trigger - // assertion due to type not being resolved - // when the hash function is called. - try sema.resolveLazyValue(arg_val); - }, + else => {}, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); - memoized_call_key.args[arg_i.*] = .{ - .ty = param_ty, - .val = arg_val, - }; + // Needed so that lazy values do not trigger + // assertion due to type not being resolved + // when the hash function is called. + const resolved_arg_val = try sema.resolveLazyValue(arg_val); + should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); + memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(param_ty.toType(), mod); } else { sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg); } @@ -7233,7 +7289,7 @@ fn analyzeInlineCallArg( .param_anytype, .param_anytype_comptime => { // No coercion needed. const uncasted_arg = uncasted_args[arg_i.*]; - new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg); + new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern(); if (is_comptime_call) { sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); @@ -7241,24 +7297,20 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.tag()) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. return error.GenericPoison; }, - else => { - // Needed so that lazy values do not trigger - // assertion due to type not being resolved - // when the hash function is called. - try sema.resolveLazyValue(arg_val); - }, + else => {}, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); - memoized_call_key.args[arg_i.*] = .{ - .ty = sema.typeOf(uncasted_arg), - .val = arg_val, - }; + // Needed so that lazy values do not trigger + // assertion due to type not being resolved + // when the hash function is called. 
+ const resolved_arg_val = try sema.resolveLazyValue(arg_val); + should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); + memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(sema.typeOf(uncasted_arg), mod); } else { if (zir_tags[inst] == .param_anytype_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); @@ -7298,14 +7350,15 @@ fn analyzeGenericCallArg( uncasted_arg: Air.Inst.Ref, comptime_arg: TypedValue, runtime_args: []Air.Inst.Ref, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: InternPool.Key.FuncType, runtime_i: *u32, ) !void { - const is_runtime = comptime_arg.val.tag() == .generic_poison and - comptime_arg.ty.hasRuntimeBits() and + const mod = sema.mod; + const is_runtime = comptime_arg.val.isGenericPoison() and + comptime_arg.ty.hasRuntimeBits(mod) and !(try sema.typeRequiresComptime(comptime_arg.ty)); if (is_runtime) { - const param_ty = new_fn_info.param_types[runtime_i.*]; + const param_ty = new_fn_info.param_types[runtime_i.*].toType(); const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); try sema.queueFullTypeResolution(param_ty); runtime_args[runtime_i.*] = casted_arg; @@ -7315,10 +7368,16 @@ fn analyzeGenericCallArg( } } -fn analyzeGenericCallArgVal(sema: *Sema, block: *Block, arg_src: LazySrcLoc, uncasted_arg: Air.Inst.Ref) !Value { - const arg_val = try sema.resolveValue(block, arg_src, uncasted_arg, "parameter is comptime"); - try sema.resolveLazyValue(arg_val); - return arg_val; +fn analyzeGenericCallArgVal( + sema: *Sema, + block: *Block, + arg_src: LazySrcLoc, + arg_ty: Type, + uncasted_arg: Air.Inst.Ref, + reason: []const u8, +) !Value { + const casted_arg = try sema.coerce(block, arg_ty, uncasted_arg, arg_src); + return sema.resolveLazyValue(try sema.resolveValue(block, arg_src, casted_arg, reason)); } fn instantiateGenericCall( @@ -7327,7 +7386,7 @@ fn instantiateGenericCall( func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, - func_ty_info: Type.Payload.Function.Data, + generic_func_ty: Type, ensure_result_used: bool, uncasted_args: []const Air.Inst.Ref, call_tag: Air.Inst.Tag, @@ -7338,46 +7397,41 @@ fn instantiateGenericCall( const gpa = sema.gpa; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = switch (func_val.tag()) { - .function => func_val.castTag(.function).?.data, - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .func => |function| function.index, + .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?, else => unreachable, }; + const module_fn = mod.funcPtr(module_fn_index); // Check the Module's generic function map with an adapted context, so that we // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - const namespace = fn_owner_decl.src_namespace; + const namespace_index = fn_owner_decl.src_namespace; + const namespace = mod.namespacePtr(namespace_index); const fn_zir = namespace.file_scope.zir; const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); const zir_tags = fn_zir.instructions.items(.tag); - // This hash must match `Module.MonomorphedFuncsContext.hash`. 
- // For parameters explicitly marked comptime and simple parameter type expressions, - // we know whether a parameter is elided from a monomorphed function, and can - // use it in the hash here. However, for parameter type expressions that are not - // explicitly marked comptime and rely on previous parameter comptime values, we - // don't find out until after generating a monomorphed function whether the parameter - // type ended up being a "must-be-comptime-known" type. - var hasher = std.hash.Wyhash.init(0); - std.hash.autoHash(&hasher, module_fn.owner_decl); - - const generic_args = try sema.arena.alloc(GenericCallAdapter.Arg, func_ty_info.param_types.len); - { - var i: usize = 0; + const monomorphed_args = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(generic_func_ty).?.param_types.len); + const callee_index = callee: { + var arg_i: usize = 0; + var monomorphed_arg_i: u32 = 0; + var known_unique = false; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7386,87 +7440,90 @@ fn instantiateGenericCall( else => continue, } - const arg_ty = sema.typeOf(uncasted_args[i]); + defer arg_i += 1; + const param_ty = generic_func_ty_info.param_types[arg_i]; + const is_generic = !is_anytype and param_ty == .generic_poison_type; + + if (known_unique) { + if (is_comptime or is_anytype or is_generic) { + monomorphed_arg_i += 1; + } + continue; + } + + const uncasted_arg = uncasted_args[arg_i]; + const arg_ty = if (is_generic) mod.monomorphed_funcs.getAdapted( + Module.MonomorphedFuncAdaptedKey{ + .func = module_fn_index, + .args = monomorphed_args[0..monomorphed_arg_i], + }, + Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, + ) orelse { + known_unique = true; + monomorphed_arg_i += 1; + continue; + } else if (is_anytype) sema.typeOf(uncasted_arg).toIntern() else param_ty; + const was_comptime = is_comptime; + if (!is_comptime and try sema.typeRequiresComptime(arg_ty.toType())) is_comptime = true; if (is_comptime or is_anytype) { // Tuple default values are a part of the type and need to be // resolved to hash the type. 
- try sema.resolveTupleLazyValues(block, call_src, arg_ty); + try sema.resolveTupleLazyValues(block, call_src, arg_ty.toType()); } if (is_comptime) { - const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[i]) catch |err| switch (err) { + const casted_arg = sema.analyzeGenericCallArgVal(block, .unneeded, arg_ty.toType(), uncasted_arg, "") catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src); - _ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[i]); + const decl = mod.declPtr(block.src_decl); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); + _ = try sema.analyzeGenericCallArgVal( + block, + arg_src, + arg_ty.toType(), + uncasted_arg, + if (was_comptime) + "parameter is comptime" + else + "argument to parameter with comptime-only type must be comptime-known", + ); unreachable; }, else => |e| return e, }; - arg_val.hashUncoerced(arg_ty, &hasher, mod); - if (is_anytype) { - arg_ty.hashWithHasher(&hasher, mod); - generic_args[i] = .{ - .ty = arg_ty, - .val = arg_val, - .is_anytype = true, - }; - } else { - generic_args[i] = .{ - .ty = arg_ty, - .val = arg_val, - .is_anytype = false, - }; - } - } else if (is_anytype) { - arg_ty.hashWithHasher(&hasher, mod); - generic_args[i] = .{ - .ty = arg_ty, - .val = Value.initTag(.generic_poison), - .is_anytype = true, - }; - } else { - generic_args[i] = .{ - .ty = arg_ty, - .val = Value.initTag(.generic_poison), - .is_anytype = false, - }; + monomorphed_args[monomorphed_arg_i] = casted_arg.toIntern(); + monomorphed_arg_i += 1; + } else if (is_anytype or is_generic) { + monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty }); + monomorphed_arg_i += 1; } - - i += 1; } - } - const precomputed_hash = hasher.final(); + if (!known_unique) { + if (mod.monomorphed_funcs.getAdapted( + Module.MonomorphedFuncAdaptedKey{ + .func = module_fn_index, + .args = monomorphed_args[0..monomorphed_arg_i], + }, + Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, + )) |callee_func| break :callee mod.intern_pool.indexToKey(callee_func).func.index; + } - const adapter: GenericCallAdapter = .{ - .generic_fn = module_fn, - .precomputed_hash = precomputed_hash, - .func_ty_info = func_ty_info, - .args = generic_args, - .module = mod, - }; - const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter); - const callee = if (!gop.found_existing) callee: { - const new_module_func = try gpa.create(Module.Fn); + const new_module_func_index = try mod.createFunc(undefined); + const new_module_func = mod.funcPtr(new_module_func_index); - // This ensures that we can operate on the hash map before the Module.Fn - // struct is fully initialized. - new_module_func.hash = precomputed_hash; new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional(); new_module_func.comptime_args = null; - gop.key_ptr.* = new_module_func; try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); // Create a Decl for the new function. 
- const src_decl_index = namespace.getDeclIndex(); + const src_decl_index = namespace.getDeclIndex(mod); const src_decl = mod.declPtr(src_decl_index); - const new_decl_index = try mod.allocateNewDecl(namespace, fn_owner_decl.src_node, src_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); // TODO better names for generic function instantiations - const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ - fn_owner_decl.name, @enumToInt(new_decl_index), + const decl_name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + fn_owner_decl.name.fmt(&mod.intern_pool), @enumToInt(new_decl_index), }); new_decl.name = decl_name; new_decl.src_line = fn_owner_decl.src_line; @@ -7488,25 +7545,21 @@ fn instantiateGenericCall( assert(new_decl.dependencies.keys().len == 0); try mod.declareDeclDependencyType(new_decl_index, module_fn.owner_decl, .function_body); - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const new_func = sema.resolveGenericInstantiationType( block, - new_decl_arena_allocator, fn_zir, new_decl, new_decl_index, uncasted_args, - module_fn, - new_module_func, - namespace, - func_ty_info, + monomorphed_arg_i, + module_fn_index, + new_module_func_index, + namespace_index, + generic_func_ty, call_src, bound_arg_src, ) catch |err| switch (err) { error.GenericPoison, error.ComptimeReturn => { - new_decl_arena.deinit(); // Resolving the new function type below will possibly declare more decl dependencies // and so we remove them all here in case of error. for (new_decl.dependencies.keys()) |dep_index| { @@ -7515,16 +7568,10 @@ fn instantiateGenericCall( } assert(namespace.anon_decls.orderedRemove(new_decl_index)); mod.destroyDecl(new_decl_index); - assert(mod.monomorphed_funcs.remove(new_module_func)); - gpa.destroy(new_module_func); + mod.destroyFunc(new_module_func_index); return err; }, else => { - assert(mod.monomorphed_funcs.remove(new_module_func)); - { - errdefer new_decl_arena.deinit(); - try new_decl.finalizeNewArena(&new_decl_arena); - } // TODO look up the compile error that happened here and attach a note to it // pointing here, at the generic instantiation callsite. if (sema.owner_func) |owner_func| { @@ -7535,12 +7582,10 @@ fn instantiateGenericCall( return err; }, }; - errdefer new_decl_arena.deinit(); - try new_decl.finalizeNewArena(&new_decl_arena); break :callee new_func; - } else gop.key_ptr.*; - + }; + const callee = mod.funcPtr(callee_index); callee.branch_quota = @max(callee.branch_quota, sema.branch_quota); const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl); @@ -7548,8 +7593,7 @@ fn instantiateGenericCall( // Make a runtime call to the new function, making sure to omit the comptime args. 
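+    // `comptime_args` has one entry per parameter of the generic function;
+    // entries still holding `generic_poison` mark the parameters that remain
+    // runtime arguments of the instantiation.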
const comptime_args = callee.comptime_args.?; const func_ty = mod.declPtr(callee.owner_decl).ty; - const new_fn_info = func_ty.fnInfo(); - const runtime_args_len = @intCast(u32, new_fn_info.param_types.len); + const runtime_args_len = @intCast(u32, mod.typeToFunc(func_ty).?.param_types.len); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { var runtime_i: u32 = 0; @@ -7565,18 +7609,18 @@ fn instantiateGenericCall( uncasted_args[total_i], comptime_args[total_i], runtime_args, - new_fn_info, + mod.typeToFunc(func_ty).?, &runtime_i, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.analyzeGenericCallArg( block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, total_i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, total_i, bound_arg_src), uncasted_args[total_i], comptime_args[total_i], runtime_args, - new_fn_info, + mod.typeToFunc(func_ty).?, &runtime_i, ); unreachable; @@ -7586,16 +7630,16 @@ fn instantiateGenericCall( total_i += 1; } - try sema.queueFullTypeResolution(new_fn_info.return_type); + try sema.queueFullTypeResolution(mod.typeToFunc(func_ty).?.return_type.toType()); } if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func != null and new_fn_info.return_type.isError()) { + if (sema.owner_func != null and mod.typeToFunc(func_ty).?.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } - try sema.mod.ensureFuncBodyAnalysisQueued(callee); + try mod.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); @@ -7616,7 +7660,7 @@ fn instantiateGenericCall( if (call_tag == .call_always_tail) { return sema.handleTailCall(block, call_src, func_ty, result); } - if (new_fn_info.return_type.isNoReturn()) { + if (func_ty.fnReturnType(mod).isNoReturn(mod)) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7626,22 +7670,23 @@ fn instantiateGenericCall( fn resolveGenericInstantiationType( sema: *Sema, block: *Block, - new_decl_arena_allocator: Allocator, fn_zir: Zir, new_decl: *Decl, new_decl_index: Decl.Index, uncasted_args: []const Air.Inst.Ref, - module_fn: *Module.Fn, - new_module_func: *Module.Fn, - namespace: *Namespace, - func_ty_info: Type.Payload.Function.Data, + monomorphed_args_len: u32, + module_fn_index: Module.Fn.Index, + new_module_func: Module.Fn.Index, + namespace: Namespace.Index, + generic_func_ty: Type, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, -) !*Module.Fn { +) !Module.Fn.Index { const mod = sema.mod; const gpa = sema.gpa; const zir_tags = fn_zir.instructions.items(.tag); + const module_fn = mod.funcPtr(module_fn_index); const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); // Re-run the block that creates the function, with the comptime parameters @@ -7652,23 +7697,26 @@ fn resolveGenericInstantiationType( .mod = mod, .gpa = gpa, .arena = sema.arena, - .perm_arena = new_decl_arena_allocator, .code = fn_zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, - .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), + .owner_func_index = .none, + // TODO: fully migrate functions into InternPool + .comptime_args = try mod.tmp_hack_arena.allocator().alloc(TypedValue, uncasted_args.len), 
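+        // Matching `comptime_args_fn_inst` against `func_inst` later tells
+        // `funcCommon` to reuse `preallocated_new_func` and these comptime
+        // args instead of treating the body as a fresh generic function.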
.comptime_args_fn_inst = module_fn.zir_body_inst, - .preallocated_new_func = new_module_func, + .preallocated_new_func = new_module_func.toOptional(), .is_generic_instantiation = true, .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, + .comptime_mutable_decls = sema.comptime_mutable_decls, }; defer child_sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ @@ -7690,18 +7738,19 @@ fn resolveGenericInstantiationType( var arg_i: usize = 0; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7719,8 +7768,8 @@ fn resolveGenericInstantiationType( if (try sema.typeRequiresComptime(arg_ty)) { const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src); + const decl = mod.declPtr(block.src_decl); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known"); unreachable; }, @@ -7752,50 +7801,61 @@ fn resolveGenericInstantiationType( const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst); const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable; - const new_func = new_func_val.castTag(.function).?.data; - errdefer new_func.deinit(gpa); + const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; assert(new_func == new_module_func); + const monomorphed_args_index = @intCast(u32, mod.monomorphed_func_keys.items.len); + const monomorphed_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, monomorphed_args_len); + var monomorphed_arg_i: u32 = 0; + try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, monomorphed_args_len + 1, .{ .mod = mod }); + arg_i = 0; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; + var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_anytype = true; + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { + is_anytype = true; is_comptime = true; }, else => continue, } - // We populate the Type here regardless because it is needed by - // `GenericCallAdapter.eql` as well as function body analysis. - // Whether it is anytype is communicated by `isAnytypeParam`. 
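+            // A parameter type that could not be resolved until instantiation
+            // is recorded as `generic_poison_type` in the generic function's type.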
- const arg = child_sema.inst_map.get(inst).?; - const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator); + const param_ty = generic_func_ty_info.param_types[arg_i]; + const is_generic = !is_anytype and param_ty == .generic_poison_type; - if (try sema.typeRequiresComptime(copied_arg_ty)) { - is_comptime = true; - } + const arg = child_sema.inst_map.get(inst).?; + const arg_ty = child_sema.typeOf(arg); + + if (is_generic) if (mod.monomorphed_funcs.fetchPutAssumeCapacityContext(.{ + .func = module_fn_index, + .args_index = monomorphed_args_index, + .args_len = monomorphed_arg_i, + }, arg_ty.toIntern(), .{ .mod = mod })) |kv| assert(kv.value == arg_ty.toIntern()); + if (!is_comptime and try sema.typeRequiresComptime(arg_ty)) is_comptime = true; if (is_comptime) { const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?; - child_sema.comptime_args[arg_i] = .{ - .ty = copied_arg_ty, - .val = try arg_val.copy(new_decl_arena_allocator), - }; + monomorphed_args[monomorphed_arg_i] = arg_val.toIntern(); + monomorphed_arg_i += 1; + child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = arg_val }; } else { - child_sema.comptime_args[arg_i] = .{ - .ty = copied_arg_ty, - .val = Value.initTag(.generic_poison), - }; + if (is_anytype or is_generic) { + monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty.toIntern() }); + monomorphed_arg_i += 1; + } + child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = Value.generic_poison }; } arg_i += 1; @@ -7804,11 +7864,11 @@ fn resolveGenericInstantiationType( try wip_captures.finalize(); // Populate the Decl ty/val with the function and its type. - new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); + new_decl.ty = child_sema.typeOf(new_func_inst); // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. - const new_fn_info = new_decl.ty.fnInfo(); - if (try sema.typeRequiresComptime(new_fn_info.return_type)) { + const new_fn_info = mod.typeToFunc(new_decl.ty).?; + if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) { return error.ComptimeReturn; } // Similarly, if the call evaluated to a generic type we need to instead @@ -7817,15 +7877,20 @@ fn resolveGenericInstantiationType( return error.GenericPoison; } - new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func); + new_decl.val = (try mod.intern(.{ .func = .{ + .ty = new_decl.ty.toIntern(), + .index = new_func, + } })).toValue(); new_decl.@"align" = 0; new_decl.has_tv = true; new_decl.owns_tv = true; new_decl.analysis = .complete; - log.debug("generic function '{s}' instantiated with type {}", .{ - new_decl.name, new_decl.ty.fmtDebug(), - }); + mod.monomorphed_funcs.putAssumeCapacityNoClobberContext(.{ + .func = module_fn_index, + .args_index = monomorphed_args_index, + .args_len = monomorphed_arg_i, + }, new_decl.val.toIntern(), .{ .mod = mod }); // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // will be populated, ensuring it will have `analyzeBody` called with the ZIR @@ -7835,46 +7900,46 @@ fn resolveGenericInstantiationType( } fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - if (!ty.isSimpleTupleOrAnonStruct()) return; - const tuple = ty.tupleFields(); - for (tuple.values, 0..) 
|field_val, i| { - try sema.resolveTupleLazyValues(block, src, tuple.types[i]); - if (field_val.tag() == .unreachable_value) continue; - try sema.resolveLazyValue(field_val); + const mod = sema.mod; + const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => |tuple| tuple, + else => return, + }; + for (tuple.types, tuple.values) |field_ty, field_val| { + try sema.resolveTupleLazyValues(block, src, field_ty.toType()); + if (field_val == .none) continue; + // TODO: mutate in intern pool + _ = try sema.resolveLazyValue(field_val.toValue()); } } fn emitDbgInline( sema: *Sema, block: *Block, - old_func: *Module.Fn, - new_func: *Module.Fn, + old_func: Module.Fn.Index, + new_func: Module.Fn.Index, new_func_ty: Type, tag: Air.Inst.Tag, ) CompileError!void { - if (sema.mod.comp.bin_file.options.strip) return; + const mod = sema.mod; + if (mod.comp.bin_file.options.strip) return; // Recursive inline call; no dbg_inline needed. if (old_func == new_func) return; - try sema.air_values.append(sema.gpa, try Value.Tag.function.create(sema.arena, new_func)); _ = try block.addInst(.{ .tag = tag, - .data = .{ .ty_pl = .{ + .data = .{ .ty_fn = .{ .ty = try sema.addType(new_func_ty), - .payload = @intCast(u32, sema.air_values.items.len - 1), + .func = new_func, } }, }); } -fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - _ = block; - const tracy = trace(@src()); - defer tracy.end(); - +fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const int_type = sema.code.instructions.items(.data)[inst].int_type; - const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); - + const ty = try mod.intType(int_type.signedness, int_type.bit_count); return sema.addType(ty); } @@ -7882,43 +7947,46 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; const child_type = try sema.resolveType(block, operand_src, inst_data.operand); - if (child_type.zigTypeTag() == .Opaque) { - return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); - } else if (child_type.zigTypeTag() == .Null) { - return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); + if (child_type.zigTypeTag(mod) == .Opaque) { + return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(mod)}); + } else if (child_type.zigTypeTag(mod) == .Null) { + return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(mod)}); } - const opt_type = try Type.optional(sema.arena, child_type); + const opt_type = try Type.optional(sema.arena, child_type, mod); return sema.addType(opt_type); } fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const bin = sema.code.instructions.items(.data)[inst].bin; const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs); - assert(indexable_ty.isIndexable()); // validated by a previous instruction - if (indexable_ty.zigTypeTag() == .Struct) { - const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs)); + assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction + if (indexable_ty.zigTypeTag(mod) 
== .Struct) { + const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs), mod); return sema.addType(elem_type); } else { - const elem_type = indexable_ty.elemType2(); + const elem_type = indexable_ty.elemType2(mod); return sema.addType(elem_type); } } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const len = try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"); + const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); - const vector_type = try Type.Tag.vector.create(sema.arena, .{ - .len = @intCast(u32, len), - .elem_type = elem_type, + const vector_type = try mod.vectorType(.{ + .len = len, + .child = elem_type.toIntern(), }); return sema.addType(vector_type); } @@ -7960,9 +8028,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void { - if (elem_type.zigTypeTag() == .Opaque) { - return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(sema.mod)}); - } else if (elem_type.zigTypeTag() == .NoReturn) { + const mod = sema.mod; + if (elem_type.zigTypeTag(mod) == .Opaque) { + return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(mod)}); + } else if (elem_type.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{}); } } @@ -7975,9 +8044,10 @@ fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro if (true) { return sema.failWithUseOfAsync(block, inst_data.src()); } + const mod = sema.mod; const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node }; const return_type = try sema.resolveType(block, operand_src, inst_data.operand); - const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type); + const anyframe_type = try mod.anyframeType(return_type); return sema.addType(anyframe_type); } @@ -7986,6 +8056,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -7993,50 +8064,48 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const error_set = try sema.resolveType(block, lhs_src, extra.lhs); const payload = try sema.resolveType(block, rhs_src, extra.rhs); - if (error_set.zigTypeTag() != .ErrorSet) { + if (error_set.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{ - error_set.fmt(sema.mod), + error_set.fmt(mod), }); } try sema.validateErrorUnionPayloadType(block, payload, rhs_src); - const err_union_ty = try 
Type.errorUnion(sema.arena, error_set, payload, sema.mod); + const err_union_ty = try mod.errorUnionType(error_set, payload); return sema.addType(err_union_ty); } fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void { - if (payload_ty.zigTypeTag() == .Opaque) { + const mod = sema.mod; + if (payload_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{ - payload_ty.fmt(sema.mod), + payload_ty.fmt(mod), }); - } else if (payload_ty.zigTypeTag() == .ErrorSet) { + } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) { return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{ - payload_ty.fmt(sema.mod), + payload_ty.fmt(mod), }); } } fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const tracy = trace(@src()); - defer tracy.end(); - + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - - // Create an anonymous error set type with only this error value, and return the value. - const kv = try sema.mod.getErrorValue(inst_data.get(sema.code)); - const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key); - return sema.addConstant( - result_type, - try Value.Tag.@"error".create(sema.arena, .{ - .name = kv.key, - }), - ); + const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(name); + // Create an error set type with only this error value, and return the value. + const error_set_type = try mod.singleErrorSetType(name); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.toIntern(), + .name = name, + } })).toValue()); } fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -8044,34 +8113,26 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - switch (val.tag()) { - .@"error" => { - const payload = try sema.arena.create(Value.Payload.U64); - payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - }; - return sema.addConstant(Type.err_int, Value.initPayload(&payload.base)); - }, - - // This is not a valid combination with the type `anyerror`. - .the_only_possible_value => unreachable, - - // Assume it's already encoded as an integer. 
- else => return sema.addConstant(Type.err_int, val), - } + const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; + return sema.addConstant(Type.err_int, try mod.intValue( + Type.err_int, + try mod.getErrorValue(err_name), + )); } const op_ty = sema.typeOf(uncasted_operand); try sema.resolveInferredErrorSetTy(block, src, op_ty); - if (!op_ty.isAnyError()) { - const names = op_ty.errorSetNames(); + if (!op_ty.isAnyError(mod)) { + const names = op_ty.errorSetNames(mod); switch (names.len) { - 0 => return sema.addConstant(Type.err_int, Value.zero), - 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?), + 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), + 1 => { + const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(names[0]).?); + return sema.addIntUnsigned(Type.err_int, int); + }, else => {}, } } @@ -8084,28 +8145,26 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src); - const target = sema.mod.getTarget(); if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { - const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(target)); - if (int > sema.mod.global_error_set.count() or int == 0) + const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); + if (int > mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); - const payload = try sema.arena.create(Value.Payload.Error); - payload.* = .{ - .base = .{ .tag = .@"error" }, - .data = .{ .name = sema.mod.error_name_list.items[int] }, - }; - return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base)); + return sema.addConstant(Type.anyerror, (try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = mod.global_error_set.keys()[int], + } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand); - const zero_val = try sema.addConstant(Type.err_int, Value.zero); + const zero_val = try sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)); const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val); const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero); try sema.addSafetyCheck(block, ok, .invalid_error_code); @@ -8123,6 +8182,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; @@ -8130,7 +8190,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); - if (sema.typeOf(lhs).zigTypeTag() == .Bool and 
sema.typeOf(rhs).zigTypeTag() == .Bool) { + if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) { const msg = msg: { const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); @@ -8141,32 +8201,32 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr } const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); - if (lhs_ty.zigTypeTag() != .ErrorSet) - return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(sema.mod)}); - if (rhs_ty.zigTypeTag() != .ErrorSet) - return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); + if (lhs_ty.zigTypeTag(mod) != .ErrorSet) + return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(mod)}); + if (rhs_ty.zigTypeTag(mod) != .ErrorSet) + return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(mod)}); // Anything merged with anyerror is anyerror. - if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { + if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) { return Air.Inst.Ref.anyerror_type; } - if (lhs_ty.castTag(.error_set_inferred)) |payload| { - try sema.resolveInferredErrorSet(block, src, payload.data); + if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); // isAnyError might have changed from a false negative to a true positive after resolution. - if (lhs_ty.isAnyError()) { + if (lhs_ty.isAnyError(mod)) { return Air.Inst.Ref.anyerror_type; } } - if (rhs_ty.castTag(.error_set_inferred)) |payload| { - try sema.resolveInferredErrorSet(block, src, payload.data); + if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); // isAnyError might have changed from a false negative to a true positive after resolution. 
- if (rhs_ty.isAnyError()) { + if (rhs_ty.isAnyError(mod)) { return Air.Inst.Ref.anyerror_type; } } - const err_set_ty = try lhs_ty.errorSetMerge(sema.arena, rhs_ty); + const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty); return sema.addType(err_set_ty); } @@ -8175,27 +8235,27 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); - return sema.addConstant( - Type.initTag(.enum_literal), - try Value.Tag.enum_literal.create(sema.arena, duped_name), - ); + const name = inst_data.get(sema.code); + return sema.addConstant(.{ .ip_index = .enum_literal_type }, (try mod.intern(.{ + .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name), + })).toValue()); } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const arena = sema.arena; + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) { .Enum => operand, .Union => blk: { const union_ty = try sema.resolveTypeFields(operand_ty); - const tag_ty = union_ty.unionTagType() orelse { + const tag_ty = union_ty.unionTagType(mod) orelse { return sema.fail( block, operand_src, @@ -8207,22 +8267,20 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => { return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); }, }; const enum_tag_ty = sema.typeOf(enum_tag); - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); + const int_tag_ty = enum_tag_ty.intTagType(mod); if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { - return sema.addConstant(int_tag_ty, opv); + return sema.addConstant(int_tag_ty, try mod.getCoerced(opv, int_tag_ty)); } if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| { - var buffer: Value.Payload.U64 = undefined; - const val = enum_tag_val.enumToInt(enum_tag_ty, &buffer); + const val = try enum_tag_val.enumToInt(enum_tag_ty, mod); return sema.addConstant(int_tag_ty, try val.copy(sema.arena)); } @@ -8231,6 +8289,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -8239,24 +8298,23 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); - if (dest_ty.zigTypeTag() != .Enum) { - return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(sema.mod)}); + if (dest_ty.zigTypeTag(mod) != .Enum) { + return sema.fail(block, dest_ty_src, "expected enum, found 
'{}'", .{dest_ty.fmt(mod)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); if (try sema.resolveMaybeUndefVal(operand)) |int_val| { - if (dest_ty.isNonexhaustiveEnum()) { - var buffer: Type.Payload.Bits = undefined; - const int_tag_ty = dest_ty.intTagType(&buffer); + if (dest_ty.isNonexhaustiveEnum(mod)) { + const int_tag_ty = dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { - return sema.addConstant(dest_ty, int_val); + return sema.addConstant(dest_ty, try mod.getCoerced(int_val, dest_ty)); } const msg = msg: { const msg = try sema.errMsg( block, src, "int value '{}' out of range of non-exhaustive enum '{}'", - .{ int_val.fmtValue(sema.typeOf(operand), sema.mod), dest_ty.fmt(sema.mod) }, + .{ int_val.fmtValue(sema.typeOf(operand), mod), dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -8264,7 +8322,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; return sema.failWithOwnedErrorMsg(msg); } - if (int_val.isUndef()) { + if (int_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, operand_src); } if (!(try sema.enumHasInt(dest_ty, int_val))) { @@ -8273,7 +8331,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, src, "enum '{}' has no tag with value '{}'", - .{ dest_ty.fmt(sema.mod), int_val.fmtValue(sema.typeOf(operand), sema.mod) }, + .{ dest_ty.fmt(mod), int_val.fmtValue(sema.typeOf(operand), mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -8281,7 +8339,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; return sema.failWithOwnedErrorMsg(msg); } - return sema.addConstant(dest_ty, int_val); + return sema.addConstant(dest_ty, try mod.getCoerced(int_val, dest_ty)); } if (try sema.typeHasOnePossibleValue(dest_ty)) |opv| { @@ -8295,8 +8353,8 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.requireRuntimeBlock(block, src, operand_src); const result = try block.addTyOp(.intcast, dest_ty, operand); - if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum() and - sema.mod.backendSupportsFeature(.is_named_enum_value)) + if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and + mod.backendSupportsFeature(.is_named_enum_value)) { const ok = try block.addUnOp(.is_named_enum_value, result); try sema.addSafetyCheck(block, ok, .invalid_enum_value); @@ -8329,49 +8387,44 @@ fn analyzeOptionalPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const optional_ptr_ty = sema.typeOf(optional_ptr); - assert(optional_ptr_ty.zigTypeTag() == .Pointer); + assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer); - const opt_type = optional_ptr_ty.elemType(); - if (opt_type.zigTypeTag() != .Optional) { - return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)}); + const opt_type = optional_ptr_ty.childType(mod); + if (opt_type.zigTypeTag(mod) != .Optional) { + return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(mod)}); } - const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ + const child_type = opt_type.optionalChild(mod); + const child_pointer = try Type.ptr(sema.arena, mod, .{ .pointee_type = child_type, - .mutable = !optional_ptr_ty.isConstPtr(), - .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), + .mutable 
= !optional_ptr_ty.isConstPtr(mod), + .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the optional non-null bit would be set that way. But in this case, // we need to emit a runtime instruction to do it. _ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); } - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { - if (val.isNull()) { + if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, + } })).toValue()); } } @@ -8397,21 +8450,22 @@ fn zirOptionalPayload( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const result_ty = switch (operand_ty.zigTypeTag()) { - .Optional => try operand_ty.optionalChildAlloc(sema.arena), + const result_ty = switch (operand_ty.zigTypeTag(mod)) { + .Optional => operand_ty.optionalChild(mod), .Pointer => t: { - if (operand_ty.ptrSize() != .C) { + if (operand_ty.ptrSize(mod) != .C) { return sema.failWithExpectedOptionalType(block, src, operand_ty); } // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; - const ptr_info = operand_ty.ptrInfo().data; - break :t try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = try ptr_info.pointee_type.copy(sema.arena), + const ptr_info = operand_ty.ptrInfo(mod); + break :t try Type.ptr(sema.arena, mod, .{ + .pointee_type = ptr_info.pointee_type, .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, @@ -8424,13 +8478,10 @@ fn zirOptionalPayload( }; if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.isNull()) { - return sema.fail(block, src, "unable to unwrap null", .{}); - } - if (val.castTag(.opt_payload)) |payload| { - return sema.addConstant(result_ty, payload.data); - } - return sema.addConstant(result_ty, val); + return if (val.optionalValue(mod)) |payload| + sema.addConstant(result_ty, payload) + else + sema.fail(block, src, "unable to unwrap null", .{}); } try sema.requireRuntimeBlock(block, src, null); @@ -8450,14 +8501,15 @@ fn zirErrUnionPayload( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_src = src; const err_union_ty = 
sema.typeOf(operand); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false); @@ -8468,24 +8520,27 @@ fn analyzeErrUnionPayload( block: *Block, src: LazySrcLoc, err_union_ty: Type, - operand: Zir.Inst.Ref, + operand: Air.Inst.Ref, operand_src: LazySrcLoc, safety_check: bool, ) CompileError!Air.Inst.Ref { - const payload_ty = err_union_ty.errorUnionPayload(); + const mod = sema.mod; + const payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - if (val.getError()) |name| { - return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); + if (val.getErrorName(mod).unwrap()) |name| { + return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)}); } - const data = val.castTag(.eu_payload).?.data; - return sema.addConstant(payload_ty, data); + return sema.addConstant( + payload_ty, + mod.intern_pool.indexToKey(val.toIntern()).error_union.val.payload.toValue(), + ); } try sema.requireRuntimeBlock(block, src, null); // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty()) + !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err, .is_non_err); } @@ -8517,52 +8572,46 @@ fn analyzeErrUnionPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - assert(operand_ty.zigTypeTag() == .Pointer); + assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { + if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(mod), }); } - const err_union_ty = operand_ty.elemType(); - const payload_ty = err_union_ty.errorUnionPayload(); - const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ + const err_union_ty = operand_ty.childType(mod); + const payload_ty = err_union_ty.errorUnionPayload(mod); + const operand_pointer_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = payload_ty, - .mutable = !operand_ty.isConstPtr(), - .@"addrspace" = operand_ty.ptrAddressSpace(), + .mutable = !operand_ty.isConstPtr(mod), + .@"addrspace" = operand_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the error union error code would be set that way. But in this case, // we need to emit a runtime instruction to do it. 
try sema.requireRuntimeBlock(block, src, null); _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); } - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { - if (val.getError()) |name| { - return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); + if (val.getErrorName(mod).unwrap()) |name| { + return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)}); } - - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, + } })).toValue()); } } @@ -8570,7 +8619,7 @@ fn analyzeErrUnionPayloadPtr( // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty()) + !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr); } @@ -8594,18 +8643,21 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - if (operand_ty.zigTypeTag() != .ErrorUnion) { + if (operand_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); } - const result_ty = operand_ty.errorUnionSet(); + const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - assert(val.getError() != null); - return sema.addConstant(result_ty, val); + return sema.addConstant(result_ty, (try mod.intern(.{ .err = .{ + .ty = result_ty.toIntern(), + .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -8617,23 +8669,24 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - assert(operand_ty.zigTypeTag() == .Pointer); + assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { + if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(mod), }); } - const result_ty = operand_ty.elemType().errorUnionSet(); + const result_ty = operand_ty.childType(mod).errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, 
operand_ty)) |val| { - assert(val.getError() != null); + assert(val.getErrorName(mod) != .none); return sema.addConstant(result_ty, val); } } @@ -8667,7 +8720,7 @@ fn zirFunc( break :blk ret_ty; } else |err| switch (err) { error.GenericPoison => { - break :blk Type.initTag(.generic_poison); + break :blk Type.generic_poison; }, else => |e| return e, } @@ -8677,8 +8730,7 @@ fn zirFunc( extra_index += ret_ty_body.len; const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - break :blk try ret_ty_val.toType(&buffer).copy(sema.arena); + break :blk ret_ty_val.toType(); }, }; @@ -8745,10 +8797,10 @@ fn resolveGenericBody( }; switch (err) { error.GenericPoison => { - if (dest_ty.tag() == .type) { - return Value.initTag(.generic_poison_type); + if (dest_ty.toIntern() == .type_type) { + return Value.generic_poison_type; } else { - return Value.initTag(.generic_poison); + return Value.generic_poison; } }, else => |e| return e, @@ -8822,7 +8874,7 @@ fn handleExternLibName( const FuncLinkSection = union(enum) { generic, default, - explicit: []const u8, + explicit: InternPool.NullTerminatedString, }; fn funcCommon( @@ -8849,11 +8901,13 @@ fn funcCommon( noalias_bits: u32, is_noinline: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset }; const func_src = LazySrcLoc.nodeOffset(src_node_offset); - var is_generic = bare_return_type.tag() == .generic_poison or + var is_generic = bare_return_type.isGenericPoison() or alignment == null or address_space == null or section == .generic or @@ -8869,70 +8923,42 @@ fn funcCommon( } var destroy_fn_on_error = false; - const new_func: *Module.Fn = new_func: { + const new_func_index = new_func: { if (!has_body) break :new_func undefined; if (sema.comptime_args_fn_inst == func_inst) { - const new_func = sema.preallocated_new_func.?; - sema.preallocated_new_func = null; // take ownership - break :new_func new_func; + const new_func_index = sema.preallocated_new_func.unwrap().?; + sema.preallocated_new_func = .none; // take ownership + break :new_func new_func_index; } destroy_fn_on_error = true; - const new_func = try sema.gpa.create(Module.Fn); + var new_func: Module.Fn = undefined; // Set this here so that the inferred return type can be printed correctly if it appears in an error. new_func.owner_decl = sema.owner_decl_index; - break :new_func new_func; + const new_func_index = try mod.createFunc(new_func); + break :new_func new_func_index; }; - errdefer if (destroy_fn_on_error) sema.gpa.destroy(new_func); + errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index); - var maybe_inferred_error_set_node: ?*Module.Fn.InferredErrorSetListNode = null; - errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node); - // Note: no need to errdefer since this will still be in its default state at the end of the function. - - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const fn_ty: Type = fn_ty: { - // Hot path for some common function types. - // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated. - if (!is_generic and block.params.items.len == 0 and !var_args and !inferred_error_set and - alignment.? == 0 and - address_space.? 
== target_util.defaultAddressSpace(target, .function) and - section == .default and - !is_noinline) - { - if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Unspecified) { - break :fn_ty Type.initTag(.fn_noreturn_no_args); - } - - if (bare_return_type.zigTypeTag() == .Void and cc.? == .Unspecified) { - break :fn_ty Type.initTag(.fn_void_no_args); - } - - if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Naked) { - break :fn_ty Type.initTag(.fn_naked_noreturn_no_args); - } - - if (bare_return_type.zigTypeTag() == .Void and cc.? == .C) { - break :fn_ty Type.initTag(.fn_ccc_void_no_args); - } - } - // In the case of generic calling convention, or generic alignment, we use // default values which are only meaningful for the generic function, *not* // the instantiation, which can depend on comptime parameters. // Related proposal: https://github.com/ziglang/zig/issues/11834 const cc_resolved = cc orelse .Unspecified; - const param_types = try sema.arena.alloc(Type, block.params.items.len); - const comptime_params = try sema.arena.alloc(bool, block.params.items.len); - for (block.params.items, 0..) |param, i| { + const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len); + var comptime_bits: u32 = 0; + for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| { const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @truncate(u1, noalias_bits >> index) != 0; }; - param_types[i] = param.ty; + dest_param_ty.* = param.ty.toIntern(); sema.analyzeParameter( block, .unneeded, param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -8940,12 +8966,12 @@ fn funcCommon( is_noalias, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); try sema.analyzeParameter( block, - Module.paramSrc(src_node_offset, sema.gpa, decl, i), + Module.paramSrc(src_node_offset, mod, decl, i), param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -8961,7 +8987,7 @@ fn funcCommon( var ret_ty_requires_comptime = false; const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: { ret_ty_requires_comptime = ret_comptime; - break :rp bare_return_type.tag() == .generic_poison; + break :rp bare_return_type.isGenericPoison(); } else |err| switch (err) { error.GenericPoison => rp: { is_generic = true; @@ -8970,43 +8996,41 @@ fn funcCommon( else => |e| return e, }; - const return_type = if (!inferred_error_set or ret_poison) + const return_type: Type = if (!inferred_error_set or ret_poison) bare_return_type else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); - const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); - node.data = .{ .func = new_func }; - maybe_inferred_error_set_node = node; - - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); - break :blk try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = bare_return_type, + const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ + .func = new_func_index, }); + const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); + break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); }; - if (!return_type.isValidReturnType()) { - const opaque_str = if (return_type.zigTypeTag() == .Opaque) "opaque " else ""; + if (!return_type.isValidReturnType(mod)) { + 
const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{ - opaque_str, return_type.fmt(sema.mod), + opaque_str, return_type.fmt(mod), }); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, return_type); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - if (!ret_poison and !Type.fnCallingConventionAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) { + if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and + !try sema.validateExternType(return_type, .ret_ty)) + { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ - return_type.fmt(sema.mod), @tagName(cc_resolved), + return_type.fmt(mod), @tagName(cc_resolved), }); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl), return_type, .ret_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty); try sema.addDeclaredHereNote(msg, return_type); break :msg msg; @@ -9024,9 +9048,9 @@ fn funcCommon( block, ret_ty_src, "function with comptime-only return type '{}' requires all parameters to be comptime", - .{return_type.fmt(sema.mod)}, + .{return_type.fmt(mod)}, ); - try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl), return_type); + try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type); const tags = sema.code.instructions.items(.tag); const data = sema.code.instructions.items(.data); @@ -9049,7 +9073,7 @@ fn funcCommon( return sema.failWithOwnedErrorMsg(msg); } - const arch = sema.mod.getTarget().cpu.arch; + const arch = mod.getTarget().cpu.arch; if (switch (cc_resolved) { .Unspecified, .C, .Naked, .Async, .Inline => null, .Interrupt => switch (arch) { @@ -9092,8 +9116,7 @@ fn funcCommon( return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{}); } if (is_generic and sema.no_partial_func_ty) return error.GenericPoison; - for (comptime_params) |ct| is_generic = is_generic or ct; - is_generic = is_generic or ret_ty_requires_comptime; + is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime; if (!is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can @@ -9102,68 +9125,58 @@ fn funcCommon( _ = try sema.resolveTypeFields(unresolved_stack_trace_ty); } - break :fn_ty try Type.Tag.function.create(sema.arena, .{ + break :fn_ty try mod.funcType(.{ .param_types = param_types, - .comptime_params = comptime_params.ptr, - .return_type = return_type, + .noalias_bits = noalias_bits, + .comptime_bits = comptime_bits, + .return_type = return_type.toIntern(), .cc = cc_resolved, .cc_is_generic = cc == null, - .alignment = alignment orelse 0, + .alignment = if (alignment) |a| InternPool.Alignment.fromByteUnits(a) else .none, .align_is_generic = alignment == null, .section_is_generic = section == .generic, .addrspace_is_generic = address_space == null, .is_var_args = var_args, .is_generic = is_generic, .is_noinline = is_noinline, - .noalias_bits = noalias_bits, }); }; sema.owner_decl.@"linksection" = switch 
(section) { - .generic => undefined, - .default => null, - .explicit => |section_name| try sema.perm_arena.dupeZ(u8, section_name), + .generic => .none, + .default => .none, + .explicit => |section_name| section_name.toOptional(), }; sema.owner_decl.@"align" = alignment orelse 0; sema.owner_decl.@"addrspace" = address_space orelse .generic; if (is_extern) { - const new_extern_fn = try sema.gpa.create(Module.ExternFn); - errdefer sema.gpa.destroy(new_extern_fn); - - new_extern_fn.* = Module.ExternFn{ - .owner_decl = sema.owner_decl_index, - .lib_name = null, - }; - - if (opt_lib_name) |lib_name| { - new_extern_fn.lib_name = try sema.handleExternLibName(block, .{ - .node_offset_lib_name = src_node_offset, - }, lib_name); - } - - const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn); - extern_fn_payload.* = .{ - .base = .{ .tag = .extern_fn }, - .data = new_extern_fn, - }; - return sema.addConstant(fn_ty, Value.initPayload(&extern_fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{ + .ty = fn_ty.toIntern(), + .decl = sema.owner_decl_index, + .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString( + gpa, + try sema.handleExternLibName(block, .{ + .node_offset_lib_name = src_node_offset, + }, lib_name), + )).toOptional() else .none, + } })).toValue()); } if (!has_body) { return sema.addType(fn_ty); } - const is_inline = fn_ty.fnCallingConvention() == .Inline; + const is_inline = fn_ty.fnCallingConvention(mod) == .Inline; const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none; const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: { break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; } else null; + const new_func = mod.funcPtr(new_func_index); const hash = new_func.hash; const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl; - const fn_payload = try sema.arena.create(Value.Payload.Function); new_func.* = .{ .state = anal_state, .zir_body_inst = func_inst, @@ -9178,15 +9191,10 @@ fn funcCommon( .branch_quota = default_branch_quota, .is_noinline = is_noinline, }; - if (maybe_inferred_error_set_node) |node| { - new_func.inferred_error_sets.prepend(node); - } - maybe_inferred_error_set_node = null; - fn_payload.* = .{ - .base = .{ .tag = .function }, - .data = new_func, - }; - return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{ + .ty = fn_ty.toIntern(), + .index = new_func_index, + } })).toValue()); } fn analyzeParameter( @@ -9194,29 +9202,32 @@ fn analyzeParameter( block: *Block, param_src: LazySrcLoc, param: Block.Param, - comptime_params: []bool, + comptime_bits: *u32, i: usize, is_generic: *bool, cc: std.builtin.CallingConvention, has_body: bool, is_noalias: bool, ) !void { + const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); - comptime_params[i] = param.is_comptime or requires_comptime; - const this_generic = param.ty.tag() == .generic_poison; + if (param.is_comptime or requires_comptime) { + comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error + } + const this_generic = param.ty.isGenericPoison(); is_generic.* = is_generic.* or this_generic; - const target = sema.mod.getTarget(); - if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + const target = mod.getTarget(); + if (param.is_comptime and 
!target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (!param.ty.isValidParamType()) { - const opaque_str = if (param.ty.zigTypeTag() == .Opaque) "opaque " else ""; + if (!param.ty.isValidParamType(mod)) { + const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{ - opaque_str, param.ty.fmt(sema.mod), + opaque_str, param.ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); @@ -9225,15 +9236,15 @@ fn analyzeParameter( }; return sema.failWithOwnedErrorMsg(msg); } - if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { + if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ - param.ty.fmt(sema.mod), @tagName(cc), + param.ty.fmt(mod), @tagName(cc), }); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl), param.ty, .param_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param.ty, .param_ty); try sema.addDeclaredHereNote(msg, param.ty); break :msg msg; @@ -9243,12 +9254,12 @@ fn analyzeParameter( if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{ - param.ty.fmt(sema.mod), + param.ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl), param.ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param.ty); try sema.addDeclaredHereNote(msg, param.ty); break :msg msg; @@ -9256,7 +9267,7 @@ fn analyzeParameter( return sema.failWithOwnedErrorMsg(msg); } if (!sema.is_generic_instantiation and !this_generic and is_noalias and - !(param.ty.zigTypeTag() == .Pointer or param.ty.isPtrLikeOptional())) + !(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod))) { return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{}); } @@ -9283,7 +9294,7 @@ fn zirParam( const prev_preallocated_new_func = sema.preallocated_new_func; const prev_no_partial_func_type = sema.no_partial_func_ty; block.params = .{}; - sema.preallocated_new_func = null; + sema.preallocated_new_func = .none; sema.no_partial_func_ty = true; defer { block.params.deinit(sema.gpa); @@ -9309,7 +9320,7 @@ fn zirParam( // We result the param instruction with a poison value and // insert an anytype parameter. 
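The `comptime_bits` change running through `funcCommon` and `analyzeParameter` above packs per-parameter comptime-ness into a single `u32`, one bit per parameter, replacing the old heap-allocated `[]bool`; any nonzero mask then marks the function generic (`is_generic = is_generic or comptime_bits != 0 or ...`). A minimal sketch of the pattern, with illustrative helper names that are not the compiler's API, including the 32-parameter ceiling implied by the `u5` shift amount that the `TODO: handle cast error` refers to:

    const std = @import("std");

    // Illustrative helpers, not the compiler's API: one bit per parameter
    // in a u32, in place of a heap-allocated []bool of comptime flags.
    fn setParamComptime(bits: *u32, i: usize) void {
        // Mirrors the diff's `@intCast(u5, i)`: at most 32 parameters fit
        // in the mask; a wider index is the unhandled cast error.
        bits.* |= @as(u32, 1) << @intCast(u5, i);
    }

    fn paramIsComptime(bits: u32, i: usize) bool {
        return ((bits >> @intCast(u5, i)) & 1) != 0;
    }

    test "comptime_bits round trip" {
        var bits: u32 = 0;
        setParamComptime(&bits, 3);
        try std.testing.expect(paramIsComptime(bits, 3));
        try std.testing.expect(!paramIsComptime(bits, 2));
        // As in funcCommon: any set bit makes the function generic.
        try std.testing.expect(bits != 0);
    }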
try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9330,7 +9341,7 @@ fn zirParam( // We result the param instruction with a poison value and // insert an anytype parameter. try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9340,7 +9351,7 @@ fn zirParam( else => |e| return e, } or comptime_syntax; if (sema.inst_map.get(inst)) |arg| { - if (is_comptime and sema.preallocated_new_func != null) { + if (is_comptime and sema.preallocated_new_func != .none) { // We have a comptime value for this parameter so it should be elided from the // function type of the function instruction in this block. const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) { @@ -9363,7 +9374,7 @@ fn zirParam( assert(sema.inst_map.remove(inst)); } - if (sema.preallocated_new_func != null) { + if (sema.preallocated_new_func != .none) { if (try sema.typeHasOnePossibleValue(param_ty)) |opv| { // In this case we are instantiating a generic function call with a non-comptime // non-anytype parameter that ended up being a one-possible-type. @@ -9383,7 +9394,7 @@ fn zirParam( if (is_comptime) { // If this is a comptime parameter we can add a constant generic_poison // since this is also a generic parameter. - const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison)); + const result = try sema.addConstant(Type.generic_poison, Value.generic_poison); sema.inst_map.putAssumeCapacityNoClobber(inst, result); } else { // Otherwise we need a dummy runtime instruction. @@ -9428,7 +9439,7 @@ fn zirParamAnytype( // We are evaluating a generic function without any comptime args provided. 
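`sema.preallocated_new_func` above now compares against `.none` rather than `null`: the field migrates from an optional pointer to an optional index with a sentinel value. One common motivation for this pattern is that an index stays valid when the backing storage reallocates, where a pointer would dangle. A sketch under assumed toy types (`Fn` and `OptionalIndex` here are illustrative, not the compiler's):

    const std = @import("std");

    const Fn = struct { name: []const u8 };

    // A 32-bit index with a `.none` sentinel instead of `?*Fn`.
    const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32),
        _,

        fn unwrap(oi: OptionalIndex) ?u32 {
            return if (oi == .none) null else @enumToInt(oi);
        }
    };

    test "optional index with a none sentinel" {
        var funcs = std.ArrayList(Fn).init(std.testing.allocator);
        defer funcs.deinit();
        try funcs.append(.{ .name = "main" });

        var pre: OptionalIndex = .none;
        try std.testing.expect(pre == .none); // the diff's `!= .none` checks, inverted

        pre = @intToEnum(OptionalIndex, 0);
        // Unlike a pointer, the index survives the list reallocating.
        try funcs.ensureTotalCapacity(1024);
        try std.testing.expectEqualStrings("main", funcs.items[pre.unwrap().?].name);
    }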
try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9472,13 +9483,14 @@ fn analyzeAs( zir_operand: Zir.Inst.Ref, no_cast_to_comptime_int: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand = try sema.resolveInst(zir_operand); - if (zir_dest_type == .var_args_param) return operand; + if (zir_dest_type == .var_args_param_type) return operand; const dest_ty = sema.resolveType(block, src, zir_dest_type) catch |err| switch (err) { error.GenericPoison => return operand, else => |e| return e, }; - if (dest_ty.zigTypeTag() == .NoReturn) { + if (dest_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "cannot cast to noreturn", .{}); } const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index| @@ -9495,15 +9507,19 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_ty = sema.typeOf(ptr); - if (!ptr_ty.isPtrAtRuntime()) { - return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); + if (!ptr_ty.isPtrAtRuntime(mod)) { + return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { - return sema.addConstant(Type.usize, ptr_val); + return sema.addConstant( + Type.usize, + try mod.intValue(Type.usize, (try ptr_val.getUnsignedIntAdvanced(mod, sema)).?), + ); } try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); return block.addUnOp(.ptrtoint, ptr); @@ -9513,11 +9529,12 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start)); const object = try sema.resolveInst(extra.lhs); return sema.fieldVal(block, src, object, field_name, field_name_src); } @@ -9526,11 +9543,12 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index, initializing: b const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start)); const object_ptr = try sema.resolveInst(extra.lhs); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, initializing); } @@ -9544,7 +9562,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const field_name_src: LazySrcLoc = .{ 
.node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object = try sema.resolveInst(extra.lhs); - const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known"); return sema.fieldVal(block, src, object, field_name, field_name_src); } @@ -9557,7 +9575,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object_ptr = try sema.resolveInst(extra.lhs); - const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known"); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false); } @@ -9586,31 +9604,31 @@ fn intCast( operand_src: LazySrcLoc, runtime_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); if (try sema.isComptimeKnown(operand)) { return sema.coerce(block, dest_ty, operand, operand_src); - } else if (dest_scalar_ty.zigTypeTag() == .ComptimeInt) { + } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{}); } try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src); - const is_vector = dest_ty.zigTypeTag() == .Vector; + const is_vector = dest_ty.zigTypeTag(mod) == .Vector; if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| { // requirement: intCast(u0, input) iff input == 0 if (runtime_safety and block.wantSafety()) { try sema.requireRuntimeBlock(block, src, operand_src); - const target = sema.mod.getTarget(); - const wanted_info = dest_scalar_ty.intInfo(target); + const wanted_info = dest_scalar_ty.intInfo(mod); const wanted_bits = wanted_info.bits; if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero); - const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); + const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0)); + const zero_inst = try sema.addConstant(operand_ty, zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ .tag = .reduce, @@ -9618,7 +9636,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), Value.zero); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9631,9 +9649,8 @@ fn intCast( try sema.requireRuntimeBlock(block, src, operand_src); if (runtime_safety and block.wantSafety()) { - const target = sema.mod.getTarget(); - const actual_info = operand_scalar_ty.intInfo(target); - const wanted_info = 
dest_scalar_ty.intInfo(target); + const actual_info = operand_scalar_ty.intInfo(mod); + const wanted_info = dest_scalar_ty.intInfo(mod); const actual_bits = actual_info.bits; const wanted_bits = wanted_info.bits; const actual_value_bits = actual_bits - @boolToInt(actual_info.signedness == .signed); @@ -9642,26 +9659,24 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, target); - const dest_max_val = if (is_vector) - try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) - else - dest_max_val_scalar; + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty); + const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = try sema.addConstant(operand_ty, dest_max_val); const diff = try block.addBinOp(.subwrap, dest_max, operand); if (actual_info.signedness == .signed) { // Reinterpret the sign-bit as part of the value. This will make // negative differences (`operand` > `dest_max`) appear too big. - const unsigned_operand_ty = try Type.Tag.int_unsigned.create(sema.arena, actual_bits); + const unsigned_operand_ty = try mod.intType(.unsigned, actual_bits); const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff); // If the destination type is signed, then we need to double its // range to account for negative values. const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod); - break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty); - } else dest_max_val; + const one = try mod.intValue(unsigned_operand_ty, 1); + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod); + break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined); + } else try mod.getCoerced(dest_max_val, unsigned_operand_ty); const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); const ok = if (is_vector) ok: { @@ -9701,7 +9716,8 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const scalar_zero = try mod.intValue(operand_scalar_ty, 0); + const zero_val = try sema.splat(operand_ty, scalar_zero); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -9713,7 +9729,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(operand_ty, Value.zero); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); break :ok is_in_range; }; @@ -9727,6 +9743,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -9735,7 +9752,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty = try sema.resolveType(block, 
dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, @@ -9751,14 +9768,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}), + => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (operand_ty.zigTypeTag()) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(sema.mod)}), + switch (operand_ty.zigTypeTag(mod)) { + .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9769,11 +9786,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Pointer => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (operand_ty.zigTypeTag()) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(sema.mod)}), - .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(sema.mod)}), + switch (operand_ty.zigTypeTag(mod)) { + .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(mod)}), + .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9781,14 +9798,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }, - .Struct, .Union => if (dest_ty.containerLayout() == .Auto) { - const container = switch (dest_ty.zigTypeTag()) { + .Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) { + const container = switch (dest_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, }; return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{ - dest_ty.fmt(sema.mod), container, + dest_ty.fmt(mod), container, }); }, @@ -9799,7 +9816,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Vector, => {}, } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, @@ -9815,14 +9832,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", 
.{operand_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (dest_ty.zigTypeTag()) { - .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), + switch (dest_ty.zigTypeTag(mod)) { + .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(mod)}), else => {}, } @@ -9832,11 +9849,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, .Pointer => { const msg = msg: { - const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (dest_ty.zigTypeTag()) { - .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), - .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(sema.mod)}), + switch (dest_ty.zigTypeTag(mod)) { + .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(mod)}), + .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(mod)}), else => {}, } @@ -9844,14 +9861,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }, - .Struct, .Union => if (operand_ty.containerLayout() == .Auto) { - const container = switch (operand_ty.zigTypeTag()) { + .Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) { + const container = switch (operand_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, }; return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{ - operand_ty.fmt(sema.mod), container, + operand_ty.fmt(mod), container, }); }, @@ -9869,6 +9886,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -9877,31 +9895,31 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); - const target = sema.mod.getTarget(); - const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) { + const target = mod.getTarget(); + const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) { .ComptimeFloat => true, .Float => false, else => return sema.fail( block, dest_ty_src, "expected float type, found '{}'", - .{dest_ty.fmt(sema.mod)}, + .{dest_ty.fmt(mod)}, ), }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.fail( block, operand_src, "expected float type, found '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ), } if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, target)); + return sema.addConstant(dest_ty, try operand_val.floatCast(dest_ty, 
mod)); } if (dest_is_comptime_float) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{}); @@ -9944,20 +9962,21 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const elem_index = try sema.resolveInst(extra.rhs); const indexable_ty = sema.typeOf(array_ptr); - if (indexable_ty.zigTypeTag() != .Pointer) { + if (indexable_ty.zigTypeTag(mod) != .Pointer) { const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node }; const msg = msg: { const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{ - indexable_ty.fmt(sema.mod), + indexable_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - if (indexable_ty.zigTypeTag() == .Array) { + if (indexable_ty.zigTypeTag(mod) == .Array) { try sema.errNote(block, src, msg, "consider using '&' here", .{}); } break :msg msg; @@ -10054,7 +10073,7 @@ fn zirSliceLength(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const array_ptr = try sema.resolveInst(extra.lhs); const start = try sema.resolveInst(extra.start); const len = try sema.resolveInst(extra.len); - const sentinel = try sema.resolveInst(extra.sentinel); + const sentinel = if (extra.sentinel == .none) .none else try sema.resolveInst(extra.sentinel); const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node }; const start_src: LazySrcLoc = .{ .node_offset_slice_start = extra.start_src_node_offset }; const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node }; @@ -10076,6 +10095,8 @@ fn zirSwitchCapture( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; + const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); const capture_info = zir_datas[inst].switch_capture; const switch_info = zir_datas[capture_info.switch_inst].pl_node; @@ -10087,47 +10108,49 @@ fn zirSwitchCapture( const operand_is_ref = cond_tag == .switch_cond_ref; const operand_ptr = try sema.resolveInst(cond_info.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (operand_is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; if (block.inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; - if (operand_ty.zigTypeTag() == .Union) { - const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?); - const union_obj = operand_ty.cast(Type.Payload.Union).?.data; + const resolved_item_val = try sema.resolveLazyValue(item_val); + if (operand_ty.zigTypeTag(mod) == .Union) { + const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(resolved_item_val, mod).?); + const union_obj = mod.typeToUnion(operand_ty).?; const field_ty = union_obj.fields.values()[field_index].ty; if (try sema.resolveDefinedValue(block, sema.src, operand_ptr)) |union_val| { if (is_ref) { - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), - 
.@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = union_val, - .container_ty = operand_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = union_val.toIntern(), + .index = field_index, + } }, + } })).toValue()); } - const tag_and_val = union_val.castTag(.@"union").?.data; - return sema.addConstant(field_ty, tag_and_val.val); + return sema.addConstant( + field_ty, + mod.intern_pool.indexToKey(union_val.toIntern()).un.val.toValue(), + ); } if (is_ref) { - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), - .@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); } else { return block.addStructFieldVal(operand_ptr, field_index, field_ty); } } else if (is_ref) { - return sema.addConstantMaybeRef(block, operand_ty, item_val, true); + return sema.addConstantMaybeRef(block, operand_ty, resolved_item_val, true); } else { return block.inline_case_capture; } @@ -10144,7 +10167,7 @@ fn zirSwitchCapture( return operand_ptr; } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ErrorSet => if (block.switch_else_err_ty) |some| { return sema.bitCast(block, some, operand, operand_src, null); } else { @@ -10162,14 +10185,14 @@ fn zirSwitchCapture( switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index).item, }; - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Union => { - const union_obj = operand_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(operand_ty).?; const first_item = try sema.resolveInst(items[0]); // Previous switch validation ensured this will succeed const first_item_val = sema.resolveConstValue(block, .unneeded, first_item, "") catch unreachable; - const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, sema.mod).?); + const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, mod).?); const first_field = union_obj.fields.values()[first_field_index]; for (items[1..], 0..) 
|item, i| { @@ -10177,22 +10200,22 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const field_index = operand_ty.unionTagFieldIndex(item_val, sema.mod).?; + const field_index = operand_ty.unionTagFieldIndex(item_val, mod).?; const field = union_obj.fields.values()[field_index]; - if (!field.ty.eql(first_field.ty, sema.mod)) { + if (!field.ty.eql(first_field.ty, mod)) { const msg = msg: { const raw_capture_src = Module.SwitchProngSrc{ .multi_capture = capture_info.prong_index }; - const capture_src = raw_capture_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const capture_src = raw_capture_src.resolve(mod, mod.declPtr(block.src_decl), switch_info.src_node, .first); const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const raw_first_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 0 } }; - const first_item_src = raw_first_item_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const first_item_src = raw_first_item_src.resolve(mod, mod.declPtr(block.src_decl), switch_info.src_node, .first); const raw_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 1 + @intCast(u32, i) } }; - const item_src = raw_item_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); - try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(sema.mod)}); - try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(sema.mod)}); + const item_src = raw_item_src.resolve(mod, mod.declPtr(block.src_decl), switch_info.src_node, .first); + try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(mod)}); + try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(mod)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -10200,21 +10223,20 @@ fn zirSwitchCapture( } if (is_ref) { - const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{ + const field_ty_ptr = try Type.ptr(sema.arena, mod, .{ .pointee_type = first_field.ty, .@"addrspace" = .generic, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { - return sema.addConstant( - field_ty_ptr, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = op_ptr_val, - .container_ty = operand_ty, - .field_index = first_field_index, - }), - ); + return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{ + .ty = field_ty_ptr.toIntern(), + .addr = .{ .field = .{ + .base = op_ptr_val.toIntern(), + .index = first_field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); return block.addStructFieldPtr(operand_ptr, first_field_index, field_ty_ptr); @@ -10223,7 +10245,7 @@ fn zirSwitchCapture( if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| { return sema.addConstant( first_field.ty, - operand_val.castTag(.@"union").?.data.val, + mod.intern_pool.indexToKey(operand_val.toIntern()).un.val.toValue(), ); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -10231,28 +10253,23 @@ fn zirSwitchCapture( }, .ErrorSet => { if (is_multi) { - var names: Module.ErrorSet.NameMap = .{}; 
+ var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, items.len); for (items) |item| { const item_ref = try sema.resolveInst(item); // Previous switch validation ensured this will succeed - const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - names.putAssumeCapacityNoClobber( - item_val.getError().?, - {}, - ); + const item_val = sema.resolveConstLazyValue(block, .unneeded, item_ref, "") catch unreachable; + names.putAssumeCapacityNoClobber(item_val.getErrorName(mod).unwrap().?, {}); } - // names must be sorted - Module.ErrorSet.sortNames(&names); - const else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names); + const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); return sema.bitCast(block, else_error_ty, operand, operand_src, null); } else { const item_ref = try sema.resolveInst(items[0]); // Previous switch validation ensured this will succeed - const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; + const item_val = sema.resolveConstLazyValue(block, .unneeded, item_ref, "") catch unreachable; - const item_ty = try Type.Tag.error_set_single.create(sema.arena, item_val.getError().?); + const item_ty = try mod.singleErrorSetType(item_val.getErrorName(mod).unwrap().?); return sema.bitCast(block, item_ty, operand, operand_src, null); } }, @@ -10269,6 +10286,7 @@ fn zirSwitchCapture( } fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_tok; const src = inst_data.src(); @@ -10278,12 +10296,12 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const cond_data = zir_datas[Zir.refToIndex(inst_data.operand).?].un_node; const operand_ptr = try sema.resolveInst(cond_data.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; - if (operand_ty.zigTypeTag() != .Union) { + if (operand_ty.zigTypeTag(mod) != .Union) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot capture tag of non-union type '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, operand_ty); @@ -10301,6 +10319,7 @@ fn zirSwitchCond( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; @@ -10311,7 +10330,7 @@ fn zirSwitchCond( operand_ptr; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Type, .Void, .Bool, @@ -10325,8 +10344,8 @@ fn zirSwitchCond( .ErrorSet, .Enum, => { - if (operand_ty.isSlice()) { - return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}); + if (operand_ty.isSlice(mod)) { + return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}); } if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| { return sema.addConstant(operand_ty, opv); @@ -10336,12 +10355,12 @@ fn zirSwitchCond( .Union => { const union_ty = try sema.resolveTypeFields(operand_ty); - const enum_ty = 
union_ty.unionTagType() orelse { + const enum_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "switch on union with no attached enum", .{}); errdefer msg.destroy(sema.gpa); - if (union_ty.declSrcLocOrNull(sema.mod)) |union_src| { - try sema.mod.errNoteNonLazy(union_src, msg, "consider 'union(enum)' here", .{}); + if (union_ty.declSrcLocOrNull(mod)) |union_src| { + try mod.errNoteNonLazy(union_src, msg, "consider 'union(enum)' here", .{}); } break :msg msg; }; @@ -10361,17 +10380,19 @@ fn zirSwitchCond( .Vector, .Frame, .AnyFrame, - => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}), } } -const SwitchErrorSet = std.StringHashMap(Module.SwitchProngSrc); +const SwitchErrorSet = std.AutoHashMap(InternPool.NullTerminatedString, Module.SwitchProngSrc); fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const src_node_offset = inst_data.src_node; @@ -10413,14 +10434,14 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const cond_index = Zir.refToIndex(extra.data.operand).?; const raw_operand = sema.resolveInst(zir_data[cond_index].un_node.operand) catch unreachable; const target_ty = sema.typeOf(raw_operand); - break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty; + break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.childType(mod) else target_ty; }; - const union_originally = maybe_union_ty.zigTypeTag() == .Union; + const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union; // Duplicate checking variables later also used for `inline else`. var seen_enum_fields: []?Module.SwitchProngSrc = &.{}; var seen_errors = SwitchErrorSet.init(gpa); - var range_set = RangeSet.init(gpa, sema.mod); + var range_set = RangeSet.init(gpa, mod); var true_count: u8 = 0; var false_count: u8 = 0; @@ -10433,12 +10454,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var empty_enum = false; const operand_ty = sema.typeOf(operand); - const err_set = operand_ty.zigTypeTag() == .ErrorSet; + const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet; var else_error_ty: ?Type = null; // Validate usage of '_' prongs. - if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { + if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { const msg = msg: { const msg = try sema.errMsg( block, @@ -10459,14 +10480,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return sema.failWithOwnedErrorMsg(msg); } - const target = sema.mod.getTarget(); - // Validate for duplicate items, missing else prong, and invalid range. 
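`SwitchErrorSet` above moves from `std.StringHashMap` to `std.AutoHashMap(InternPool.NullTerminatedString, ...)`; this works because interned names are stable integer ids, so hashing and equality never touch the string bytes again. A toy sketch of the idea, where `Pool` and `Interned` are illustrative stand-ins rather than the real `InternPool` API:

    const std = @import("std");

    const Interned = enum(u32) { _ };

    const Pool = struct {
        map: std.StringArrayHashMapUnmanaged(void) = .{},

        // Deduplicate: the same bytes always yield the same id.
        fn intern(pool: *Pool, gpa: std.mem.Allocator, s: []const u8) !Interned {
            const gop = try pool.map.getOrPut(gpa, s);
            return @intToEnum(Interned, @intCast(u32, gop.index));
        }
    };

    test "interned ids are cheap map keys" {
        const gpa = std.testing.allocator;
        var pool = Pool{};
        defer pool.map.deinit(gpa);

        const a = try pool.intern(gpa, "OutOfMemory");
        const b = try pool.intern(gpa, "OutOfMemory");
        try std.testing.expect(a == b); // same name, same id

        var seen = std.AutoHashMap(Interned, void).init(gpa);
        defer seen.deinit();
        try seen.put(a, {});
        try std.testing.expect(seen.contains(b)); // integer hash, no string compare
    }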
- switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Union => unreachable, // handled in zirSwitchCond .Enum => { - seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); - empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(); + seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount(mod)); + empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod); @memset(seen_enum_fields, null); // `range_set` is used for non-exhaustive enum values that do not correspond to any tags. @@ -10521,7 +10540,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } else true; if (special_prong == .@"else") { - if (all_tags_handled and !operand_ty.isNonexhaustiveEnum()) return sema.fail( + if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", @@ -10539,25 +10558,25 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError for (seen_enum_fields, 0..) |seen_src, i| { if (seen_src != null) continue; - const field_name = operand_ty.enumFieldName(i); + const field_name = operand_ty.enumFieldName(i, mod); try sema.addFieldErrNote( operand_ty, i, msg, - "unhandled enumeration value: '{s}'", - .{field_name}, + "unhandled enumeration value: '{}'", + .{field_name.fmt(&mod.intern_pool)}, ); } - try sema.mod.errNoteNonLazy( - operand_ty.declSrcLoc(sema.mod), + try mod.errNoteNonLazy( + operand_ty.declSrcLoc(mod), msg, "enum '{}' declared here", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum() and !union_originally) { + } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) { return sema.fail( block, src, @@ -10614,7 +10633,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError try sema.resolveInferredErrorSetTy(block, src, operand_ty); - if (operand_ty.isAnyError()) { + if (operand_ty.isAnyError(mod)) { if (special_prong != .@"else") { return sema.fail( block, @@ -10628,7 +10647,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var maybe_msg: ?*Module.ErrorMsg = null; errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa); - for (operand_ty.errorSetNames()) |error_name| { + for (operand_ty.errorSetNames(mod)) |error_name| { if (!seen_errors.contains(error_name) and special_prong != .@"else") { const msg = maybe_msg orelse blk: { maybe_msg = try sema.errMsg( @@ -10644,8 +10663,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, src, msg, - "unhandled error value: 'error.{s}'", - .{error_name}, + "unhandled error value: 'error.{}'", + .{error_name.fmt(ip)}, ); } } @@ -10656,7 +10675,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return sema.failWithOwnedErrorMsg(msg); } - if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames().len) { + if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) { // In order to enable common patterns for generic code allow simple else bodies // else => unreachable, // else => return, @@ -10693,18 +10712,17 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError ); } - const error_names = 
operand_ty.errorSetNames(); - var names: Module.ErrorSet.NameMap = .{}; + const error_names = operand_ty.errorSetNames(mod); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, error_names.len); for (error_names) |error_name| { if (seen_errors.contains(error_name)) continue; names.putAssumeCapacityNoClobber(error_name, {}); } - - // names must be sorted - Module.ErrorSet.sortNames(&names); - else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names); + // No need to keep the hash map metadata correct; here we + // extract the (sorted) keys only. + else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); } }, .Int, .ComptimeInt => { @@ -10722,7 +10740,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, &range_set, item_ref, - operand_ty, src_node_offset, .{ .scalar = scalar_i }, ); @@ -10745,7 +10762,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, &range_set, item_ref, - operand_ty, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, ); @@ -10763,7 +10779,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError &range_set, item_first, item_last, - operand_ty, src_node_offset, .{ .range = .{ .prong = multi_i, .item = range_i } }, ); @@ -10774,13 +10789,10 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } check_range: { - if (operand_ty.zigTypeTag() == .Int) { - var arena = std.heap.ArenaAllocator.init(gpa); - defer arena.deinit(); - - const min_int = try operand_ty.minInt(arena.allocator(), target); - const max_int = try operand_ty.maxInt(arena.allocator(), target); - if (try range_set.spans(min_int, max_int, operand_ty)) { + if (operand_ty.zigTypeTag(mod) == .Int) { + const min_int = try operand_ty.minInt(mod, operand_ty); + const max_int = try operand_ty.maxInt(mod, operand_ty); + if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) { if (special_prong == .@"else") { return sema.fail( block, @@ -10878,15 +10890,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, src, "else prong required when switching on type '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ); } - var seen_values = ValueSrcMap.initContext(gpa, .{ - .ty = operand_ty, - .mod = sema.mod, - }); - defer seen_values.deinit(); + var seen_values = ValueSrcMap{}; + defer seen_values.deinit(gpa); var extra_index: usize = special.end; { @@ -10948,7 +10957,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } @@ -10991,6 +11000,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer merges.deinit(gpa); if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| { + const resolved_operand_val = try sema.resolveLazyValue(operand_val); var extra_index: usize = special.end; { var scalar_i: usize = 0; @@ -11005,8 +11015,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. 
- const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + const item_val = sema.resolveConstLazyValue(&child_block, .unneeded, item, "") catch unreachable; + if (resolved_operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11031,8 +11041,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError for (items) |item_ref| { const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. - const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + const item_val = sema.resolveConstLazyValue(&child_block, .unneeded, item, "") catch unreachable; + if (resolved_operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11050,8 +11060,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // Validation above ensured these will succeed. const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first, "") catch unreachable; const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last, "") catch unreachable; - if ((try sema.compareAll(operand_val, .gte, first_tv.val, operand_ty)) and - (try sema.compareAll(operand_val, .lte, last_tv.val, operand_ty))) + if ((try sema.compareAll(resolved_operand_val, .gte, first_tv.val, operand_ty)) and + (try sema.compareAll(resolved_operand_val, .lte, last_tv.val, operand_ty))) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11080,8 +11090,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) { return Air.Inst.Ref.unreachable_value; } - if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag() == .Enum and - (!operand_ty.isNonexhaustiveEnum() or union_originally)) + if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and + (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { try sema.zirDbgStmt(block, cond_dbg_node_index); const ok = try block.addUnOp(.is_named_enum_value, operand); @@ -11121,7 +11131,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); + var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); @@ -11133,9 +11143,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // `item` is already guaranteed to be constant known. 
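The inline range prongs below expand `first...last` by repeatedly adding one and re-comparing, with overflow handled explicitly (the diff's `error.Overflow => unreachable` branch) so a range ending at the type's maximum value still terminates. A standalone sketch of that inclusive iteration; it assumes `first <= last`, which prior validation guarantees in the diff:

    const std = @import("std");

    // Count every item in [first, last], using @addWithOverflow so a
    // range ending at maxInt(T) cannot wrap around.
    fn countRange(comptime T: type, first: T, last: T) usize {
        var n: usize = 0;
        var cur = first;
        while (true) {
            n += 1;
            if (cur == last) break;
            const next = @addWithOverflow(cur, @as(T, 1));
            if (next[1] != 0) break; // unreachable while first <= last, but safe
            cur = next[0];
        }
        return n;
    }

    test "inclusive range reaches the type's maximum" {
        try std.testing.expectEqual(@as(usize, 256), countRange(u8, 0, 255));
        try std.testing.expectEqual(@as(usize, 1), countRange(u8, 255, 255));
    }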
                const analyze_body = if (union_originally) blk: {
-                    const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
-                    const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                    break :blk field_ty.zigTypeTag() != .NoReturn;
+                    const item_val = sema.resolveConstLazyValue(block, .unneeded, item, "") catch unreachable;
+                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
+                    break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                 } else true;

                 if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) {
@@ -11197,9 +11207,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     const item_last_ref = try sema.resolveInst(last_ref);
                     const item_last = sema.resolveConstValue(block, .unneeded, item_last_ref, undefined) catch unreachable;

-                    while (item.compareAll(.lte, item_last, operand_ty, sema.mod)) : ({
+                    while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({
                         // Previous validation has resolved any possible lazy values.
-                        item = try sema.intAddScalar(item, Value.one);
+                        item = sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) {
+                            error.Overflow => unreachable,
+                            else => |e| return e,
+                        };
                     }) {
                         cases_len += 1;
@@ -11212,8 +11225,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                         if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                             error.NeededSourceLocation => {
                                 const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } };
-                                const decl = sema.mod.declPtr(case_block.src_decl);
-                                try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none));
+                                const decl = mod.declPtr(case_block.src_decl);
+                                try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
                                 unreachable;
                             },
                             else => return err,
@@ -11241,15 +11254,15 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError

                     const analyze_body = if (union_originally) blk: {
                         const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
-                        const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                        break :blk field_ty.zigTypeTag() != .NoReturn;
+                        const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
+                        break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                     } else true;

                     if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                         error.NeededSourceLocation => {
                             const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } };
-                            const decl = sema.mod.declPtr(case_block.src_decl);
-                            try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none));
+                            const decl = mod.declPtr(case_block.src_decl);
+                            try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
                             unreachable;
                         },
                         else => return err,
@@ -11285,8 +11298,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 for (items) |item_ref| {
                     const item = try sema.resolveInst(item_ref);
                     const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
-                    const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                    if (field_ty.zigTypeTag() != .NoReturn) break true;
+                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
+                    if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
                 } else false
             else
                 true;
@@ -11366,7 +11379,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
             var cond_body = try case_block.instructions.toOwnedSlice(gpa);
             defer gpa.free(cond_body);

-            var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
+            var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope);
             defer wip_captures.deinit();

             case_block.instructions.shrinkRetainingCapacity(0);
@@ -11409,18 +11422,18 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     var final_else_body: []const Air.Inst.Index = &.{};
     if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
         var emit_bb = false;
-        if (special.is_inline) switch (operand_ty.zigTypeTag()) {
+        if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
             .Enum => {
-                if (operand_ty.isNonexhaustiveEnum() and !union_originally) {
+                if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                        operand_ty.fmt(sema.mod),
+                        operand_ty.fmt(mod),
                     });
                 }
                 for (seen_enum_fields, 0..) |f, i| {
                     if (f != null) continue;
                     cases_len += 1;

-                    const item_val = try Value.Tag.enum_field_index.create(sema.arena, @intCast(u32, i));
+                    const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i));
                     const item_ref = try sema.addConstant(operand_ty, item_val);
                     case_block.inline_case_capture = item_ref;
@@ -11428,8 +11441,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     case_block.wip_capture_scope = child_block.wip_capture_scope;

                     const analyze_body = if (union_originally) blk: {
-                        const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                        break :blk field_ty.zigTypeTag() != .NoReturn;
+                        const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
+                        break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                     } else true;

                     if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
@@ -11449,17 +11462,21 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 }
             },
             .ErrorSet => {
-                if (operand_ty.isAnyError()) {
+                if (operand_ty.isAnyError(mod)) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                        operand_ty.fmt(sema.mod),
+                        operand_ty.fmt(mod),
                     });
                 }
-                for (operand_ty.errorSetNames()) |error_name| {
+                for (0..operand_ty.errorSetNames(mod).len) |i| {
+                    const error_name = operand_ty.errorSetNames(mod)[i];
                     if (seen_errors.contains(error_name)) continue;
                     cases_len += 1;

-                    const item_val = try Value.Tag.@"error".create(sema.arena, .{ .name = error_name });
-                    const item_ref = try sema.addConstant(operand_ty, item_val);
+                    const item_val = try mod.intern(.{ .err = .{
+                        .ty = operand_ty.toIntern(),
+                        .name = error_name,
+                    } });
+                    const item_ref = try sema.addConstant(operand_ty, item_val.toValue());
                     case_block.inline_case_capture = item_ref;

                     case_block.instructions.shrinkRetainingCapacity(0);
@@ -11482,7 +11499,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 while (try it.next()) |cur| {
                     cases_len += 1;

-                    const item_ref = try sema.addConstant(operand_ty, cur);
+                    const item_ref = try sema.addConstant(operand_ty, cur.toValue());
                     case_block.inline_case_capture = item_ref;

                     case_block.instructions.shrinkRetainingCapacity(0);
@@ -11539,19 +11556,19 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 }
             },
             else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                operand_ty.fmt(sema.mod),
+                operand_ty.fmt(mod),
             }),
         };

-        var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
+        var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope);
         defer wip_captures.deinit();

        case_block.instructions.shrinkRetainingCapacity(0);
        case_block.wip_capture_scope = wip_captures.scope;
        case_block.inline_case_capture = .none;

-        if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
-            operand_ty.zigTypeTag() == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
+        if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
+            operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
        {
            try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
            const ok = try case_block.addUnOp(.is_named_enum_value, operand);
@@ -11561,9 +11578,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
        const analyze_body = if (union_originally and !special.is_inline)
            for (seen_enum_fields, 0..) |seen_field, index| {
                if (seen_field != null) continue;
-                const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
+                const union_obj = mod.typeToUnion(maybe_union_ty).?;
                const field_ty = union_obj.fields.values()[index].ty;
-                if (field_ty.zigTypeTag() != .NoReturn) break true;
+                if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
            } else false
        else
            true;
@@ -11620,47 +11637,70 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 }

 const RangeSetUnhandledIterator = struct {
-    sema: *Sema,
-    ty: Type,
-    cur: Value,
-    max: Value,
+    mod: *Module,
+    cur: ?InternPool.Index,
+    max: InternPool.Index,
+    range_i: usize,
     ranges: []const RangeSet.Range,
-    range_i: usize = 0,
-    first: bool = true,
+    limbs: []math.big.Limb,
+
+    const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128);

     fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
-        const target = sema.mod.getTarget();
-        const min = try ty.minInt(sema.arena, target);
-        const max = try ty.maxInt(sema.arena, target);
-
-        return RangeSetUnhandledIterator{
-            .sema = sema,
-            .ty = ty,
-            .cur = min,
-            .max = max,
+        const mod = sema.mod;
+        const int_type = mod.intern_pool.indexToKey(ty.toIntern()).int_type;
+        const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits);
+        return .{
+            .mod = mod,
+            .cur = (try ty.minInt(mod, ty)).toIntern(),
+            .max = (try ty.maxInt(mod, ty)).toIntern(),
+            .range_i = 0,
             .ranges = range_set.ranges.items,
+            .limbs = if (needed_limbs > preallocated_limbs)
+                try sema.arena.alloc(math.big.Limb, needed_limbs)
+            else
+                &.{},
         };
     }

-    fn next(it: *RangeSetUnhandledIterator) !?Value {
-        while (it.range_i < it.ranges.len) : (it.range_i += 1) {
-            if (!it.first) {
-                it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty);
-            }
-            it.first = false;
-            if (it.cur.compareAll(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) {
-                return it.cur;
-            }
-            it.cur = it.ranges[it.range_i].last;
+    fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index {
+        if (val == it.max) return null;
+        const int = it.mod.intern_pool.indexToKey(val).int;
+
+        switch (int.storage) {
+            inline .u64, .i64 => |val_int| {
+                const next_int = @addWithOverflow(val_int, 1);
+                if (next_int[1] == 0)
+                    return (try it.mod.intValue(int.ty.toType(), next_int[0])).toIntern();
+            },
+            .big_int => {},
+            .lazy_align, .lazy_size => unreachable,
         }
-        if (!it.first) {
-            it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty);
+
+        var val_space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
+        const val_bigint = int.storage.toBigInt(&val_space);
+
+        var result_limbs: [preallocated_limbs]math.big.Limb = undefined;
+        var result_bigint = math.big.int.Mutable.init(
+            if (it.limbs.len > 0) it.limbs else &result_limbs,
+            0,
+        );
+
+        result_bigint.addScalar(val_bigint, 1);
+        return (try it.mod.intValue_big(int.ty.toType(), result_bigint.toConst())).toIntern();
+    }
+
+    fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
+        var cur = it.cur orelse return null;
+        while (it.range_i < it.ranges.len and cur == it.ranges[it.range_i].first) {
+            defer it.range_i += 1;
+            cur = (try it.addOne(it.ranges[it.range_i].last)) orelse {
+                it.cur = null;
+                return null;
+            };
        }
-        it.first = false;
-        if (it.cur.compareAll(.lte, it.max, it.ty, it.sema.mod)) {
-            return it.cur;
-        }
-        return null;
+        it.cur = try it.addOne(cur);
+        return cur;
     }
 };

@@ -11671,18 +11711,17 @@ fn resolveSwitchItemVal(
     switch_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
     range_expand: Module.SwitchProngSrc.RangeExpand,
-) CompileError!TypedValue {
+) CompileError!InternPool.Index {
+    const mod = sema.mod;
     const item = try sema.resolveInst(item_ref);
-    const item_ty = sema.typeOf(item);
     // Constructing a LazySrcLoc is costly because we only have the switch AST node.
     // Only if we know for sure we need to report a compile error do we resolve the
     // full source locations.
-    if (sema.resolveConstValue(block, .unneeded, item, "")) |val| {
-        try sema.resolveLazyValue(val);
-        return TypedValue{ .ty = item_ty, .val = val };
+    if (sema.resolveConstLazyValue(block, .unneeded, item, "")) |val| {
+        return val.toIntern();
     } else |err| switch (err) {
         error.NeededSourceLocation => {
-            const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand);
+            const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, range_expand);
             _ = try sema.resolveConstValue(block, src, item, "switch prong values must be comptime-known");
             unreachable;
         },
@@ -11696,17 +11735,17 @@ fn validateSwitchRange(
     range_set: *RangeSet,
     first_ref: Zir.Inst.Ref,
     last_ref: Zir.Inst.Ref,
-    operand_ty: Type,
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val;
-    const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val;
-    if (first_val.compareAll(.gt, last_val, operand_ty, sema.mod)) {
-        const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), src_node_offset, .first);
+    const mod = sema.mod;
+    const first = try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first);
+    const last = try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last);
+    if (first.toValue().compareScalar(.gt, last.toValue(), mod.intern_pool.typeOf(first).toType(), mod)) {
+        const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first);
         return sema.fail(block, src, "range start value is greater than the end value", .{});
     }
-    const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src);
+    const maybe_prev_src = try range_set.add(first, last, switch_prong_src);
     return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
 }

@@ -11715,12 +11754,11 @@ fn validateSwitchItem(
     block: *Block,
     range_set: *RangeSet,
     item_ref: Zir.Inst.Ref,
-    operand_ty: Type,
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
-    const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src);
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const maybe_prev_src = try range_set.add(item, item, switch_prong_src);
     return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
 }

@@ -11733,9 +11771,11 @@ fn validateSwitchItemEnum(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
-    const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val, sema.mod) orelse {
-        const maybe_prev_src = try range_set.add(item_tv.val, item_tv.val, item_tv.ty, switch_prong_src);
+    const ip = &sema.mod.intern_pool;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const int = ip.indexToKey(item).enum_tag.int;
+    const field_index = ip.indexToKey(ip.typeOf(item)).enum_type.tagValueIndex(ip, int) orelse {
+        const maybe_prev_src = try range_set.add(int, int, switch_prong_src);
         return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
     };
     const maybe_prev_src = seen_fields[field_index];
@@ -11751,9 +11791,10 @@ fn validateSwitchItemError(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const ip = &sema.mod.intern_pool;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
     // TODO: Do i need to typecheck here?
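
// Editorial aside, a minimal sketch only (this is not the compiler's actual
// RangeSet): the validateSwitchRange/validateSwitchItem hunks above now feed
// interned integer keys straight into `range_set.add`, whose job is to answer
// "which earlier prong already covers this value?". The same overlap test can
// be expressed over plain u64 ranges:
//
//     const std = @import("std");
//
//     const Range = struct { first: u64, last: u64, prong: u32 };
//
//     /// Returns the prong index of a previously added range that overlaps
//     /// [first, last], or null if the new range is disjoint from all others.
//     fn addRange(ranges: *std.ArrayList(Range), first: u64, last: u64, prong: u32) !?u32 {
//         for (ranges.items) |r| {
//             if (first <= r.last and r.first <= last) return r.prong;
//         }
//         try ranges.append(.{ .first = first, .last = last, .prong = prong });
//         return null;
//     }
//
//     test "duplicate switch ranges are detected" {
//         var ranges = std.ArrayList(Range).init(std.testing.allocator);
//         defer ranges.deinit();
//         try std.testing.expectEqual(@as(?u32, null), try addRange(&ranges, 0, 9, 0));
//         // 5...12 overlaps 0...9, so prong 0 is reported as the duplicate.
//         try std.testing.expectEqual(@as(?u32, 0), try addRange(&ranges, 5, 12, 1));
//     }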
-    const error_name = item_tv.val.castTag(.@"error").?.data.name;
+    const error_name = ip.indexToKey(item).err.name;
     const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev|
         prev.value
     else
@@ -11769,10 +11810,10 @@ fn validateSwitchDupe(
     src_node_offset: i32,
 ) CompileError!void {
     const prev_prong_src = maybe_prev_src orelse return;
-    const gpa = sema.gpa;
-    const block_src_decl = sema.mod.declPtr(block.src_decl);
-    const src = switch_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none);
-    const prev_src = prev_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none);
+    const mod = sema.mod;
+    const block_src_decl = mod.declPtr(block.src_decl);
+    const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
+    const prev_src = prev_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
     const msg = msg: {
         const msg = try sema.errMsg(
             block,
@@ -11802,20 +11843,21 @@ fn validateSwitchItemBool(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
-    if (item_val.toBool()) {
+    const mod = sema.mod;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    if (item.toValue().toBool()) {
         true_count.* += 1;
     } else {
         false_count.* += 1;
     }
     if (true_count.* + false_count.* > 2) {
-        const block_src_decl = sema.mod.declPtr(block.src_decl);
-        const src = switch_prong_src.resolve(sema.gpa, block_src_decl, src_node_offset, .none);
+        const block_src_decl = mod.declPtr(block.src_decl);
+        const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
         return sema.fail(block, src, "duplicate switch value", .{});
     }
 }

-const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage);
+const ValueSrcMap = std.AutoHashMapUnmanaged(InternPool.Index, Module.SwitchProngSrc);

 fn validateSwitchItemSparse(
     sema: *Sema,
@@ -11825,8 +11867,8 @@ fn validateSwitchItemSparse(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
-    const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const kv = (try seen_values.fetchPut(sema.gpa, item, switch_prong_src)) orelse return;
     return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
 }

@@ -11864,7 +11906,8 @@ fn validateSwitchNoRange(
 }

 fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !bool {
-    if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) return false;
+    const mod = sema.mod;
+    if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;

     const tags = sema.code.instructions.items(.tag);
     for (body) |inst| {
@@ -11900,7 +11943,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
             .as_node => try sema.zirAsNode(block, inst),
             .field_val => try sema.zirFieldVal(block, inst),
             .@"unreachable" => {
-                if (!sema.mod.comp.formatted_panics) {
+                if (!mod.comp.formatted_panics) {
                     try sema.safetyPanic(block, .unwrap_error);
                     return true;
                 }
@@ -11923,7 +11966,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
            },
            else => unreachable,
        };

-        if (sema.typeOf(air_inst).isNoReturn())
+        if (sema.typeOf(air_inst).isNoReturn(mod))
             return true;
         sema.inst_map.putAssumeCapacity(inst, air_inst);
     }
@@ -11931,19 +11974,20 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
 }

 fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
+    const mod = sema.mod;
     const index = Zir.refToIndex(cond) orelse return;
     if (sema.code.instructions.items(.tag)[index] != .is_non_err) return;

     const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
     const err_operand = try sema.resolveInst(err_inst_data.operand);
     const operand_ty = sema.typeOf(err_operand);
-    if (operand_ty.zigTypeTag() == .ErrorSet) {
+    if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
         return;
     }
     if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
-        if (!operand_ty.isError()) return;
-        if (val.getError() == null) return;
+        if (!operand_ty.isError(mod)) return;
+        if (val.getErrorName(mod) == .none) return;
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
     }
 }
@@ -11965,45 +12009,60 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
     const src = inst_data.src();

     if (try sema.resolveDefinedValue(block, src, operand)) |val| {
-        if (val.getError()) |name| {
-            return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
+        if (val.getErrorName(sema.mod).unwrap()) |name| {
+            return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&sema.mod.intern_pool)});
         }
     }
 }

 fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs);
-    const field_name = try sema.resolveConstString(block, name_src, extra.rhs, "field name must be comptime-known");
+    const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, "field name must be comptime-known");
     const ty = try sema.resolveTypeFields(unresolved_ty);
+    const ip = &mod.intern_pool;

     const has_field = hf: {
-        if (ty.isSlice()) {
-            if (mem.eql(u8, field_name, "ptr")) break :hf true;
-            if (mem.eql(u8, field_name, "len")) break :hf true;
-            break :hf false;
+        switch (ip.indexToKey(ty.toIntern())) {
+            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+                .Slice => {
+                    if (ip.stringEqlSlice(field_name, "ptr")) break :hf true;
+                    if (ip.stringEqlSlice(field_name, "len")) break :hf true;
+                    break :hf false;
+                },
+                else => {},
+            },
+            .anon_struct_type => |anon_struct| {
+                if (anon_struct.names.len != 0) {
+                    break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, field_name) != null;
+                } else {
+                    const field_index = field_name.toUnsigned(ip) orelse break :hf false;
+                    break :hf field_index < ty.structFieldCount(mod);
+                }
+            },
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false;
+                assert(struct_obj.haveFieldTypes());
+                break :hf struct_obj.fields.contains(field_name);
+            },
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                assert(union_obj.haveFieldTypes());
+                break :hf union_obj.fields.contains(field_name);
+            },
+            .enum_type => |enum_type| {
+                break :hf enum_type.nameIndex(ip, field_name) != null;
+            },
+            .array_type => break :hf ip.stringEqlSlice(field_name, "len"),
+            else => {},
         }
-        if (ty.castTag(.anon_struct)) |pl| {
-            break :hf for (pl.data.names) |name| {
-                if (mem.eql(u8, name, field_name)) break true;
-            } else false;
-        }
-        if (ty.isTuple()) {
-            const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
-            break :hf field_index < ty.structFieldCount();
-        }
-        break :hf switch (ty.zigTypeTag()) {
-            .Struct => ty.structFields().contains(field_name),
-            .Union => ty.unionFields().contains(field_name),
-            .Enum => ty.enumFields().contains(field_name),
-            .Array => mem.eql(u8, field_name, "len"),
-            else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
-                ty.fmt(sema.mod),
-            }),
-        };
+        return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
+            ty.fmt(mod),
+        });
     };
     if (has_field) {
         return Air.Inst.Ref.bool_true;
@@ -12013,20 +12072,22 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
     const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const container_type = try sema.resolveType(block, lhs_src, extra.lhs);
-    const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "decl name must be comptime-known");
+    const decl_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "decl name must be comptime-known");
     try sema.checkNamespaceType(block, lhs_src, container_type);

-    const namespace = container_type.getNamespace() orelse return Air.Inst.Ref.bool_false;
+    const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse
+        return Air.Inst.Ref.bool_false;
     if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| {
-        const decl = sema.mod.declPtr(decl_index);
-        if (decl.is_pub or decl.getFileScope() == block.getFileScope()) {
+        const decl = mod.declPtr(decl_index);
+        if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) {
             return Air.Inst.Ref.bool_true;
         }
     }
@@ -12042,12 +12103,12 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const operand_src = inst_data.src();
     const operand = inst_data.get(sema.code);

-    const result = mod.importFile(block.getFileScope(), operand) catch |err| switch (err) {
+    const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) {
         error.ImportOutsidePkgPath => {
             return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand});
         },
         error.PackageNotFound => {
-            const name = try block.getFileScope().pkg.getName(sema.gpa, mod.*);
+            const name = try block.getFileScope(mod).pkg.getName(sema.gpa, mod.*);
             defer sema.gpa.free(name);
             return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, name });
         },
@@ -12073,7 +12134,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const name = try sema.resolveConstString(block, operand_src, inst_data.operand, "file path name must be comptime-known");

-    const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) {
+    const embed_file = mod.embedFile(block.getFileScope(mod), name) catch |err| switch (err) {
         error.ImportOutsidePkgPath => {
             return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name});
         },
@@ -12087,17 +12148,23 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     var anon_decl = try block.startAnonDecl();
     defer anon_decl.deinit();

-    const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1];
-
-    // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at
+    // TODO instead of using `.bytes`, create a new value tag for pointing at
     // a `*Module.EmbedFile`. The purpose of this would be:
     // - If only the length is read and the bytes are not inspected by comptime code,
     //   there can be an optimization where the codegen backend does a copy_file_range
     //   into the final binary, and never loads the data into memory.
     // - When a Decl is destroyed, it can free the `*Module.EmbedFile`.
+    const ty = try mod.arrayType(.{
+        .len = embed_file.bytes.len,
+        .child = .u8_type,
+        .sentinel = .zero_u8,
+    });
     embed_file.owner_decl = try anon_decl.finish(
-        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len),
-        try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null),
+        ty,
+        (try mod.intern(.{ .aggregate = .{
+            .ty = ty.toIntern(),
+            .storage = .{ .bytes = embed_file.bytes },
+        } })).toValue(),
         0, // default alignment
     );

@@ -12105,16 +12172,15 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 }

 fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
-    const err_name = inst_data.get(sema.code);
-
-    // Return the error code from the function.
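
// Editorial aside, a hedged usage sketch: the zirEmbedFile hunk above now
// models an embedded file as a 0-terminated byte array interned in the
// InternPool; from user code the observable type is unchanged. "data.txt" is
// a placeholder path, so this test only compiles if such a file sits next to
// the source.
//
//     const std = @import("std");
//
//     test "embedded file is a sentinel-terminated byte array" {
//         const bytes = @embedFile("data.txt"); // type: *const [N:0]u8
//         // The 0 sentinel lives one past the end, so .len excludes it.
//         comptime std.debug.assert(@TypeOf(bytes.*) == [bytes.len:0]u8);
//         try std.testing.expect(bytes[bytes.len] == 0);
//     }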
-    const kv = try sema.mod.getErrorValue(err_name);
-    const result_inst = try sema.addConstant(
-        try Type.Tag.error_set_single.create(sema.arena, kv.key),
-        try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
-    );
-    return result_inst;
+    const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
+    _ = try mod.getErrorValue(name);
+    const error_set_type = try mod.singleErrorSetType(name);
+    return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{
+        .ty = error_set_type.toIntern(),
+        .name = name,
+    } })).toValue());
 }

 fn zirShl(
@@ -12126,6 +12192,7 @@ fn zirShl(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     sema.src = src;
@@ -12136,11 +12203,10 @@ fn zirShl(
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const target = sema.mod.getTarget();
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

-    const scalar_ty = lhs_ty.scalarType();
-    const scalar_rhs_ty = rhs_ty.scalarType();
+    const scalar_ty = lhs_ty.scalarType(mod);
+    const scalar_rhs_ty = rhs_ty.scalarType(mod);

     // TODO coerce rhs if air_tag is not shl_sat
     const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
@@ -12149,62 +12215,56 @@ fn zirShl(
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);

     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.addConstUndef(sema.typeOf(lhs));
         }
         // If rhs is 0, return lhs without doing any calculations.
         if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
             return lhs;
         }
-        if (scalar_ty.zigTypeTag() != .ComptimeInt and air_tag != .shl_sat) {
-            var bits_payload = Value.Payload.U64{
-                .base = .{ .tag = .int_u64 },
-                .data = scalar_ty.intInfo(target).bits,
-            };
-            const bit_value = Value.initPayload(&bits_payload.base);
-            if (rhs_ty.zigTypeTag() == .Vector) {
+        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
+            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
+            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                 var i: usize = 0;
-                while (i < rhs_ty.vectorLen()) : (i += 1) {
-                    var elem_value_buf: Value.ElemValueBuffer = undefined;
-                    const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                    if (rhs_elem.compareHetero(.gte, bit_value, target)) {
+                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                    const rhs_elem = try rhs_val.elemValue(mod, i);
+                    if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
-                            rhs_elem.fmtValue(scalar_ty, sema.mod),
+                            rhs_elem.fmtValue(scalar_ty, mod),
                             i,
-                            scalar_ty.fmt(sema.mod),
+                            scalar_ty.fmt(mod),
                         });
                     }
                 }
-            } else if (rhs_val.compareHetero(.gte, bit_value, target)) {
+            } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                 return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
-                    rhs_val.fmtValue(scalar_ty, sema.mod),
-                    scalar_ty.fmt(sema.mod),
+                    rhs_val.fmtValue(scalar_ty, mod),
+                    scalar_ty.fmt(mod),
                 });
             }
         }
-        if (rhs_ty.zigTypeTag() == .Vector) {
+        if (rhs_ty.zigTypeTag(mod) == .Vector) {
             var i: usize = 0;
-            while (i < rhs_ty.vectorLen()) : (i += 1) {
-                var elem_value_buf: Value.ElemValueBuffer = undefined;
-                const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
+            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                const rhs_elem = try rhs_val.elemValue(mod, i);
+                if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) {
                     return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
-                        rhs_elem.fmtValue(scalar_ty, sema.mod),
+                        rhs_elem.fmtValue(scalar_ty, mod),
                         i,
                     });
                 }
             }
-        } else if (rhs_val.compareHetero(.lt, Value.zero, target)) {
+        } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
             return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
-                rhs_val.fmtValue(scalar_ty, sema.mod),
+                rhs_val.fmtValue(scalar_ty, mod),
             });
         }
     }

     const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
-        if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
+        if (lhs_val.isUndef(mod)) return sema.addConstUndef(lhs_ty);
         const rhs_val = maybe_rhs_val orelse {
-            if (scalar_ty.zigTypeTag() == .ComptimeInt) {
+            if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                 return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
             }
             break :rs rhs_src;
@@ -12212,25 +12272,25 @@ fn zirShl(

         const val = switch (air_tag) {
             .shl_exact => val: {
-                const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod);
-                if (scalar_ty.zigTypeTag() == .ComptimeInt) {
+                const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, mod);
+                if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                     break :val shifted.wrapped_result;
                 }
-                if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) {
+                if (shifted.overflow_bit.compareAllWithZero(.eq, mod)) {
                     break :val shifted.wrapped_result;
                 }
                 return sema.fail(block, src, "operation caused overflow", .{});
             },

-            .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
-                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
+            .shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
+                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod)
             else
-                try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod),
+                try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, mod),

-            .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt)
-                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
+            .shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
+                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod)
             else
-                try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod),
+                try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, mod),

             else => unreachable,
         };
@@ -12241,11 +12301,11 @@ fn zirShl(
     const new_rhs = if (air_tag == .shl_sat) rhs: {
         // Limit the RHS type for saturating shl to be an integer as small as the LHS.
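
// Editorial aside, a sketch of the semantics zirShl enforces above in
// user-level Zig: a shift amount must be smaller than the operand's bit
// width, and an ordinary `<<` on a fixed-width integer can be checked for
// overflow with @shlWithOverflow, which returns a {result, overflow-bit}
// tuple (the same tuple Sema reads via tupleFieldValByIndex below).
//
//     const std = @import("std");
//
//     test "shift amount and overflow checks" {
//         const x: u8 = 0b1100_0000;
//         // `u8 << u3`: the RHS type alone keeps the amount in 0..7, which
//         // mirrors the "too large for operand type" error above.
//         const amt: u3 = 2;
//         const res = @shlWithOverflow(x, amt);
//         try std.testing.expectEqual(@as(u8, 0b0000_0000), res[0]);
//         try std.testing.expectEqual(@as(u1, 1), res[1]); // ones shifted out
//     }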
         if (rhs_is_comptime_int or
-            scalar_rhs_ty.intInfo(target).bits > scalar_ty.intInfo(target).bits)
+            scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
         {
             const max_int = try sema.addConstant(
                 lhs_ty,
-                try lhs_ty.maxInt(sema.arena, target),
+                try lhs_ty.maxInt(mod, lhs_ty),
             );
             const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
             break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
@@ -12256,12 +12316,11 @@ fn zirShl(
     try sema.requireRuntimeBlock(block, src, runtime_src);
     if (block.wantSafety()) {
-        const bit_count = scalar_ty.intInfo(target).bits;
+        const bit_count = scalar_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
-            const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
-
-            const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
-                const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
+            const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count);
+            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
+                const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val));
                 const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                 break :ok try block.addInst(.{
                     .tag = .reduce,
@@ -12290,7 +12349,7 @@ fn zirShl(
             } },
         });
         const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
-        const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector)
+        const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
             try block.addInst(.{
                 .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                 .data = .{ .reduce = .{
@@ -12300,7 +12359,7 @@ fn zirShl(
             })
         else
             ov_bit;
-        const zero_ov = try sema.addConstant(Type.u1, Value.zero);
+        const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0));
         const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);

         try sema.addSafetyCheck(block, no_ov, .shl_overflow);
@@ -12319,6 +12378,7 @@ fn zirShr(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     sema.src = src;
@@ -12330,94 +12390,87 @@ fn zirShr(
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    const target = sema.mod.getTarget();
-    const scalar_ty = lhs_ty.scalarType();
+    const scalar_ty = lhs_ty.scalarType(mod);

     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);

     const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.addConstUndef(lhs_ty);
         }
         // If rhs is 0, return lhs without doing any calculations.
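
// Editorial aside, a sketch of why the clamp above exists: `<<|` (shl_sat)
// tolerates shift amounts at or beyond the bit width by saturating, so Sema
// limits the RHS to min(rhs, maxInt(lhs_ty)) instead of rejecting it the way
// plain `<<` does. The user-visible behavior, hedged as my reading of the
// langref:
//
//     const std = @import("std");
//
//     test "saturating shift tolerates oversized amounts" {
//         const x: u8 = 1;
//         try std.testing.expectEqual(@as(u8, 4), x <<| 2);
//         // Amount >= bit width would be a compile error for `<<`,
//         // but simply saturates for `<<|`:
//         try std.testing.expectEqual(@as(u8, 255), x <<| 8);
//     }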
         if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
             return lhs;
         }
-        if (scalar_ty.zigTypeTag() != .ComptimeInt) {
-            var bits_payload = Value.Payload.U64{
-                .base = .{ .tag = .int_u64 },
-                .data = scalar_ty.intInfo(target).bits,
-            };
-            const bit_value = Value.initPayload(&bits_payload.base);
-            if (rhs_ty.zigTypeTag() == .Vector) {
+        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
+            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
+            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                 var i: usize = 0;
-                while (i < rhs_ty.vectorLen()) : (i += 1) {
-                    var elem_value_buf: Value.ElemValueBuffer = undefined;
-                    const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                    if (rhs_elem.compareHetero(.gte, bit_value, target)) {
+                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                    const rhs_elem = try rhs_val.elemValue(mod, i);
+                    if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
-                            rhs_elem.fmtValue(scalar_ty, sema.mod),
+                            rhs_elem.fmtValue(scalar_ty, mod),
                             i,
-                            scalar_ty.fmt(sema.mod),
+                            scalar_ty.fmt(mod),
                         });
                     }
                 }
-            } else if (rhs_val.compareHetero(.gte, bit_value, target)) {
+            } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                 return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
-                    rhs_val.fmtValue(scalar_ty, sema.mod),
-                    scalar_ty.fmt(sema.mod),
+                    rhs_val.fmtValue(scalar_ty, mod),
+                    scalar_ty.fmt(mod),
                 });
             }
         }
-        if (rhs_ty.zigTypeTag() == .Vector) {
+        if (rhs_ty.zigTypeTag(mod) == .Vector) {
             var i: usize = 0;
-            while (i < rhs_ty.vectorLen()) : (i += 1) {
-                var elem_value_buf: Value.ElemValueBuffer = undefined;
-                const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
+            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                const rhs_elem = try rhs_val.elemValue(mod, i);
+                if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) {
                     return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
-                        rhs_elem.fmtValue(scalar_ty, sema.mod),
+                        rhs_elem.fmtValue(scalar_ty, mod),
                         i,
                     });
                 }
             }
-        } else if (rhs_val.compareHetero(.lt, Value.zero, target)) {
+        } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
             return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
-                rhs_val.fmtValue(scalar_ty, sema.mod),
+                rhs_val.fmtValue(scalar_ty, mod),
             });
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef()) {
+            if (lhs_val.isUndef(mod)) {
                 return sema.addConstUndef(lhs_ty);
            }
            if (air_tag == .shr_exact) {
                // Detect if any ones would be shifted out.
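
// Editorial aside, a sketch of the shr_exact rule enforced just below:
// @shrExact asserts that no 1 bits are shifted out, which at comptime is
// exactly the "exact shift shifted out 1 bits" error, and at runtime is the
// `(x >> s) << s == x` safety check emitted further down.
//
//     const std = @import("std");
//
//     test "exact right shift" {
//         const x: u8 = 0b0001_0100;
//         try std.testing.expectEqual(@as(u8, 0b0000_0101), @shrExact(x, 2));
//         // @shrExact(x, 3) would be illegal here: bit 2 is set, so a 1
//         // would be shifted out.
//     }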
-                const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, sema.mod);
+                const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod);
                 if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
                     return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
                 }
             }
-            const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, sema.mod);
+            const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, mod);
             return sema.addConstant(lhs_ty, val);
         } else {
             break :rs lhs_src;
         }
     } else rhs_src;

-    if (maybe_rhs_val == null and scalar_ty.zigTypeTag() == .ComptimeInt) {
+    if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
         return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
     }

     try sema.requireRuntimeBlock(block, src, runtime_src);
     const result = try block.addBinOp(air_tag, lhs, rhs);
     if (block.wantSafety()) {
-        const bit_count = scalar_ty.intInfo(target).bits;
+        const bit_count = scalar_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
-            const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
+            const bit_count_val = try mod.intValue(rhs_ty.scalarType(mod), bit_count);

-            const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
-                const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
+            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
+                const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val));
                 const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                 break :ok try block.addInst(.{
                     .tag = .reduce,
@@ -12436,7 +12489,7 @@ fn zirShr(
     if (air_tag == .shr_exact) {
         const back = try block.addBinOp(.shl, result, rhs);

-        const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+        const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
             const eql = try block.addCmpVector(lhs, back, .eq);
             break :ok try block.addInst(.{
                 .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
@@ -12461,6 +12514,7 @@ fn zirBitwise(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -12475,8 +12529,8 @@ fn zirBitwise(
     const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
-    const scalar_type = resolved_type.scalarType();
-    const scalar_tag = scalar_type.zigTypeTag();
+    const scalar_type = resolved_type.scalarType(mod);
+    const scalar_tag = scalar_type.zigTypeTag(mod);

     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -12484,7 +12538,7 @@ fn zirBitwise(
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     if (!is_int) {
-        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) });
+        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) });
     }

     const runtime_src = runtime: {
@@ -12493,9 +12547,9 @@ fn zirBitwise(
         if (try sema.resolveMaybeUndefValIntable(casted_lhs)) |lhs_val| {
             if (try sema.resolveMaybeUndefValIntable(casted_rhs)) |rhs_val| {
                 const result_val = switch (air_tag) {
-                    .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, sema.mod),
-                    .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, sema.mod),
-                    .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, sema.mod),
+                    .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, mod),
+                    .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, mod),
+                    .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, mod),
                     else => unreachable,
                 };
                 return sema.addConstant(resolved_type, result_val);
@@ -12515,37 +12569,37 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };

     const operand = try sema.resolveInst(inst_data.operand);
     const operand_type = sema.typeOf(operand);
-    const scalar_type = operand_type.scalarType();
+    const scalar_type = operand_type.scalarType(mod);

-    if (scalar_type.zigTypeTag() != .Int) {
+    if (scalar_type.zigTypeTag(mod) != .Int) {
         return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
-            operand_type.fmt(sema.mod),
+            operand_type.fmt(mod),
         });
     }

     if (try sema.resolveMaybeUndefVal(operand)) |val| {
-        if (val.isUndef()) {
+        if (val.isUndef(mod)) {
             return sema.addConstUndef(operand_type);
-        } else if (operand_type.zigTypeTag() == .Vector) {
-            const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
-            var elem_val_buf: Value.ElemValueBuffer = undefined;
-            const elems = try sema.arena.alloc(Value, vec_len);
+        } else if (operand_type.zigTypeTag(mod) == .Vector) {
+            const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
+            const elems = try sema.arena.alloc(InternPool.Index, vec_len);
             for (elems, 0..) |*elem, i| {
-                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf);
-                elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
+                const elem_val = try val.elemValue(mod, i);
+                elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).intern(scalar_type, mod);
             }
-            return sema.addConstant(
-                operand_type,
-                try Value.Tag.aggregate.create(sema.arena, elems),
-            );
+            return sema.addConstant(operand_type, (try mod.intern(.{ .aggregate = .{
+                .ty = operand_type.toIntern(),
+                .storage = .{ .elems = elems },
+            } })).toValue());
         } else {
-            const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod);
+            const result_val = try val.bitwiseNot(operand_type, sema.arena, mod);
             return sema.addConstant(operand_type, result_val);
         }
     }
@@ -12561,18 +12615,19 @@ fn analyzeTupleCat(
     lhs: Air.Inst.Ref,
     rhs: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
     const src = LazySrcLoc.nodeOffset(src_node);
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };

-    const lhs_len = lhs_ty.structFieldCount();
-    const rhs_len = rhs_ty.structFieldCount();
+    const lhs_len = lhs_ty.structFieldCount(mod);
+    const rhs_len = rhs_ty.structFieldCount(mod);
     const dest_fields = lhs_len + rhs_len;

     if (dest_fields == 0) {
-        return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value));
+        return sema.addConstant(Type.empty_struct_literal, Value.empty_struct);
     }
     if (lhs_len == 0) {
         return rhs;
@@ -12582,42 +12637,48 @@ fn analyzeTupleCat(
     }
     const final_len = try sema.usizeCast(block, rhs_src, dest_fields);

-    const types = try sema.arena.alloc(Type, final_len);
-    const values = try sema.arena.alloc(Value, final_len);
+    const types = try sema.arena.alloc(InternPool.Index, final_len);
+    const values = try sema.arena.alloc(InternPool.Index, final_len);

     const opt_runtime_src = rs: {
         var runtime_src: ?LazySrcLoc = null;
         var i: u32 = 0;
         while (i < lhs_len) : (i += 1) {
-            types[i] = lhs_ty.structFieldType(i);
-            const default_val = lhs_ty.structFieldDefaultValue(i);
-            values[i] = default_val;
+            types[i] = lhs_ty.structFieldType(i, mod).toIntern();
+            const default_val = lhs_ty.structFieldDefaultValue(i, mod);
+            values[i] = default_val.toIntern();
             const operand_src = lhs_src; // TODO better source location
-            if (default_val.tag() == .unreachable_value) {
+            if (default_val.toIntern() == .unreachable_value) {
                 runtime_src = operand_src;
+                values[i] = .none;
             }
         }
         i = 0;
         while (i < rhs_len) : (i += 1) {
-            types[i + lhs_len] = rhs_ty.structFieldType(i);
-            const default_val = rhs_ty.structFieldDefaultValue(i);
-            values[i + lhs_len] = default_val;
+            types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern();
+            const default_val = rhs_ty.structFieldDefaultValue(i, mod);
+            values[i + lhs_len] = default_val.toIntern();
             const operand_src = rhs_src; // TODO better source location
-            if (default_val.tag() == .unreachable_value) {
+            if (default_val.toIntern() == .unreachable_value) {
                 runtime_src = operand_src;
+                values[i + lhs_len] = .none;
             }
         }
         break :rs runtime_src;
     };

-    const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
         .types = types,
         .values = values,
-    });
+        .names = &.{},
+    } });

     const runtime_src = opt_runtime_src orelse {
-        const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
-        return sema.addConstant(tuple_ty, tuple_val);
+        const tuple_val = try mod.intern(.{ .aggregate = .{
+            .ty = tuple_ty,
+            .storage = .{ .elems = values },
+        } });
+        return sema.addConstant(tuple_ty.toType(), tuple_val.toValue());
     };

     try sema.requireRuntimeBlock(block, src, runtime_src);
@@ -12635,13 +12696,14 @@ fn analyzeTupleCat(
         try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty);
     }

-    return block.addAggregateInit(tuple_ty, element_refs);
+    return block.addAggregateInit(tuple_ty.toType(), element_refs);
 }

 fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const lhs = try sema.resolveInst(extra.lhs);
@@ -12650,8 +12712,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs_ty = sema.typeOf(rhs);
     const src = inst_data.src();

-    const lhs_is_tuple = lhs_ty.isTuple();
-    const rhs_is_tuple = rhs_ty.isTuple();
+    const lhs_is_tuple = lhs_ty.isTuple(mod);
+    const rhs_is_tuple = rhs_ty.isTuple(mod);
     if (lhs_is_tuple and rhs_is_tuple) {
         return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
     }
@@ -12661,11 +12723,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
         if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
-        return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
+        return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
     };
     const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
         assert(!rhs_is_tuple);
-        return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(sema.mod)});
+        return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)});
     };

     const resolved_elem_ty = t: {
@@ -12727,73 +12789,71 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         ),
     };

-    const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod);
+    const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, mod);

     const ptr_addrspace = p: {
-        if (lhs_ty.zigTypeTag() == .Pointer) break :p lhs_ty.ptrAddressSpace();
-        if (rhs_ty.zigTypeTag() == .Pointer) break :p rhs_ty.ptrAddressSpace();
+        if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod);
+        if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod);
         break :p null;
     };

-    const runtime_src = if (switch (lhs_ty.zigTypeTag()) {
+    const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) {
         .Array, .Struct => try sema.resolveMaybeUndefVal(lhs),
         .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
         else => unreachable,
     }) |lhs_val| rs: {
-        if (switch (rhs_ty.zigTypeTag()) {
+        if (switch (rhs_ty.zigTypeTag(mod)) {
             .Array, .Struct => try sema.resolveMaybeUndefVal(rhs),
             .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
             else => unreachable,
         }) |rhs_val| {
-            const lhs_sub_val = if (lhs_ty.isSinglePointer())
+            const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
                 (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
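
// Editorial aside, a user-level sketch of what zirArrayCat computes in the
// hunks around here: `++` concatenates comptime-known arrays (and tuples),
// coercing every element to the resolved element type; under the new interned
// representation a sentinel is carried by the result type rather than stored
// as an extra trailing element.
//
//     const std = @import("std");
//
//     test "comptime array concatenation" {
//         const a = [_]u8{ 1, 2 };
//         const b = [_]u8{ 3, 4, 5 };
//         const c = a ++ b;
//         try std.testing.expectEqual([5]u8{ 1, 2, 3, 4, 5 }, c);
//
//         const s: [:0]const u8 = "ab" ++ "cd"; // sentinel lives in the type
//         try std.testing.expectEqualStrings("abcd", s);
//     }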
             else
                 lhs_val;

-            const rhs_sub_val = if (rhs_ty.isSinglePointer())
+            const rhs_sub_val = if (rhs_ty.isSinglePointer(mod))
                 (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).?
             else
                 rhs_val;

-            const final_len_including_sent = result_len + @boolToInt(res_sent_val != null);
-            const element_vals = try sema.arena.alloc(Value, final_len_including_sent);
+            const element_vals = try sema.arena.alloc(InternPool.Index, result_len);
             var elem_i: usize = 0;
             while (elem_i < lhs_len) : (elem_i += 1) {
                 const lhs_elem_i = elem_i;
-                const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type;
-                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.initTag(.unreachable_value);
-                const elem_val = if (elem_default_val.tag() == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val;
+                const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type;
+                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable";
+                const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val;
                 const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
                 const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
                 const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
-                element_vals[elem_i] = coerced_elem_val;
+                element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
             }
             while (elem_i < result_len) : (elem_i += 1) {
                 const rhs_elem_i = elem_i - lhs_len;
-                const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type;
-                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.initTag(.unreachable_value);
-                const elem_val = if (elem_default_val.tag() == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val;
+                const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type;
+                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable";
+                const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val;
                 const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
                 const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
                 const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
-                element_vals[elem_i] = coerced_elem_val;
+                element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
             }
-            if (res_sent_val) |sent_val| {
-                element_vals[result_len] = sent_val;
-            }
-            const val = try Value.Tag.aggregate.create(sema.arena, element_vals);
-            return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null);
+            return sema.addConstantMaybeRef(block, result_ty, (try mod.intern(.{ .aggregate = .{
+                .ty = result_ty.toIntern(),
+                .storage = .{ .elems = element_vals },
+            } })).toValue(), ptr_addrspace != null);
         } else break :rs rhs_src;
     } else lhs_src;

     try sema.requireRuntimeBlock(block, src, runtime_src);

     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const alloc_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = result_ty,
             .@"addrspace" = ptr_as,
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = resolved_elem_ty,
             .@"addrspace" = ptr_as,
         });
@@ -12815,7 +12875,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         if (res_sent_val) |sent_val| {
             const elem_index = try sema.addIntUnsigned(Type.usize, result_len);
             const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty);
-            const init = try sema.addConstant(lhs_info.elem_type, sent_val);
+            const init = try sema.addConstant(lhs_info.elem_type, try mod.getCoerced(sent_val, lhs_info.elem_type));
             try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store);
         }

@@ -12841,11 +12901,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
-    switch (operand_ty.zigTypeTag()) {
-        .Array => return operand_ty.arrayInfo(),
+    switch (operand_ty.zigTypeTag(mod)) {
+        .Array => return operand_ty.arrayInfo(mod),
         .Pointer => {
-            const ptr_info = operand_ty.ptrInfo().data;
+            const ptr_info = operand_ty.ptrInfo(mod);
             switch (ptr_info.size) {
                 // TODO: in the Many case here this should only work if the type
                 // has a sentinel, and this code should compute the length based
@@ -12855,24 +12916,24 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
                     return Type.ArrayInfo{
                         .elem_type = ptr_info.pointee_type,
                         .sentinel = ptr_info.sentinel,
-                        .len = val.sliceLen(sema.mod),
+                        .len = val.sliceLen(mod),
                     };
                 },
                 .One => {
-                    if (ptr_info.pointee_type.zigTypeTag() == .Array) {
-                        return ptr_info.pointee_type.arrayInfo();
+                    if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) {
+                        return ptr_info.pointee_type.arrayInfo(mod);
                     }
                 },
                 .C => {},
             }
         },
         .Struct => {
-            if (operand_ty.isTuple() and peer_ty.isIndexable()) {
-                assert(!peer_ty.isTuple());
+            if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) {
+                assert(!peer_ty.isTuple(mod));
                 return .{
-                    .elem_type = peer_ty.elemType2(),
+                    .elem_type = peer_ty.elemType2(mod),
                     .sentinel = null,
-                    .len = operand_ty.arrayLen(),
+                    .len = operand_ty.arrayLen(mod),
                 };
             }
         },
@@ -12886,52 +12947,54 @@ fn analyzeTupleMul(
     block: *Block,
     src_node: i32,
     operand: Air.Inst.Ref,
-    factor: u64,
+    factor: usize,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
     const src = LazySrcLoc.nodeOffset(src_node);
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };

-    const tuple_len = operand_ty.structFieldCount();
-    const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch
+    const tuple_len = operand_ty.structFieldCount(mod);
+    const final_len = std.math.mul(usize, tuple_len, factor) catch
         return sema.fail(block, rhs_src, "operation results in overflow", .{});

-    if (final_len_u64 == 0) {
-        return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value));
+    if (final_len == 0) {
+        return sema.addConstant(Type.empty_struct_literal, Value.empty_struct);
     }
-    const final_len = try sema.usizeCast(block, rhs_src, final_len_u64);
-
-    const types = try sema.arena.alloc(Type, final_len);
-    const values = try sema.arena.alloc(Value, final_len);
+    const types = try sema.arena.alloc(InternPool.Index, final_len);
+    const values = try sema.arena.alloc(InternPool.Index, final_len);

     const opt_runtime_src = rs: {
         var runtime_src: ?LazySrcLoc = null;
-        var i: u32 = 0;
-        while (i < tuple_len) : (i += 1) {
-            types[i] = operand_ty.structFieldType(i);
-            values[i] = operand_ty.structFieldDefaultValue(i);
+        for (0..tuple_len) |i| {
+            types[i] = operand_ty.structFieldType(i, mod).toIntern();
+            values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern();
             const operand_src = lhs_src; // TODO better source location
-            if (values[i].tag() == .unreachable_value) {
+            if (values[i] == .unreachable_value) {
                 runtime_src = operand_src;
+                values[i] = .none; // TODO don't treat unreachable_value as special
             }
         }
-        i = 0;
-        while (i < factor) : (i += 1) {
-            mem.copyForwards(Type, types[tuple_len * i ..], types[0..tuple_len]);
-            mem.copyForwards(Value, values[tuple_len * i ..], values[0..tuple_len]);
+        for (0..factor) |i| {
+            mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]);
+            mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]);
        }
        break :rs runtime_src;
    };

-    const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
         .types = types,
         .values = values,
-    });
+        .names = &.{},
+    } });

     const runtime_src = opt_runtime_src orelse {
-        const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
-        return sema.addConstant(tuple_ty, tuple_val);
+        const tuple_val = try mod.intern(.{ .aggregate = .{
+            .ty = tuple_ty,
+            .storage = .{ .elems = values },
+        } });
+        return sema.addConstant(tuple_ty.toType(), tuple_val.toValue());
     };

     try sema.requireRuntimeBlock(block, src, runtime_src);
@@ -12947,13 +13010,14 @@ fn analyzeTupleMul(
         @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
     }

-    return block.addAggregateInit(tuple_ty, element_refs);
+    return block.addAggregateInit(tuple_ty.toType(), element_refs);
 }

 fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const lhs = try sema.resolveInst(extra.lhs);
@@ -12963,18 +13027,19 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const operator_src: LazySrcLoc = .{ .node_offset_main_token = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };

-    if (lhs_ty.isTuple()) {
+    if (lhs_ty.isTuple(mod)) {
         // In `**` rhs must be comptime-known, but lhs can be runtime-known
         const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known");
-        return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor);
+        const factor_casted = try sema.usizeCast(block, rhs_src, factor);
+        return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted);
     }

     // Analyze the lhs first, to catch the case that someone tried to do exponentiation
     const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
         const msg = msg: {
-            const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
+            const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
             errdefer msg.destroy(sema.gpa);
-            switch (lhs_ty.zigTypeTag()) {
+            switch (lhs_ty.zigTypeTag(mod)) {
                 .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
                     try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{});
                 },
@@ -12992,15 +13057,13 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         return sema.fail(block, rhs_src, "operation results in overflow", .{});

     const result_len = try sema.usizeCast(block, src, result_len_u64);
-    const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod);
+    const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, mod);

-    const ptr_addrspace = if (lhs_ty.zigTypeTag() == .Pointer) lhs_ty.ptrAddressSpace() else null;
+    const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null;
     const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);

     if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
-        const final_len_including_sent = result_len + @boolToInt(lhs_info.sentinel != null);
-
-        const lhs_sub_val = if (lhs_ty.isSinglePointer())
+        const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
             (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
         else
             lhs_val;
@@ -13008,38 +13071,41 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         const val = v: {
             // Optimization for the common pattern of a single element repeated N times, such
             // as zero-filling a byte array.
-            if (lhs_len == 1) {
-                const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, 0);
-                break :v try Value.Tag.repeated.create(sema.arena, elem_val);
+            if (lhs_len == 1 and lhs_info.sentinel == null) {
+                const elem_val = try lhs_sub_val.elemValue(mod, 0);
+                break :v try mod.intern(.{ .aggregate = .{
+                    .ty = result_ty.toIntern(),
+                    .storage = .{ .repeated_elem = elem_val.toIntern() },
+                } });
            }

-            const element_vals = try sema.arena.alloc(Value, final_len_including_sent);
+            const element_vals = try sema.arena.alloc(InternPool.Index, result_len);
             var elem_i: usize = 0;
             while (elem_i < result_len) {
                 var lhs_i: usize = 0;
                 while (lhs_i < lhs_len) : (lhs_i += 1) {
-                    const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_i);
-                    element_vals[elem_i] = elem_val;
+                    const elem_val = try lhs_sub_val.elemValue(mod, lhs_i);
+                    element_vals[elem_i] = elem_val.toIntern();
                     elem_i += 1;
                 }
             }
-            if (lhs_info.sentinel) |sent_val| {
-                element_vals[result_len] = sent_val;
-            }
-            break :v try Value.Tag.aggregate.create(sema.arena, element_vals);
+            break :v try mod.intern(.{ .aggregate = .{
+                .ty = result_ty.toIntern(),
+                .storage = .{ .elems = element_vals },
+            } });
        };
-        return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null);
+        return sema.addConstantMaybeRef(block, result_ty, val.toValue(), ptr_addrspace != null);
     }

     try sema.requireRuntimeBlock(block, src, lhs_src);

     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const alloc_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = result_ty,
             .@"addrspace" = ptr_as,
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = lhs_info.elem_type,
             .@"addrspace" = ptr_as,
         });
@@ -13082,6 +13148,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const lhs_src = src;
@@ -13089,34 +13156,31 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.

     const rhs = try sema.resolveInst(inst_data.operand);
     const rhs_ty = sema.typeOf(rhs);
-    const rhs_scalar_ty = rhs_ty.scalarType();
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);

-    if (rhs_scalar_ty.isUnsignedInt() or switch (rhs_scalar_ty.zigTypeTag()) {
+    if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) {
         .Int, .ComptimeInt, .Float, .ComptimeFloat => false,
         else => true,
     }) {
-        return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)});
+        return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)});
     }

     if (rhs_scalar_ty.isAnyFloat()) {
         // We handle float negation here to ensure negative zero is represented in the bits.
         if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
-            if (rhs_val.isUndef()) return sema.addConstUndef(rhs_ty);
-            return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod));
+            if (rhs_val.isUndef(mod)) return sema.addConstUndef(rhs_ty);
+            return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, mod));
         }
         try sema.requireRuntimeBlock(block, src, null);
         return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
     }

-    const lhs = if (rhs_ty.zigTypeTag() == .Vector)
-        try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
-    else
-        try sema.resolveInst(.zero);
-
+    const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0)));
     return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true);
 }

 fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const lhs_src = src;
@@ -13124,18 +13188,14 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     const rhs = try sema.resolveInst(inst_data.operand);
     const rhs_ty = sema.typeOf(rhs);
-    const rhs_scalar_ty = rhs_ty.scalarType();
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);

-    switch (rhs_scalar_ty.zigTypeTag()) {
+    switch (rhs_scalar_ty.zigTypeTag(mod)) {
         .Int, .ComptimeInt, .Float, .ComptimeFloat => {},
-        else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}),
+        else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)}),
     }

-    const lhs = if (rhs_ty.zigTypeTag() == .Vector)
-        try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
-    else
-        try sema.resolveInst(.zero);
-
+    const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0)));
     return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true);
 }

@@ -13161,6 +13221,7 @@ fn zirArithmetic(
 }

 fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13171,8 +13232,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

@@ -13181,25 +13242,22 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });

-    const is_vector = resolved_type.zigTypeTag() == .Vector;
-
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);

-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);

     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);

-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);

-    if ((lhs_ty.zigTypeTag() == .ComptimeFloat and rhs_ty.zigTypeTag() == .ComptimeInt) or
-        (lhs_ty.zigTypeTag() == .ComptimeInt and rhs_ty.zigTypeTag() == .ComptimeFloat))
+    if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
+        (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
     {
         // If it makes a difference whether we coerce to ints or floats before doing the division, error.
         // If lhs % rhs is 0, it doesn't matter.
@@ -13207,9 +13265,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const rhs_val = maybe_rhs_val orelse unreachable; const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable; if (!rem.compareAllWithZero(.eq, mod)) { - return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{ - @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod), - }); + return sema.fail( + block, + src, + "ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'", + .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(resolved_type, mod) }, + ); } } @@ -13243,17 +13304,20 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins switch (scalar_tag) { .Int, .ComptimeInt, .ComptimeFloat => { if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13267,10 +13331,10 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const runtime_src = rs: { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { - if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { + if (lhs_val.isUndef(mod)) { + if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13281,10 +13345,10 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_rhs_val) |rhs_val| { if (is_int) { - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); + var overflow_idx: ?usize = null; + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return sema.addConstant(resolved_type, res); } else { @@ -13309,8 +13373,13 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } const air_tag = if (is_int) blk: { - if (lhs_ty.isSignedInt() or rhs_ty.isSignedInt()) { - return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) }); + if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) { + return sema.fail( + block, + src, + "division with '{}' and '{}': signed integers must use 
@divTrunc, @divFloor, or @divExact", + .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) }, + ); } break :blk Air.Inst.Tag.div_trunc; } else switch (block.float_mode) { @@ -13321,6 +13390,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13331,8 +13401,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13341,19 +13411,16 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13375,19 +13442,22 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, compile error because there is a possible // value for which the division would result in a remainder. 
if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13402,10 +13472,10 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!(modulus_val.compareAllWithZero(.eq, mod))) { return sema.fail(block, src, "exact division produced remainder", .{}); } - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); + var overflow_idx: ?usize = null; + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return sema.addConstant(resolved_type, res); } else { @@ -13437,7 +13507,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ok = if (!is_int) ok: { const floored = try block.addUnOp(.floor, result); - if (resolved_type.zigTypeTag() == .Vector) { + if (resolved_type.zigTypeTag(mod) == .Vector) { const eql = try block.addCmpVector(result, floored, .eq); break :ok try block.addInst(.{ .tag = switch (block.float_mode) { @@ -13459,8 +13529,13 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else ok: { const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); - if (resolved_type.zigTypeTag() == .Vector) { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; + if (resolved_type.zigTypeTag(mod) == .Vector) { + const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ @@ -13471,7 +13546,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, }); } else { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero); break :ok is_in_range; } @@ -13484,6 +13559,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ 
.node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13494,8 +13570,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13504,20 +13580,17 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13542,17 +13615,20 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. 
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13561,10 +13637,10 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // TODO: if the RHS is one, return the LHS directly } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { - if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { + if (lhs_val.isUndef(mod)) { + if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13600,6 +13676,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13610,8 +13687,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13620,20 +13697,17 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13658,17 +13732,20 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError!Ai // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13676,10 +13753,10 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { - if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { + if (lhs_val.isUndef(mod)) { + if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13690,10 +13767,10 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_rhs_val) |rhs_val| { if (is_int) { - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); + var overflow_idx: ?usize = null; + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return sema.addConstant(resolved_type, res); } else { @@ -13727,39 +13804,34 @@ fn addDivIntOverflowSafety( casted_rhs: Air.Inst.Ref, is_int: bool, ) CompileError!void { + const mod = sema.mod; if (!is_int) return; // If the LHS is unsigned, it cannot cause overflow. - if (!lhs_scalar_ty.isSignedInt()) return; - - const mod = sema.mod; - const target = mod.getTarget(); + if (!lhs_scalar_ty.isSignedInt(mod)) return; // If the LHS is widened to a larger integer type, no overflow is possible. - if (lhs_scalar_ty.intInfo(target).bits < resolved_type.intInfo(target).bits) { + if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) { return; } - const min_int = try resolved_type.minInt(sema.arena, target); - const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1); - const neg_one = if (resolved_type.zigTypeTag() == .Vector) - try Value.Tag.repeated.create(sema.arena, neg_one_scalar) - else - neg_one_scalar; + const min_int = try resolved_type.minInt(mod, resolved_type); + const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); + const neg_one = try sema.splat(resolved_type, neg_one_scalar); // If the LHS is comptime-known to be not equal to the min int, // no overflow is possible. 
if (maybe_lhs_val) |lhs_val| { - if (lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return; + if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return; } // If the RHS is comptime-known to not be equal to -1, no overflow is possible. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return; + if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return; } var ok: Air.Inst.Ref = .none; - if (resolved_type.zigTypeTag() == .Vector) { + if (resolved_type.zigTypeTag(mod) == .Vector) { if (maybe_lhs_val == null) { const min_int_ref = try sema.addConstant(resolved_type, min_int); ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq); @@ -13815,8 +13887,13 @@ fn addDivByZeroSafety( // emitted above. if (maybe_rhs_val != null) return; - const ok = if (resolved_type.zigTypeTag() == .Vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const mod = sema.mod; + const scalar_zero = if (is_int) + try mod.intValue(resolved_type.scalarType(mod), 0) + else + try mod.floatValue(resolved_type.scalarType(mod), 0.0); + const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { + const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ @@ -13827,7 +13904,7 @@ fn addDivByZeroSafety( } }, }); } else ok: { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero); }; try sema.addSafetyCheck(block, ok, .divide_by_zero); @@ -13842,6 +13919,7 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst } fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13852,8 +13930,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13862,20 +13940,19 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
.override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; + const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13895,20 +13972,26 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // then emit a compile error saying you have to pick one. if (is_int) { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; + const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{ + .ty = resolved_type.toIntern(), + .storage = .{ .repeated_elem = scalar_zero.toIntern() }, + } })).toValue() else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - } else if (lhs_scalar_ty.isSignedInt()) { + } else if (lhs_scalar_ty.isSignedInt(mod)) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13929,7 +14012,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.addConstant(resolved_type, rem_result); } break :rs lhs_src; - } else if (rhs_scalar_ty.isSignedInt()) { + } else if (rhs_scalar_ty.isSignedInt(mod)) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } else { break :rs rhs_src; @@ -13937,7 +14020,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } // float operands if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13947,7 +14030,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef() or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) { + if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } return sema.addConstant( @@ -13978,32 +14061,31 @@ fn intRem( lhs: Value, rhs: Value, ) CompileError!Value { - if (ty.zigTypeTag() == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return sema.intRemScalar(lhs, rhs); + return sema.intRemScalar(lhs, rhs, ty); } -fn intRemScalar( - sema: *Sema, - lhs: Value, - rhs: Value, -) CompileError!Value { - const target = sema.mod.getTarget(); +fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value { + const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs_q = try sema.arena.alloc( math.big.Limb, lhs_bigint.limbs.len, @@ -14021,10 +14103,11 @@ fn intRemScalar( var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return Value.fromBigInt(sema.arena, result_r.toConst()); + return mod.intValue_big(scalar_ty, result_r.toConst()); } fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -14035,8 +14118,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -14048,13 +14131,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -14072,12 +14154,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins // If the lhs is undefined, result is undefined. 
if (is_int) { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -14096,7 +14178,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } // float operands if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -14104,7 +14186,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { @@ -14127,6 +14209,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -14137,8 +14220,8 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -14150,13 +14233,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -14174,12 +14256,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins // If the lhs is undefined, result is undefined. 
if (is_int) { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -14198,7 +14280,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } // float operands if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -14206,7 +14288,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { @@ -14268,7 +14350,7 @@ fn zirOverflowArithmetic( const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src); const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src); - if (dest_ty.scalarType().zigTypeTag() != .Int) { + if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) { return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)}); } @@ -14276,30 +14358,32 @@ fn zirOverflowArithmetic( const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs); const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty); + const overflow_ty = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type.types[1].toType(); var result: struct { inst: Air.Inst.Ref = .none, - wrapped: Value = Value.initTag(.unreachable_value), + wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { + const zero_bit = try mod.intValue(Type.u1, 0); switch (zir_tag) { .add_with_overflow => { // If either of the arguments is zero, `false` is returned and the other is stored // to the result, even if it is undefined.. // Otherwise, if either of the argument is undefined, undefined is returned. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } @@ -14312,12 +14396,12 @@ fn zirOverflowArithmetic( // If the rhs is zero, then the result is lhs and no overflow occured. // Otherwise, if either result is undefined, both results are undefined. 
if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } @@ -14330,29 +14414,30 @@ fn zirOverflowArithmetic( // If either of the arguments is zero, the result is zero and no overflow occured. // If either of the arguments is one, the result is the other and no overflow occured. // Otherwise, if either of the arguments is undefined, both results are undefined. + const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1); if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; - } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; + } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef()) { + if (!rhs_val.isUndef(mod)) { if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; - } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; + } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } @@ -14366,22 +14451,22 @@ fn zirOverflowArithmetic( // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred. // Oterhwise if either of the arguments is undefined, both results are undefined. 
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } - const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, sema.mod); + const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, mod); break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result }; } } @@ -14420,40 +14505,46 @@ fn zirOverflowArithmetic( } if (result.inst == .none) { - const values = try sema.arena.alloc(Value, 2); - values[0] = result.wrapped; - values[1] = result.overflow_bit; - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + return sema.addConstant(tuple_ty, (try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty.toIntern(), + .storage = .{ .elems = &.{ + result.wrapped.toIntern(), + result.overflow_bit.toIntern(), + } }, + } })).toValue()); } const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2); element_refs[0] = result.inst; - element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1), result.overflow_bit); + element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1, mod), result.overflow_bit); return block.addAggregateInit(tuple_ty, element_refs); } -fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value { - if (ty.zigTypeTag() != .Vector) return val; - return Value.Tag.repeated.create(sema.arena, val); +fn splat(sema: *Sema, ty: Type, val: Value) !Value { + const mod = sema.mod; + if (ty.zigTypeTag(mod) != .Vector) return val; + const repeated = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = val.toIntern() }, + } }); + return repeated.toValue(); } fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { - const ov_ty = if (ty.zigTypeTag() == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1; + const mod = sema.mod; + const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .u1_type, + }) else Type.u1; - const types = try sema.arena.alloc(Type, 2); - const values = try sema.arena.alloc(Value, 2); - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ - .types = types, - .values = values, - }); - - types[0] = ty; - types[1] = ov_ty; - values[0] = Value.initTag(.unreachable_value); - values[1] = Value.initTag(.unreachable_value); - - return tuple_ty; + const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() }; + const values = [2]InternPool.Index{ .none, .none }; + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ + .types = &types, + .values = &values, + .names = &.{}, + 
} }); + return tuple_ty.toType(); } fn analyzeArithmetic( @@ -14468,13 +14559,14 @@ fn analyzeArithmetic( rhs_src: LazySrcLoc, want_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) { + if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) { .One, .Slice => {}, .Many, .C => { const air_tag: Air.Inst.Tag = switch (zir_tag) { @@ -14491,18 +14583,16 @@ fn analyzeArithmetic( .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const scalar_type = resolved_type.scalarType(mod); + const scalar_tag = scalar_type.zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: { @@ -14516,12 +14606,12 @@ fn analyzeArithmetic( // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { @@ -14534,7 +14624,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14543,16 +14633,16 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { if (is_int) { - const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(sum, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vector_index); + var overflow_idx: ?usize = null; + const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type, &overflow_idx); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vec_idx); } return sema.addConstant(resolved_type, sum); } else { return sema.addConstant( resolved_type, - try sema.floatAdd(lhs_val, rhs_val, resolved_type), + try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs .{ .src = rhs_src, .air_tag = air_tag }; @@ -14563,13 +14653,13 @@ fn analyzeArithmetic( // If either of the operands are zero, the other operand is returned. 
// If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .addwrap_optimized else .addwrap; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14588,12 +14678,12 @@ fn analyzeArithmetic( // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14601,7 +14691,7 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { const val = if (scalar_tag == .ComptimeInt) - try sema.intAdd(lhs_val, rhs_val, resolved_type) + try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined) else try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod); @@ -14618,7 +14708,7 @@ fn analyzeArithmetic( // overflow, causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { @@ -14631,7 +14721,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14640,16 +14730,16 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { if (is_int) { - const diff = try sema.intSub(lhs_val, rhs_val, resolved_type); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(diff, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vector_index); + var overflow_idx: ?usize = null; + const diff = try sema.intSub(lhs_val, rhs_val, resolved_type, &overflow_idx); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vec_idx); } return sema.addConstant(resolved_type, diff); } else { return sema.addConstant( resolved_type, - try sema.floatSub(lhs_val, rhs_val, resolved_type), + try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs .{ .src = rhs_src, .air_tag = air_tag }; @@ -14660,7 +14750,7 @@ fn analyzeArithmetic( // If the RHS is zero, then the other operand is returned, even if it is undefined. // If either of the operands are undefined, the result is undefined. 
if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14669,7 +14759,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .subwrap_optimized else .subwrap; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { @@ -14685,7 +14775,7 @@ fn analyzeArithmetic( // If the RHS is zero, result is LHS. // If either of the operands are undefined, result is undefined. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14693,12 +14783,12 @@ fn analyzeArithmetic( } } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { const val = if (scalar_tag == .ComptimeInt) - try sema.intSub(lhs_val, rhs_val, resolved_type) + try sema.intSub(lhs_val, rhs_val, resolved_type, undefined) else try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod); @@ -14718,62 +14808,74 @@ fn analyzeArithmetic( // If either of the operands are inf, and the other operand is zero, // the result is nan. // If either of the operands are nan, the result is nan. + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + else => unreachable, + }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { - if (lhs_val.isNan()) { + if (!lhs_val.isUndef(mod)) { + if (lhs_val.isNan(mod)) { return sema.addConstant(resolved_type, lhs_val); } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: { if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isNan()) { + if (rhs_val.isNan(mod)) { return sema.addConstant(resolved_type, rhs_val); } - if (rhs_val.isInf()) { - return sema.addConstant(resolved_type, try Value.Tag.float_32.create(sema.arena, std.math.nan_f32)); + if (rhs_val.isInf(mod)) { + return sema.addConstant( + resolved_type, + try mod.floatValue(resolved_type, std.math.nan_f128), + ); } } else if (resolved_type.isAnyFloat()) { break :lz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { return sema.addConstUndef(resolved_type); } } - if (rhs_val.isNan()) { + if (rhs_val.isNan(mod)) { return sema.addConstant(resolved_type, rhs_val); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isInf()) { - return 
sema.addConstant(resolved_type, try Value.Tag.float_32.create(sema.arena, std.math.nan_f32)); + if (lhs_val.isInf(mod)) { + return sema.addConstant( + resolved_type, + try mod.floatValue(resolved_type, std.math.nan_f128), + ); } } else if (resolved_type.isAnyFloat()) { break :rz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14781,16 +14883,16 @@ fn analyzeArithmetic( } } if (is_int) { - const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(product, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, product, vector_index); + var overflow_idx: ?usize = null; + const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx); } return sema.addConstant(resolved_type, product); } else { return sema.addConstant( resolved_type, - try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, sema.mod), + try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs .{ .src = lhs_src, .air_tag = air_tag }; @@ -14801,40 +14903,46 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + else => unreachable, + }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mulwrap_optimized else .mulwrap; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } return sema.addConstant( resolved_type, - try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, sema.mod), + try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, mod), ); } else break :rs .{ .src = lhs_src, .air_tag = air_tag }; } else break :rs .{ .src = rhs_src, .air_tag = air_tag }; @@ -14844,41 +14952,47 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
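Note the two-sided contract of the reworked integer multiply in the saturating branch that follows: callers that can actually overflow pass a real &overflow_idx and report the offending vector lane (as the mul branch above already does), while the .ComptimeInt case passes `undefined` because arbitrary-precision comptime_int arithmetic cannot overflow, so the out-parameter is never written. The overflowing call site, restated from the mul branch:

    // On success `overflow_idx` stays null; on overflow it holds the
    // offending element index (0 for scalars), and `product` still holds
    // the computed value so the error message can display it.
    var overflow_idx: ?usize = null;
    const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
    if (overflow_idx) |vec_idx|
        return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx);
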
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + else => unreachable, + }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod) + try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, mod) else - try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, sema.mod); + try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, mod); return sema.addConstant(resolved_type, val); } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat }; @@ -14910,7 +15024,7 @@ fn analyzeArithmetic( } }, }); const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty); - const any_ov_bit = if (resolved_type.zigTypeTag() == .Vector) + const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector) try block.addInst(.{ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ @@ -14920,7 +15034,7 @@ fn analyzeArithmetic( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .integer_overflow); @@ -14944,15 +15058,12 @@ fn analyzePtrArithmetic( // TODO if the operand is comptime-known to be negative, or is a negative int, // coerce to isize instead of usize. 
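One detail worth calling out in analyzePtrArithmetic below: the new pointer's alignment is the largest power of two that divides both the byte offset (`addend`) and the original alignment, and `@as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align"))` computes exactly that, because @ctz of the OR counts the low zero bits the two operands have in common. A self-contained check of the trick, using the two-argument @intCast form this codebase targets:

    const std = @import("std");

    test "largest power-of-two divisor of offset and old alignment" {
        const addend: u32 = 24; // divisible by 8, not by 16
        const old_align: u32 = 16;
        const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | old_align));
        try std.testing.expectEqual(@as(u32, 8), new_align);
    }
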
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src); - const target = sema.mod.getTarget(); + const mod = sema.mod; const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr); const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset); const ptr_ty = sema.typeOf(ptr); - const ptr_info = ptr_ty.ptrInfo().data; - const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Array) - ptr_info.pointee_type.childType() - else - ptr_info.pointee_type; + const ptr_info = ptr_ty.ptrInfo(mod); + assert(ptr_info.size == .Many or ptr_info.size == .C); const new_ptr_ty = t: { // Calculate the new pointer alignment. @@ -14963,9 +15074,9 @@ fn analyzePtrArithmetic( } // If the addend is not a comptime-known value we can still count on // it being a multiple of the type size. - const elem_size = elem_ty.abiSize(target); + const elem_size = ptr_info.pointee_type.abiSize(mod); const addend = if (opt_off_val) |off_val| a: { - const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(target)); + const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod)); break :a elem_size * off_int; } else elem_size; @@ -14974,7 +15085,7 @@ fn analyzePtrArithmetic( // non zero). const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align")); - break :t try Type.ptr(sema.arena, sema.mod, .{ + break :t try Type.ptr(sema.arena, mod, .{ .pointee_type = ptr_info.pointee_type, .sentinel = ptr_info.sentinel, .@"align" = new_align, @@ -14989,24 +15100,24 @@ fn analyzePtrArithmetic( const runtime_src = rs: { if (opt_ptr_val) |ptr_val| { if (opt_off_val) |offset_val| { - if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty); + if (ptr_val.isUndef(mod)) return sema.addConstUndef(new_ptr_ty); - const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target)); + const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod)); if (offset_int == 0) return ptr; - if (try ptr_val.getUnsignedIntAdvanced(target, sema)) |addr| { - const elem_size = elem_ty.abiSize(target); + if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| { + const elem_size = ptr_info.pointee_type.abiSize(mod); const new_addr = switch (air_tag) { .ptr_add => addr + elem_size * offset_int, .ptr_sub => addr - elem_size * offset_int, else => unreachable, }; - const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr); + const new_ptr_val = try mod.ptrIntValue(new_ptr_ty, new_addr); return sema.addConstant(new_ptr_ty, new_ptr_val); } if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -15052,7 +15163,7 @@ fn zirAsm( const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); const is_volatile = @truncate(u1, extended.small >> 15) != 0; - const is_global_assembly = sema.func == null; + const is_global_assembly = sema.func_index == .none; const asm_source: []const u8 = if (tmpl_is_expr) blk: { const tmpl = @intToEnum(Zir.Inst.Ref, extra.data.asm_source); @@ -15116,6 +15227,7 @@ fn zirAsm( const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len); const inputs = try sema.arena.alloc(ConstraintName, 
inputs_len); + const mod = sema.mod; for (args, 0..) |*arg, arg_i| { const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i); @@ -15123,9 +15235,9 @@ fn zirAsm( const uncasted_arg = try sema.resolveInst(input.data.operand); const uncasted_arg_ty = sema.typeOf(uncasted_arg); - switch (uncasted_arg_ty.zigTypeTag()) { - .ComptimeInt => arg.* = try sema.coerce(block, Type.initTag(.usize), uncasted_arg, src), - .ComptimeFloat => arg.* = try sema.coerce(block, Type.initTag(.f64), uncasted_arg, src), + switch (uncasted_arg_ty.zigTypeTag(mod)) { + .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src), + .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src), else => { arg.* = uncasted_arg; try sema.queueFullTypeResolution(uncasted_arg_ty); @@ -15205,6 +15317,7 @@ fn zirCmpEq( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = inst_data.src(); @@ -15215,8 +15328,8 @@ fn zirCmpEq( const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_ty_tag = lhs_ty.zigTypeTag(); - const rhs_ty_tag = rhs_ty.zigTypeTag(); + const lhs_ty_tag = lhs_ty.zigTypeTag(mod); + const rhs_ty_tag = rhs_ty.zigTypeTag(mod); if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) { // null == null, null != null if (op == .eq) { @@ -15227,16 +15340,16 @@ fn zirCmpEq( } // comparing null with optionals - if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr())) { + if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, rhs, op == .neq); } - if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr())) { + if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, lhs, op == .neq); } if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; - return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(sema.mod)}); + return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(mod)}); } if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) { @@ -15250,15 +15363,12 @@ fn zirCmpEq( const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(lhs)) |lval| { if (try sema.resolveMaybeUndefVal(rhs)) |rval| { - if (lval.isUndef() or rval.isUndef()) { + if (lval.isUndef(mod) or rval.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, - // or calling to Module.getErrorValue to get the values and then compare them is - // faster. 
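The deleted TODO just above is answered by the InternPool itself: error names are interned strings, so two errors compare equal exactly when their name indices are equal, and both the byte-wise mem.eql and the question about Module.getErrorValue become moot. The index-equality property is the general one any interning map provides, for example:

    const std = @import("std");

    test "interning reduces equality to an index compare" {
        var pool = std.StringArrayHashMap(void).init(std.testing.allocator);
        defer pool.deinit();
        const a = (try pool.getOrPut("OutOfMemory")).index;
        const b = (try pool.getOrPut("OutOfMemory")).index;
        const c = (try pool.getOrPut("FileNotFound")).index;
        try std.testing.expect(a == b and a != c);
    }
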
- const lhs_name = lval.castTag(.@"error").?.data.name; - const rhs_name = rval.castTag(.@"error").?.data.name; - if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { + const lkey = mod.intern_pool.indexToKey(lval.toIntern()); + const rkey = mod.intern_pool.indexToKey(rval.toIntern()); + if ((lkey.err.name == rkey.err.name) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -15276,7 +15386,7 @@ fn zirCmpEq( if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); - if (lhs_as_type.eql(rhs_as_type, sema.mod) == (op == .eq)) { + if (lhs_as_type.eql(rhs_as_type, mod) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -15295,12 +15405,13 @@ fn analyzeCmpUnionTag( tag_src: LazySrcLoc, op: std.math.CompareOperator, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const union_ty = try sema.resolveTypeFields(sema.typeOf(un)); - const union_tag_ty = union_ty.unionTagType() orelse { + const union_tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); errdefer msg.destroy(sema.gpa); - try sema.mod.errNoteNonLazy(union_ty.declSrcLoc(sema.mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(sema.mod)}); + try mod.errNoteNonLazy(union_ty.declSrcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(mod)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -15311,9 +15422,9 @@ fn analyzeCmpUnionTag( const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| { - if (enum_val.isUndef()) return sema.addConstUndef(Type.bool); - const field_ty = union_ty.unionFieldType(enum_val, sema.mod); - if (field_ty.zigTypeTag() == .NoReturn) { + if (enum_val.isUndef(mod)) return sema.addConstUndef(Type.bool); + const field_ty = union_ty.unionFieldType(enum_val, mod); + if (field_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } } @@ -15352,34 +15463,35 @@ fn analyzeCmp( rhs_src: LazySrcLoc, is_equality_cmp: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - if (lhs_ty.zigTypeTag() != .Optional and rhs_ty.zigTypeTag() != .Optional) { + if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) { try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); } - if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) { return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src); } - if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) { + if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) { // This operation allows any combination of integer and float types, regardless of the // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for // numeric types. 
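    // For example, `@as(u8, 200) > @as(i8, -1)` is a legal comparison that
    // must evaluate to true; cmpNumeric (called below) compares the
    // mathematical values directly rather than forcing both operands
    // through a resolved peer type first.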
return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src); } - if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorUnion and rhs_ty.zigTypeTag() == .ErrorSet) { + if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) { const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs); return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src); } - if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorSet and rhs_ty.zigTypeTag() == .ErrorUnion) { + if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) { const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs); return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src); } const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); - if (!resolved_type.isSelfComparable(is_equality_cmp)) { + if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) { return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{ - compareOperatorName(op), resolved_type.fmt(sema.mod), + compareOperatorName(op), resolved_type.fmt(mod), }); } const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -15408,15 +15520,19 @@ fn cmpSelf( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { - if (lhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); - if (resolved_type.zigTypeTag() == .Vector) { - const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool); + if (resolved_type.zigTypeTag(mod) == .Vector) { + const result_ty = try mod.vectorType(.{ + .len = resolved_type.vectorLen(mod), + .child = .bool_type, + }); const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type); return sema.addConstant(result_ty, cmp_val); } @@ -15427,7 +15543,7 @@ fn cmpSelf( return Air.Inst.Ref.bool_false; } } else { - if (resolved_type.zigTypeTag() == .Bool) { + if (resolved_type.zigTypeTag(mod) == .Bool) { // We can lower bool eq/neq more efficiently. return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src); } @@ -15436,9 +15552,9 @@ fn cmpSelf( } else { // For bools, we still check the other operand, because we can lower // bool eq/neq more efficiently. 
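    // ("More efficiently" here means no cmp instruction at all; a sketch
    // of the reduction, as an assumption about runtimeBoolCmp, whose body
    // is not part of this hunk:
    //   x == true  -> x      x != true  -> !x
    //   x == false -> !x     x != false -> x )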
- if (resolved_type.zigTypeTag() == .Bool) { + if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src); } } @@ -15446,7 +15562,7 @@ fn cmpSelf( } }; try sema.requireRuntimeBlock(block, src, runtime_src); - if (resolved_type.zigTypeTag() == .Vector) { + if (resolved_type.zigTypeTag(mod) == .Vector) { return block.addCmpVector(casted_lhs, casted_rhs, op); } const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized); @@ -15475,16 +15591,17 @@ fn runtimeBoolCmp( } fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Fn, .NoReturn, .Undefined, .Null, .Opaque, - => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(mod)}), .Type, .EnumLiteral, @@ -15509,25 +15626,25 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .AnyFrame, => {}, } - const target = sema.mod.getTarget(); - const val = try ty.lazyAbiSize(target, sema.arena); - if (val.tag() == .lazy_size) { + const val = try ty.lazyAbiSize(mod); + if (val.isLazySize(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); } fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Fn, .NoReturn, .Undefined, .Null, .Opaque, - => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(mod)}), .Type, .EnumLiteral, @@ -15552,8 +15669,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .AnyFrame, => {}, } - const target = sema.mod.getTarget(); - const bit_size = try operand_ty.bitSizeAdvanced(target, sema); + const bit_size = try operand_ty.bitSizeAdvanced(mod, sema); return sema.addIntUnsigned(Type.comptime_int, bit_size); } @@ -15562,17 +15678,13 @@ fn zirThis( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const this_decl_index = block.namespace.getDeclIndex(); + const mod = sema.mod; + const this_decl_index = mod.namespaceDeclIndex(block.namespace); const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); return sema.analyzeDeclVal(block, src, this_decl_index); } -fn zirClosureCapture( - sema: *Sema, - block: *Block, - inst: Zir.Inst.Index, -) CompileError!void { - // TODO: Compile error when closed over values are modified +fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = 
sema.code.instructions.items(.data)[inst].un_tok; // Closures are not necessarily constant values. For example, the // code might do something like this: @@ -15580,26 +15692,24 @@ fn zirClosureCapture( // ...in which case the closure_capture instruction has access to a runtime // value only. In such case we preserve the type and use a dummy runtime value. const operand = try sema.resolveInst(inst_data.operand); - const val = (try sema.resolveMaybeUndefValAllowVariables(operand)) orelse - Value.initTag(.unreachable_value); - - try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{ - .ty = try sema.typeOf(operand).copy(sema.perm_arena), - .val = try val.copy(sema.perm_arena), - }); + const ty = sema.typeOf(operand); + const capture: CaptureScope.Capture = blk: { + if (try sema.resolveMaybeUndefValAllowVariables(operand)) |val| { + const ip_index = try val.intern(ty, sema.mod); + break :blk .{ .comptime_val = ip_index }; + } + break :blk .{ .runtime_val = ty.toIntern() }; + }; + try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, capture); } -fn zirClosureGet( - sema: *Sema, - block: *Block, - inst: Zir.Inst.Index, -) CompileError!Air.Inst.Ref { - // TODO CLOSURE: Test this with inline functions +fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].inst_node; - var scope: *CaptureScope = sema.mod.declPtr(block.src_decl).src_scope.?; + var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?; // Note: The target closure must be in this scope list. // If it's not here, the zir is invalid, or the list is broken. - const tv = while (true) { + const capture = while (true) { // Note: We don't need to add a dependency here, because // decls always depend on their lexical parents. @@ -15612,17 +15722,17 @@ fn zirClosureGet( } return error.AnalysisFail; } - if (scope.captures.getPtr(inst_data.inst)) |tv| { - break tv; + if (scope.captures.get(inst_data.inst)) |capture| { + break capture; } scope = scope.parent.?; }; - if (tv.val.tag() == .unreachable_value and !block.is_typeof and sema.func == null) { + if (capture == .runtime_val and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { - const file = sema.owner_decl.getFileScope(); - const tree = file.getTree(sema.mod.gpa) catch |err| { + const file = sema.owner_decl.getFileScope(mod); + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15646,11 +15756,11 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.tag() == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { + if (capture == .runtime_val and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { - const file = sema.owner_decl.getFileScope(); - const tree = file.getTree(sema.mod.gpa) catch |err| { + const file = sema.owner_decl.getFileScope(mod); + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15676,13 +15786,17 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.tag() == .unreachable_value) { - assert(block.is_typeof); - // We need a dummy runtime instruction with the correct type. - return block.addTy(.alloc, tv.ty); + switch (capture) { + .runtime_val => |ty_ip_index| { + assert(block.is_typeof); + // We need a dummy runtime instruction with the correct type. + return block.addTy(.alloc, ty_ip_index.toType()); + }, + .comptime_val => |val_ip_index| { + const ty = mod.intern_pool.typeOf(val_ip_index).toType(); + return sema.addConstant(ty, val_ip_index.toValue()); + }, } - - return sema.addConstant(tv.ty, tv.val); } fn zirRetAddr( @@ -15717,345 +15831,422 @@ fn zirBuiltinSrc( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{}); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); + const fn_owner_decl = mod.declPtr(func.owner_decl); const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = std.mem.span(fn_owner_decl.name); - const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, mod.intern_pool.stringToSlice(fn_owner_decl.name)); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len - 1), - try Value.Tag.bytes.create(anon_decl.arena(), bytes), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; const file_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); // The compiler must not call realpath anywhere. - const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); + const name = try fn_owner_decl.getFileScope(mod).fullPathZ(sema.arena); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), name.len), - try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. 
name.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const field_values = try sema.arena.alloc(Value, 4); - // file: [:0]const u8, - field_values[0] = file_name_val; - // fn_name: [:0]const u8, - field_values[1] = func_name_val; - // line: u32 - field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try Value.Tag.int_u64.create(sema.arena, extra.line + 1)); - // column: u32, - field_values[3] = try Value.Tag.int_u64.create(sema.arena, extra.column + 1); - - return sema.addConstant( - try sema.getBuiltinType("SourceLocation"), - try Value.Tag.aggregate.create(sema.arena, field_values), - ); + const src_loc_ty = try sema.getBuiltinType("SourceLocation"); + const fields = .{ + // file: [:0]const u8, + file_name_val, + // fn_name: [:0]const u8, + func_name_val, + // line: u32, + try mod.intern(.{ .runtime_value = .{ + .ty = .u32_type, + .val = (try mod.intValue(Type.u32, extra.line + 1)).toIntern(), + } }), + // column: u32, + (try mod.intValue(Type.u32, extra.column + 1)).toIntern(), + }; + return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{ + .ty = src_loc_ty.toIntern(), + .storage = .{ .elems = &fields }, + } })).toValue()); } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); const type_info_ty = try sema.getBuiltinType("Type"); - const target = sema.mod.getTarget(); + const type_info_tag_ty = type_info_ty.unionTagType(mod).?; - switch (ty.zigTypeTag()) { - .Type => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Type)), - .val = Value.void, - }), - ), - .Void => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Void)), - .val = Value.void, - }), - ), - .Bool => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Bool)), - .val = Value.void, - }), - ), - .NoReturn => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.NoReturn)), - .val = Value.void, - }), - ), - .ComptimeFloat => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeFloat)), - .val = Value.void, - }), - ), - .ComptimeInt => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeInt)), - .val = Value.void, - }), - ), - .Undefined => return sema.addConstant( - type_info_ty, - try 
Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Undefined)), - .val = Value.void, - }), - ), - .Null => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Null)), - .val = Value.void, - }), - ), - .EnumLiteral => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.EnumLiteral)), - .val = Value.void, - }), - ), + switch (ty.zigTypeTag(mod)) { + .Type, + .Void, + .Bool, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .EnumLiteral, + => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).toIntern(), + .val = .void_value, + } })).toValue()), .Fn => { // TODO: look into memoizing this result. - const info = ty.fnInfo(); - var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); - const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len); + const fn_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Fn"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); + try sema.ensureDeclAnalyzed(fn_info_decl_index); + const fn_info_decl = mod.declPtr(fn_info_decl_index); + const fn_info_ty = fn_info_decl.val.toType(); + + const param_info_decl_index = (try sema.namespaceLookup( + block, + src, + fn_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Param"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); + try sema.ensureDeclAnalyzed(param_info_decl_index); + const param_info_decl = mod.declPtr(param_info_decl_index); + const param_info_ty = param_info_decl.val.toType(); + + const param_vals = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(ty).?.param_types.len); for (param_vals, 0..) 
|*param_val, i| { + const info = mod.typeToFunc(ty).?; const param_ty = info.param_types[i]; - const is_generic = param_ty.tag() == .generic_poison; - const param_ty_val = if (is_generic) - Value.null - else - try Value.Tag.opt_payload.create( - params_anon_decl.arena(), - try Value.Tag.ty.create(params_anon_decl.arena(), try param_ty.copy(params_anon_decl.arena())), - ); + const is_generic = param_ty == .generic_poison_type; + const param_ty_val = try ip.get(gpa, .{ .opt = .{ + .ty = try ip.get(gpa, .{ .opt_type = .type_type }), + .val = if (is_generic) .none else param_ty, + } }); const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @truncate(u1, info.noalias_bits >> index) != 0; }; - const param_fields = try params_anon_decl.arena().create([3]Value); - param_fields.* = .{ + const param_fields = .{ // is_generic: bool, - Value.makeBool(is_generic), + Value.makeBool(is_generic).toIntern(), // is_noalias: bool, - Value.makeBool(is_noalias), + Value.makeBool(is_noalias).toIntern(), // type: ?type, param_ty_val, }; - param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields); + param_val.* = try mod.intern(.{ .aggregate = .{ + .ty = param_info_ty.toIntern(), + .storage = .{ .elems = ¶m_fields }, + } }); } const args_val = v: { - const fn_info_decl_index = (try sema.namespaceLookup( - block, - src, - type_info_ty.getNamespace().?, - "Fn", - )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); - try sema.ensureDeclAnalyzed(fn_info_decl_index); - const fn_info_decl = sema.mod.declPtr(fn_info_decl_index); - var fn_ty_buffer: Value.ToTypeBuffer = undefined; - const fn_ty = fn_info_decl.val.toType(&fn_ty_buffer); - const param_info_decl_index = (try sema.namespaceLookup( - block, - src, - fn_ty.getNamespace().?, - "Param", - )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); - try sema.ensureDeclAnalyzed(param_info_decl_index); - const param_info_decl = sema.mod.declPtr(param_info_decl_index); - var param_buffer: Value.ToTypeBuffer = undefined; - const param_ty = param_info_decl.val.toType(¶m_buffer); + const new_decl_ty = try mod.arrayType(.{ + .len = param_vals.len, + .child = param_info_ty.toIntern(), + }); const new_decl = try params_anon_decl.finish( - try Type.Tag.array.create(params_anon_decl.arena(), .{ - .len = param_vals.len, - .elem_type = try param_ty.copy(params_anon_decl.arena()), - }), - try Value.Tag.aggregate.create( - params_anon_decl.arena(), - param_vals, - ), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .elems = param_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, param_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = param_info_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(), + } }); }; - const ret_ty_opt = if (info.return_type.tag() != .generic_poison) - try Value.Tag.opt_payload.create( - sema.arena, - try Value.Tag.ty.create(sema.arena, info.return_type), - ) - else - Value.null; + const info = mod.typeToFunc(ty).?; + const ret_ty_opt = try mod.intern(.{ .opt = .{ + .ty = try ip.get(gpa, .{ .opt_type = .type_type }), + .val = 
if (info.return_type == .generic_poison_type) .none else info.return_type, + } }); - const field_values = try sema.arena.create([6]Value); - field_values.* = .{ + const callconv_ty = try sema.getBuiltinType("CallingConvention"); + + const field_values = .{ // calling_convention: CallingConvention, - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)), + (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).toIntern(), // alignment: comptime_int, - try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)), + (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(), // is_generic: bool, - Value.makeBool(info.is_generic), + Value.makeBool(info.is_generic).toIntern(), // is_var_args: bool, - Value.makeBool(info.is_var_args), + Value.makeBool(info.is_var_args).toIntern(), // return_type: ?type, ret_ty_opt, // args: []const Fn.Param, args_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Fn)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = fn_info_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Int => { - const info = ty.intInfo(target); - const field_values = try sema.arena.alloc(Value, 2); - // signedness: Signedness, - field_values[0] = try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(info.signedness), - ); - // bits: comptime_int, - field_values[1] = try Value.Tag.int_u64.create(sema.arena, info.bits); + const int_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Int"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, int_info_decl_index); + try sema.ensureDeclAnalyzed(int_info_decl_index); + const int_info_decl = mod.declPtr(int_info_decl_index); + const int_info_ty = int_info_decl.val.toType(); - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Int)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const signedness_ty = try sema.getBuiltinType("Signedness"); + const info = ty.intInfo(mod); + const field_values = .{ + // signedness: Signedness, + try (try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness))).intern(signedness_ty, mod), + // bits: u16, + (try mod.intValue(Type.u16, info.bits)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = int_info_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Float => { - const field_values = try sema.arena.alloc(Value, 1); - // bits: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target)); + const float_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Float"), + 
)).?; + try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); + try sema.ensureDeclAnalyzed(float_info_decl_index); + const float_info_decl = mod.declPtr(float_info_decl_index); + const float_info_ty = float_info_decl.val.toType(); - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Float)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_vals = .{ + // bits: u16, + (try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = float_info_ty.toIntern(), + .storage = .{ .elems = &field_vals }, + } }), + } })).toValue()); }, .Pointer => { - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) - try Value.Tag.int_u64.create(sema.arena, info.@"align") + try mod.intValue(Type.comptime_int, info.@"align") else - try info.pointee_type.lazyAbiAlignment(target, sema.arena); + try info.pointee_type.lazyAbiAlignment(mod); - const field_values = try sema.arena.create([8]Value); - field_values.* = .{ - // size: Size, - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size)), - // is_const: bool, - Value.makeBool(!info.mutable), - // is_volatile: bool, - Value.makeBool(info.@"volatile"), - // alignment: comptime_int, - alignment, - // address_space: AddressSpace - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace")), - // child: type, - try Value.Tag.ty.create(sema.arena, info.pointee_type), - // is_allowzero: bool, - Value.makeBool(info.@"allowzero"), - // sentinel: ?*const anyopaque, - try sema.optRefValue(block, info.pointee_type, info.sentinel), + const addrspace_ty = try sema.getBuiltinType("AddressSpace"); + const pointer_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Pointer"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); + }; + const ptr_size_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + pointer_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Size"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Pointer)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // size: Size, + try (try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size))).intern(ptr_size_ty, mod), + // is_const: bool, + Value.makeBool(!info.mutable).toIntern(), + // is_volatile: bool, + Value.makeBool(info.@"volatile").toIntern(), + // alignment: comptime_int, + alignment.toIntern(), + // address_space: AddressSpace + try (try mod.enumValueFieldIndex(addrspace_ty, 
@enumToInt(info.@"addrspace"))).intern(addrspace_ty, mod), + // child: type, + info.pointee_type.toIntern(), + // is_allowzero: bool, + Value.makeBool(info.@"allowzero").toIntern(), + // sentinel: ?*const anyopaque, + (try sema.optRefValue(block, info.pointee_type, info.sentinel)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = pointer_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Array => { - const info = ty.arrayInfo(); - const field_values = try sema.arena.alloc(Value, 3); - // len: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); - // child: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); - // sentinel: ?*const anyopaque, - field_values[2] = try sema.optRefValue(block, info.elem_type, info.sentinel); + const array_field_ty = t: { + const array_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Array"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, array_field_ty_decl_index); + try sema.ensureDeclAnalyzed(array_field_ty_decl_index); + const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); + break :t array_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Array)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + // sentinel: ?*const anyopaque, + (try sema.optRefValue(block, info.elem_type, info.sentinel)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = array_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Vector => { - const info = ty.arrayInfo(); - const field_values = try sema.arena.alloc(Value, 2); - // len: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); - // child: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); + const vector_field_ty = t: { + const vector_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Vector"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, vector_field_ty_decl_index); + try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); + const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); + break :t vector_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Vector)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: 
comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = vector_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Optional => { - const field_values = try sema.arena.alloc(Value, 1); - // child: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena)); + const optional_field_ty = t: { + const optional_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Optional"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, optional_field_ty_decl_index); + try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); + const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index); + break :t optional_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Optional)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // child: type, + ty.optionalChild(mod).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = optional_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .ErrorSet => { var fields_anon_decl = try block.startAnonDecl(); @@ -16066,17 +16257,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const set_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "Error", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Error"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); try sema.ensureDeclAnalyzed(set_field_ty_decl_index); - const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try set_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); + break :t set_field_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(try error_field_ty.copy(sema.arena)); + try sema.queueFullTypeResolution(error_field_ty); // If the error set is inferred it must be resolved at this point try sema.resolveInferredErrorSetTy(block, src, ty); @@ -16084,90 +16274,119 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise - const error_field_vals: ?[]Value = if (ty.isAnyError()) null else blk: { - const names = ty.errorSetNames(); - const vals = try fields_anon_decl.arena().alloc(Value, names.len); + const error_field_vals = if (ty.isAnyError(mod)) null else blk: { + const vals = try 
sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len); for (vals, 0..) |*field_val, i| { - const name = names[i]; + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(ty.errorSetNames(mod)[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const error_field_fields = try fields_anon_decl.arena().create([1]Value); - error_field_fields.* = .{ + const error_field_fields = .{ // name: []const u8, name_val, }; - - field_val.* = try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - error_field_fields, - ); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = error_field_ty.toIntern(), + .storage = .{ .elems = &error_field_fields }, + } }); } break :blk vals; }; // Build our ?[]const Error value - const errors_val = if (error_field_vals) |vals| v: { + const slice_errors_ty = try mod.ptrType(.{ + .child = error_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + }); + const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern()); + const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { + const array_errors_ty = try mod.arrayType(.{ + .len = vals.len, + .child = error_field_ty.toIntern(), + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ - .len = vals.len, - .elem_type = error_field_ty, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - vals, - ), + array_errors_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_errors_ty.toIntern(), + .storage = .{ .elems = vals }, + } })).toValue(), 0, // default alignment ); - - const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl); - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = new_decl_val, - .len = try Value.Tag.int_u64.create(sema.arena, vals.len), - }); - break :v try Value.Tag.opt_payload.create(sema.arena, slice_val); - } else Value.null; + break :v try mod.intern(.{ .ptr = .{ + .ty = slice_errors_ty.toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, vals.len)).toIntern(), + } }); + } else .none; + const errors_val = try mod.intern(.{ .opt = .{ + .ty = opt_slice_errors_ty.toIntern(), + .val = errors_payload_val, + } }); // Construct Type{ .ErrorSet = errors_val } - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorSet)), - .val = errors_val, - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, 
@enumToInt(std.builtin.TypeId.ErrorSet))).toIntern(), + .val = errors_val, + } })).toValue()); }, .ErrorUnion => { - const field_values = try sema.arena.alloc(Value, 2); - // error_set: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet()); - // payload: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload()); + const error_union_field_ty = t: { + const error_union_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "ErrorUnion"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index); + try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); + const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); + break :t error_union_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorUnion)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // error_set: type, + ty.errorUnionSet(mod).toIntern(), + // payload: type, + ty.errorUnionPayload(mod).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = error_union_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Enum => { // TODO: look into memoizing this result. - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try ty.intTagType(&int_tag_type_buffer).copy(sema.arena); - - const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum()); + const is_exhaustive = Value.makeBool(ip.indexToKey(ty.toIntern()).enum_type.tag_mode != .nonexhaustive); var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); @@ -16176,88 +16395,121 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "EnumField", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "EnumField"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); - const enum_field_ty_decl = sema.mod.declPtr(enum_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try enum_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); + break :t enum_field_ty_decl.val.toType(); }; - const enum_fields = ty.enumFields(); - const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_fields.count()); - + const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.indexToKey(ty.toIntern()).enum_type.names.len); for (enum_field_vals, 0..) 
|*field_val, i| { - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, i), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); - - var buffer: Value.Payload.U64 = undefined; - const int_val = try tag_val.enumToInt(ty, &buffer).copy(fields_anon_decl.arena()); - - const name = enum_fields.keys()[i]; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + const value_val = if (enum_type.values.len > 0) + try mod.intern_pool.getCoerced(gpa, enum_type.values[i], .comptime_int_type) + else + try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = @intCast(u64, i) }, + } }); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(enum_type.names[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const enum_field_fields = try fields_anon_decl.arena().create([2]Value); - enum_field_fields.* = .{ + const enum_field_fields = .{ // name: []const u8, name_val, // value: comptime_int, - int_val, + value_val, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = enum_field_ty.toIntern(), + .storage = .{ .elems = &enum_field_fields }, + } }); } const fields_val = v: { + const fields_array_ty = try mod.arrayType(.{ + .len = enum_field_vals.len, + .child = enum_field_ty.toIntern(), + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ - .len = enum_field_vals.len, - .elem_type = enum_field_ty, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - enum_field_vals, - ), + fields_array_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = fields_array_ty.toIntern(), + .storage = .{ .elems = enum_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = enum_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(), + } }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.indexToKey(ty.toIntern()).enum_type.namespace); - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const type_enum_ty = t: { + const type_enum_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Enum"), 
+ )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index); + try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); + const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); + break :t type_enum_ty_decl.val.toType(); + }; + + const field_values = .{ // tag_type: type, - try Value.Tag.ty.create(sema.arena, int_tag_ty), + ip.indexToKey(ty.toIntern()).enum_type.tag_ty, // fields: []const EnumField, fields_val, // decls: []const Declaration, decls_val, // is_exhaustive: bool, - is_exhaustive, + is_exhaustive.toIntern(), }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Enum)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_enum_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Union => { // TODO: look into memoizing this result. @@ -16265,91 +16517,135 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_union_ty = t: { + const type_union_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Union"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index); + try sema.ensureDeclAnalyzed(type_union_ty_decl_index); + const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); + break :t type_union_ty_decl.val.toType(); + }; + const union_field_ty = t: { const union_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "UnionField", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "UnionField"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); - const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try union_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); + break :t union_field_ty_decl.val.toType(); }; const union_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = union_ty.containerLayout(); + const layout = union_ty.containerLayout(mod); - const union_fields = union_ty.unionFields(); - const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count()); + const union_fields = union_ty.unionFields(mod); + const union_field_vals = try gpa.alloc(InternPool.Index, union_fields.count()); + defer gpa.free(union_field_vals); for (union_field_vals, 0..) 
|*field_val, i| { const field = union_fields.values()[i]; - const name = union_fields.keys()[i]; + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(union_fields.keys()[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const union_field_fields = try fields_anon_decl.arena().create([3]Value); const alignment = switch (layout) { .Auto, .Extern => try sema.unionFieldAlignment(field), .Packed => 0, }; - union_field_fields.* = .{ + const union_field_fields = .{ // name: []const u8, name_val, // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty), + field.ty.toIntern(), // alignment: comptime_int, - try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment), + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = union_field_ty.toIntern(), + .storage = .{ .elems = &union_field_fields }, + } }); } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = union_field_vals.len, + .child = union_field_ty.toIntern(), + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ - .len = union_field_vals.len, - .elem_type = union_field_ty, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, union_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.toIntern(), + .storage = .{ .elems = union_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, union_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = union_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(), + } }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); - const enum_tag_ty_val = if (union_ty.unionTagType()) |tag_ty| v: { - const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty); - break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); - } else Value.null; + const enum_tag_ty_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).toIntern(), + .val = if 
(union_ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none, + } }); - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const container_layout_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "ContainerLayout"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); + }; + + const field_values = .{ // layout: ContainerLayout, - try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(layout), - ), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // tag_type: ?type, enum_tag_ty_val, @@ -16358,14 +16654,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Union)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_union_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Struct => { // TODO: look into memoizing this result. @@ -16373,154 +16669,212 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_struct_ty = t: { + const type_struct_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Struct"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index); + try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); + const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); + break :t type_struct_ty_decl.val.toType(); + }; + const struct_field_ty = t: { const struct_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "StructField", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "StructField"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); - const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try struct_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); + break :t struct_field_ty_decl.val.toType(); }; + const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = struct_ty.containerLayout(); + const layout = struct_ty.containerLayout(mod); - const struct_field_vals = fv: { - if (struct_ty.isSimpleTupleOrAnonStruct()) { - const tuple = struct_ty.tupleFields(); - const field_types = tuple.types; - const struct_field_vals = try 
fields_anon_decl.arena().alloc(Value, field_types.len); - for (struct_field_vals, 0..) |*struct_field_val, i| { - const field_ty = field_types[i]; - const name_val = v: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - const bytes = if (struct_ty.castTag(.anon_struct)) |payload| - try anon_decl.arena().dupeZ(u8, payload.data.names[i]) - else - try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); - const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), - 0, // default alignment - ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len), - }); - }; + var struct_field_vals: []InternPool.Index = &.{}; + defer gpa.free(struct_field_vals); + fv: { + const struct_type = switch (ip.indexToKey(struct_ty.toIntern())) { + .anon_struct_type => |tuple| { + struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); + for (struct_field_vals, 0..) |*struct_field_val, i| { + const anon_struct_type = ip.indexToKey(struct_ty.toIntern()).anon_struct_type; + const field_ty = anon_struct_type.types[i]; + const field_val = anon_struct_type.values[i]; + const name_val = v: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + // TODO: write something like getCoercedInts to avoid needing to dupe + const bytes = if (tuple.names.len != 0) + // https://github.com/ziglang/zig/issues/15709 + try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(struct_ty.toIntern()).anon_struct_type.names[i])) + else + try std.fmt.allocPrint(sema.arena, "{d}", .{i}); + const new_decl_ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + }); + const new_decl = try anon_decl.finish( + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = bytes }, + } })).toValue(), + 0, // default alignment + ); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(), + } }); + }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); - const field_val = tuple.values[i]; - const is_comptime = field_val.tag() != .unreachable_value; - const opt_default_val = if (is_comptime) field_val else null; - const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val); - struct_field_fields.* = .{ - // name: []const u8, - name_val, - // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field_ty), - // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), - // is_comptime: bool, - Value.makeBool(is_comptime), - // alignment: comptime_int, - try field_ty.lazyAbiAlignment(target, fields_anon_decl.arena()), - }; - struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); - } - break :fv struct_field_vals; - } - const struct_fields = struct_ty.structFields(); - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count()); + const is_comptime = field_val != .none; + const opt_default_val = if (is_comptime) field_val.toValue() else null; + const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val); + const struct_field_fields = .{ + // name: []const u8, + 
name_val, + // type: type, + field_ty, + // default_value: ?*const anyopaque, + default_val_ptr.toIntern(), + // is_comptime: bool, + Value.makeBool(is_comptime).toIntern(), + // alignment: comptime_int, + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(), + }; + struct_field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.toIntern(), + .storage = .{ .elems = &struct_field_fields }, + } }); + } + break :fv; + }, + .struct_type => |s| s, + else => unreachable, + }; + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv; + struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count()); - for (struct_field_vals, 0..) |*field_val, i| { - const field = struct_fields.values()[i]; - const name = struct_fields.keys()[i]; + for ( + struct_field_vals, + struct_obj.fields.keys(), + struct_obj.fields.values(), + ) |*field_val, name_nts, field| { + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(name_nts)); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); - const opt_default_val = if (field.default_val.tag() == .unreachable_value) + const opt_default_val = if (field.default_val == .none) null else - field.default_val; + field.default_val.toValue(); const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); - const alignment = field.alignment(target, layout); + const alignment = field.alignment(mod, layout); - struct_field_fields.* = .{ + const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty), + field.ty.toIntern(), // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), + default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(field.is_comptime), + Value.makeBool(field.is_comptime).toIntern(), // alignment: comptime_int, - try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment), + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.toIntern(), + .storage = .{ .elems = &struct_field_fields }, + } }); } - break :fv struct_field_vals; - }; + } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = struct_field_vals.len, + .child = struct_field_ty.toIntern(), + .sentinel = .none, + }); 
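// Editor's note: a minimal sketch of the recurring "comptime slice" pattern
// this hunk keeps producing, assuming `mod: *Module`, an element type
// `elem_ty: Type`, a finished anon decl `new_decl`, and a length `n` are in
// scope (names illustrative, not part of the diff):
const slice_ty = try mod.ptrType(.{
    .child = elem_ty.toIntern(),
    .flags = .{ .size = .Slice, .is_const = true },
});
const slice_val = try mod.intern(.{ .ptr = .{
    .ty = slice_ty.toIntern(),
    .addr = .{ .decl = new_decl },
    .len = (try mod.intValue(Type.usize, n)).toIntern(),
} });
// The anon decl anchors the array storage; the interned `.ptr` key carries
// the slice type and length, replacing the old arena-allocated
// `Value.Tag.decl_ref` + `Value.Tag.slice` pair.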
const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ - .len = struct_field_vals.len, - .elem_type = struct_field_ty, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, struct_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.toIntern(), + .storage = .{ .elems = struct_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, struct_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = struct_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(), + } }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); - const backing_integer_val = blk: { - if (layout == .Packed) { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const backing_integer_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).toIntern(), + .val = if (layout == .Packed) val: { + const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); - assert(struct_obj.backing_int_ty.isInt()); - const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty); - break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); - } else { - break :blk Value.initTag(.null_value); - } + assert(struct_obj.backing_int_ty.isInt(mod)); + break :val struct_obj.backing_int_ty.toIntern(); + } else .none, + } }); + + const container_layout_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "ContainerLayout"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); }; - const field_values = try sema.arena.create([5]Value); - field_values.* = .{ + const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(layout), - ), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16528,36 +16882,48 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple()), + Value.makeBool(struct_ty.isTuple(mod)).toIntern(), }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Struct)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = 
type_struct_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Opaque => { // TODO: look into memoizing this result. - const opaque_ty = try sema.resolveTypeFields(ty); - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespace()); + const type_opaque_ty = t: { + const type_opaque_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Opaque"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index); + try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); + const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); + break :t type_opaque_ty_decl.val.toType(); + }; - const field_values = try sema.arena.create([1]Value); - field_values.* = .{ + const opaque_ty = try sema.resolveTypeFields(ty); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod)); + + const field_values = .{ // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Opaque)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_opaque_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Frame => return sema.failWithUseOfAsync(block, src), .AnyFrame => return sema.failWithUseOfAsync(block, src), @@ -16569,8 +16935,11 @@ fn typeInfoDecls( block: *Block, src: LazySrcLoc, type_info_ty: Type, - opt_namespace: ?*Module.Namespace, -) CompileError!Value { + opt_namespace: Module.Namespace.OptionalIndex, +) CompileError!InternPool.Index { + const mod = sema.mod; + const gpa = sema.gpa; + var decls_anon_decl = try block.startAnonDecl(); defer decls_anon_decl.deinit(); @@ -16578,89 +16947,110 @@ fn typeInfoDecls( const declaration_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "Declaration", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try mod.intern_pool.getOrPutString(gpa, "Declaration"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); try sema.ensureDeclAnalyzed(declaration_ty_decl_index); - const declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try declaration_ty_decl.val.toType(&buffer).copy(decls_anon_decl.arena()); + const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); + break :t declaration_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena)); + try sema.queueFullTypeResolution(declaration_ty); - var decl_vals = std.ArrayList(Value).init(sema.gpa); + var decl_vals = std.ArrayList(InternPool.Index).init(gpa); defer decl_vals.deinit(); - var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa); + var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa); defer seen_namespaces.deinit(); - if (opt_namespace) |some| { - try sema.typeInfoNamespaceDecls(block, 
decls_anon_decl.arena(), some, &decl_vals, &seen_namespaces); + if (opt_namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try sema.typeInfoNamespaceDecls(block, namespace, declaration_ty, &decl_vals, &seen_namespaces); } + const array_decl_ty = try mod.arrayType(.{ + .len = decl_vals.items.len, + .child = declaration_ty.toIntern(), + .sentinel = .none, + }); const new_decl = try decls_anon_decl.finish( - try Type.Tag.array.create(decls_anon_decl.arena(), .{ - .len = decl_vals.items.len, - .elem_type = declaration_ty, - }), - try Value.Tag.aggregate.create( - decls_anon_decl.arena(), - try decls_anon_decl.arena().dupe(Value, decl_vals.items), - ), + array_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.toIntern(), + .storage = .{ .elems = decl_vals.items }, + } })).toValue(), 0, // default alignment ); - return try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, decl_vals.items.len), - }); + return try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = declaration_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(), + } }); } fn typeInfoNamespaceDecls( sema: *Sema, block: *Block, - decls_anon_decl: Allocator, namespace: *Namespace, - decl_vals: *std.ArrayList(Value), + declaration_ty: Type, + decl_vals: *std.ArrayList(InternPool.Index), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { + const mod = sema.mod; + const ip = &mod.intern_pool; const gop = try seen_namespaces.getOrPut(namespace); if (gop.found_existing) return; const decls = namespace.decls.keys(); for (decls) |decl_index| { - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.kind == .@"usingnamespace") { if (decl.analysis == .in_progress) continue; - try sema.mod.ensureDeclAnalyzed(decl_index); - var buf: Value.ToTypeBuffer = undefined; - const new_ns = decl.val.toType(&buf).getNamespace().?; - try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); + try mod.ensureDeclAnalyzed(decl_index); + const new_ns = decl.val.toType().getNamespace(mod).?; + try sema.typeInfoNamespaceDecls(block, new_ns, declaration_ty, decl_vals, seen_namespaces); continue; } if (decl.kind != .named) continue; const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(decl.name)); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(decls_anon_decl, .{ - .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl), - .len = try Value.Tag.int_u64.create(decls_anon_decl, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const fields = try decls_anon_decl.create([2]Value); - fields.* = .{ + const fields = .{ //name: []const u8, name_val, //is_pub: bool, - Value.makeBool(decl.is_pub), + Value.makeBool(decl.is_pub).toIntern(), }; - try decl_vals.append(try Value.Tag.aggregate.create(decls_anon_decl, fields)); + try decl_vals.append(try mod.intern(.{ .aggregate = .{ + .ty = declaration_ty.toIntern(), + .storage = .{ .elems = &fields }, + } })); } } @@ -16695,7 +17085,7 @@ fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const operand = try sema.resolveBody(&child_block, body, inst); const operand_ty = sema.typeOf(operand); - if (operand_ty.tag() == .generic_poison) return error.GenericPoison; + if (operand_ty.isGenericPoison()) return error.GenericPoison; return sema.addType(operand_ty); } @@ -16709,10 +17099,11 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type { - switch (operand.zigTypeTag()) { + const mod = sema.mod; + switch (operand.zigTypeTag(mod)) { .ComptimeInt => return Type.comptime_int, .Int => { - const bits = operand.bitSize(sema.mod.getTarget()); + const bits = operand.bitSize(mod); const count = if (bits == 0) 0 else blk: { @@ -16723,14 +17114,14 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi } break :blk count; }; - return Module.makeIntType(sema.arena, .unsigned, count); + return mod.intType(.unsigned, count); }, .Vector => { - const elem_ty = operand.elemType2(); + const elem_ty = operand.elemType2(mod); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); - return Type.Tag.vector.create(sema.arena, .{ - .len = operand.vectorLen(), - .elem_type = log2_elem_ty, + return mod.vectorType(.{ + .len = operand.vectorLen(mod), + .child = log2_elem_ty.toIntern(), }); }, else => {}, @@ -16739,7 +17130,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi block, src, "bit shifting operation expected integer type, found '{}'", - .{operand.fmt(sema.mod)}, + .{operand.fmt(mod)}, ); } @@ -16790,6 +17181,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; @@ -16797,7 +17189,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(operand)) |val| { - return if (val.isUndef()) + return if (val.isUndef(mod)) sema.addConstUndef(Type.bool) else if (val.toBool()) Air.Inst.Ref.bool_false @@ -16817,6 +17209,7 @@ fn zirBoolBr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const datas = 
sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; const lhs = try sema.resolveInst(inst_data.lhs); @@ -16865,12 +17258,12 @@ fn zirBoolBr( _ = try lhs_block.addBr(block_inst, lhs_result); const rhs_result = try sema.resolveBody(rhs_block, body, inst); - if (!sema.typeOf(rhs_result).isNoReturn()) { + if (!sema.typeOf(rhs_result).isNoReturn(mod)) { _ = try rhs_block.addBr(block_inst, rhs_result); } const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst); - if (!sema.typeOf(rhs_result).isNoReturn()) { + if (!sema.typeOf(rhs_result).isNoReturn(mod)) { if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| { if (is_bool_or and rhs_val.toBool()) { return Air.Inst.Ref.bool_true; @@ -16920,9 +17313,10 @@ fn finishCondBr( } fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Optional, .Null, .Undefined => return, - .Pointer => if (ty.isPtrLikeOptional()) return, + .Pointer => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.failWithExpectedOptionalType(block, src, ty); @@ -16951,10 +17345,11 @@ fn zirIsNonNullPtr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); - try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2()); + try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod)); if ((try sema.resolveMaybeUndefVal(ptr)) == null) { return block.addUnOp(.is_non_null_ptr, ptr); } @@ -16963,10 +17358,11 @@ fn zirIsNonNullPtr( } fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion, .Undefined => return, else => return sema.fail(block, src, "expected error union type, found '{}'", .{ - ty.fmt(sema.mod), + ty.fmt(mod), }), } } @@ -16986,10 +17382,11 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); - try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2()); + try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod)); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } @@ -17012,6 +17409,7 @@ fn zirCondbr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); @@ -17052,8 +17450,8 @@ fn zirCondbr( const err_inst_data = sema.code.instructions.items(.data)[index].un_node; const err_operand = try sema.resolveInst(err_inst_data.operand); const operand_ty = sema.typeOf(err_operand); - assert(operand_ty.zigTypeTag() == .ErrorUnion); - const result_ty = operand_ty.errorUnionSet(); + assert(operand_ty.zigTypeTag(mod) == .ErrorUnion); + const result_ty = operand_ty.errorUnionSet(mod); break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand); }; @@ -17079,7 +17477,7 @@ fn 
zirCondbr( return always_noreturn; } -fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17087,9 +17485,10 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); @@ -17124,7 +17523,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! return try_inst; } -fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17133,9 +17532,10 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand = try sema.resolveInst(extra.data.operand); const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); @@ -17156,9 +17556,9 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr _ = try sema.analyzeBodyInner(&sub_block, body); const operand_ty = sema.typeOf(operand); - const ptr_info = operand_ty.ptrInfo().data; - const res_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = err_union_ty.errorUnionPayload(), + const ptr_info = operand_ty.ptrInfo(mod); + const res_ty = try Type.ptr(sema.arena, mod, .{ + .pointee_type = err_union_ty.errorUnionPayload(mod), .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, .@"allowzero" = ptr_info.@"allowzero", @@ -17254,16 +17654,17 @@ fn zirRetErrValue( block: *Block, inst: Zir.Inst.Index, ) CompileError!Zir.Inst.Index { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const err_name = inst_data.get(sema.code); + const err_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(err_name); const src = inst_data.src(); - // Return the error code from the function. 
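// Editor's note: a minimal sketch of the new interned form of `error.Foo`
// built by the `+` lines below, assuming `mod: *Module` and `gpa` are in
// scope (the name "Foo" is illustrative):
const err_name = try mod.intern_pool.getOrPutString(gpa, "Foo");
_ = try mod.getErrorValue(err_name); // ensure a global error integer exists
const set_ty = try mod.singleErrorSetType(err_name);
const err_val = (try mod.intern(.{ .err = .{
    .ty = set_ty.toIntern(),
    .name = err_name,
} })).toValue();
// Both the `error{Foo}` type and the error value are now InternPool keys
// rather than arena-allocated `Type.Tag`/`Value.Tag` payloads.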
- const kv = try sema.mod.getErrorValue(err_name); - const result_inst = try sema.addConstant( - try Type.Tag.error_set_single.create(sema.arena, kv.key), - try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - ); + const error_set_type = try mod.singleErrorSetType(err_name); + const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.toIntern(), + .name = err_name, + } })).toValue()); return sema.analyzeRet(block, result_inst, src); } @@ -17275,16 +17676,17 @@ fn zirRetImplicit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const operand = try sema.resolveInst(inst_data.operand); const r_brace_src = inst_data.src(); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const base_tag = sema.fn_ret_ty.baseZigTypeTag(); + const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod); if (base_tag == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17294,7 +17696,7 @@ fn zirRetImplicit( } else if (base_tag != .Void) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17346,6 +17748,7 @@ fn retWithErrTracing( ret_tag: Air.Inst.Tag, operand: Air.Inst.Ref, ) CompileError!Zir.Inst.Index { + const mod = sema.mod; const need_check = switch (is_non_err) { .bool_true => { _ = try block.addUnOp(ret_tag, operand); @@ -17357,7 +17760,7 @@ fn retWithErrTracing( const gpa = sema.gpa; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const return_err_fn = try sema.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; @@ -17397,17 +17800,19 @@ fn retWithErrTracing( } fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return false; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.error_return_trace)) return false; - return fn_ret_ty.isError() and - sema.mod.comp.bin_file.options.error_return_tracing; + return fn_ret_ty.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; } fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return; + if (!mod.backendSupportsFeature(.error_return_trace)) return; + if (!mod.comp.bin_file.options.error_return_tracing) return; // This is only relevant at runtime. 
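// Editor's note: a user-level illustration of what this save/restore pair
// protects (illustrative code, not from this diff): when an error is caught
// and discarded, the saved trace index is restored so the handled error does
// not pollute the error return trace of a later failure.
fn optional() !void {
    return error.Soft;
}
fn required() !void {
    return error.Hard;
}
fn tryBoth() !void {
    optional() catch {}; // handled: the saved trace index is restored here
    try required(); // only this failure shows up in the error return trace
}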
if (block.is_comptime or block.is_typeof) return; @@ -17415,7 +17820,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const save_index = inst_data.operand == .none or b: { const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - break :b operand_ty.isError(); + break :b operand_ty.isError(mod); }; if (save_index) @@ -17436,7 +17841,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) const tracy = trace(@src()); defer tracy.end(); - const saved_index = if (Zir.refToIndex(inst_data.block)) |zir_block| b: { + const saved_index = if (Zir.refToIndexAllowNone(inst_data.block)) |zir_block| b: { var block = start_block; while (true) { if (block.label) |label| { @@ -17462,22 +17867,21 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere - const operand = try sema.resolveInst(inst_data.operand); + const operand = try sema.resolveInstAllowNone(inst_data.operand); return sema.popErrorReturnTrace(start_block, src, operand, saved_index); } fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { - assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion); + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; + assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); - if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { + if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| { const op_ty = sema.typeOf(uncasted_operand); - switch (op_ty.zigTypeTag()) { - .ErrorSet => { - try payload.data.addErrorSet(sema.gpa, op_ty); - }, - .ErrorUnion => { - try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet()); - }, + switch (op_ty.zigTypeTag(mod)) { + .ErrorSet => try ies.addErrorSet(op_ty, ip, gpa), + .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, gpa), else => {}, } } @@ -17492,7 +17896,8 @@ fn analyzeRet( // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. 
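// Editor's note: a small user-level example of the case handled here
// (illustrative, not part of the diff): returning a concrete error from a
// function whose error set is inferred.
fn parse(s: []const u8) !u32 {
    if (s.len == 0) return error.Empty; // must land in the inferred set
    return 42;
}
// `addToInferredErrorSet` records `error.Empty` in the in-scope function's
// inferred error set before the coercion to the `!u32` return type runs,
// which is why the coercion below can succeed.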
- if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { + const mod = sema.mod; + if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) { @@ -17540,6 +17945,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node }; @@ -17552,46 +17958,54 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const elem_ty = blk: { const air_inst = try sema.resolveInst(extra.data.elem_type); const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| { - if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer()) { + if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(mod)) { try sema.errNote(block, elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{}); } return err; }; - if (ty.tag() == .generic_poison) return error.GenericPoison; + if (ty.isGenericPoison()) return error.GenericPoison; break :blk ty; }; - const target = sema.mod.getTarget(); + + if (elem_ty.zigTypeTag(mod) == .NoReturn) + return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); + + const target = mod.getTarget(); var extra_i = extra.end; const sentinel = if (inst_data.flags.has_sentinel) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; - break :blk (try sema.resolveInstConst(block, sentinel_src, ref, "pointer sentinel value must be comptime-known")).val; - } else null; + const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src); + const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known"); + break :blk val.toIntern(); + } else .none; - const abi_align: u32 = if (inst_data.flags.has_align) blk: { + const abi_align: InternPool.Alignment = if (inst_data.flags.has_align) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src); const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. 
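// Editor's note: the situation being special-cased, as user code
// (illustrative, not part of the diff):
const T = struct { x: u32 };
const P = *align(@alignOf(T)) T; // alignment is the lazy align of `T` itself
// `@alignOf(T)` stays a lazy-alignment integer until `T`'s layout is
// resolved; when it refers to the pointer's own element type, the pointer
// can simply use natural alignment (`0` before this change, `.none` after)
// without forcing layout resolution.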
- if (val.castTag(.lazy_align)) |payload| { - if (payload.data.eql(elem_ty, sema.mod)) { - break :blk 0; - } + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none, + else => {}, + }, + else => {}, } - const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); - break :blk abi_align; - } else 0; + break :blk InternPool.Alignment.fromByteUnits(abi_align); + } else .none; const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer); - } else if (elem_ty.zigTypeTag() == .Fn and target.cpu.arch == .avr) .flash else .generic; + } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic; const bit_offset = if (inst_data.flags.has_bit_range) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); @@ -17611,50 +18025,52 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{}); } - if (elem_ty.zigTypeTag() == .NoReturn) { - return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + if (elem_ty.zigTypeTag(mod) == .Fn) { if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; - if (inst_data.flags.has_align and abi_align != 0 and fn_align != 0 and + const fn_align = mod.typeToFunc(elem_ty).?.alignment; + if (inst_data.flags.has_align and abi_align != .none and fn_align != .none and abi_align != fn_align) { return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (inst_data.size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{}); } else if (inst_data.size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl), elem_ty, .other); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{}); } } - const ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = elem_ty, + const ty = try mod.ptrType(.{ + .child = elem_ty.toIntern(), .sentinel = sentinel, - .@"align" = abi_align, - 
.@"addrspace" = address_space, - .bit_offset = bit_offset, - .host_size = host_size, - .mutable = inst_data.flags.is_mutable, - .@"allowzero" = inst_data.flags.is_allowzero, - .@"volatile" = inst_data.flags.is_volatile, - .size = inst_data.size, + .flags = .{ + .alignment = abi_align, + .address_space = address_space, + .is_const = !inst_data.flags.is_mutable, + .is_allowzero = inst_data.flags.is_allowzero, + .is_volatile = inst_data.flags.is_volatile, + .size = inst_data.size, + }, + .packed_offset = .{ + .bit_offset = bit_offset, + .host_size = host_size, + }, }); return sema.addType(ty); } @@ -17666,8 +18082,9 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const obj_ty = try sema.resolveType(block, src, inst_data.operand); + const mod = sema.mod; - switch (obj_ty.zigTypeTag()) { + switch (obj_ty.zigTypeTag(mod)) { .Struct => return sema.structInitEmpty(block, obj_ty, src, src), .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty), .Void => return sema.addConstant(obj_ty, Value.void), @@ -17683,12 +18100,13 @@ fn structInitEmpty( dest_src: LazySrcLoc, init_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. const struct_ty = try sema.resolveTypeFields(obj_ty); // The init values to use for the struct instance. - const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount()); + const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); @@ -17696,20 +18114,19 @@ fn structInitEmpty( } fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { - const arr_len = obj_ty.arrayLen(); + const mod = sema.mod; + const arr_len = obj_ty.arrayLen(mod); if (arr_len != 0) { - if (obj_ty.zigTypeTag() == .Array) { + if (obj_ty.zigTypeTag(mod) == .Array) { return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len}); } else { return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } - if (obj_ty.sentinel()) |sentinel| { - const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); - return sema.addConstant(obj_ty, val); - } else { - return sema.addConstant(obj_ty, Value.initTag(.empty_array)); - } + return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{ + .ty = obj_ty.toIntern(), + .storage = .{ .elems = &.{} }, + } })).toValue()); } fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -17719,7 +18136,7 @@ fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const init_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data; const union_ty = try sema.resolveType(block, ty_src, extra.union_type); - const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "name of field being initialized must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "name of field being initialized must be comptime-known"); const init = try sema.resolveInst(extra.init); return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src); } @@ -17731,21 +18148,23 @@ fn unionInit( init_src: 
LazySrcLoc, union_ty: Type, union_ty_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src); - const field = union_ty.unionFields().values()[field_index]; + const field = union_ty.unionFields(mod).values()[field_index]; const init = try sema.coerce(block, field.ty, uncasted_init, init_src); if (try sema.resolveMaybeUndefVal(init)) |init_val| { - const tag_ty = union_ty.unionTagTypeHypothetical(); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); - return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = init_val, - })); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); + return sema.addConstant(union_ty, (try mod.intern(.{ .un = .{ + .ty = union_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try init_val.intern(field.ty, mod), + } })).toValue()); } try sema.requireRuntimeBlock(block, init_src, null); @@ -17766,29 +18185,30 @@ fn zirStructInit( const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); const src = inst_data.src(); + const mod = sema.mod; const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[first_item.field_type].pl_node; const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; const resolved_ty = try sema.resolveType(block, src, first_field_type_extra.container_type); try sema.resolveTypeLayout(resolved_ty); - if (resolved_ty.zigTypeTag() == .Struct) { + if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. // Maps field index to field_type index of where it was already initialized. // For making sure all fields are accounted for and no fields are duplicated. - const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount()); + const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod)); defer gpa.free(found_fields); // The init values to use for the struct instance. 
- const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount()); + const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); var field_i: u32 = 0; var extra_index = extra.end; - const is_packed = resolved_ty.containerLayout() == .Packed; + const is_packed = resolved_ty.containerLayout(mod) == .Packed; while (field_i < extra.data.fields_len) : (field_i += 1) { const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index); extra_index = item.end; @@ -17796,8 +18216,8 @@ fn zirStructInit( const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); - const field_index = if (resolved_ty.isTuple()) + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); + const field_index = if (resolved_ty.isTuple(mod)) try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src) else try sema.structFieldIndex(block, resolved_ty, field_name, field_src); @@ -17815,19 +18235,19 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(field_index)) |default_value| { + if (!is_packed) if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index), sema.mod)) { + if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index); } }; } return sema.finishStructInit(block, src, src, field_inits, resolved_ty, is_ref); - } else if (resolved_ty.zigTypeTag() == .Union) { + } else if (resolved_ty.zigTypeTag(mod) == .Union) { if (extra.data.fields_len != 1) { return sema.fail(block, src, "union initialization expects exactly one field", .{}); } @@ -17837,32 +18257,32 @@ fn zirStructInit( const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); - const tag_ty = resolved_ty.unionTagTypeHypothetical(); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); const init_inst = try sema.resolveInst(item.data.init); if (try 
sema.resolveMaybeUndefVal(init_inst)) |val| { - return sema.addConstantMaybeRef( - block, - resolved_ty, - try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }), - is_ref, - ); + const field = resolved_ty.unionFields(mod).values()[field_index]; + return sema.addConstantMaybeRef(block, resolved_ty, (try mod.intern(.{ .un = .{ + .ty = resolved_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try val.intern(field.ty, mod), + } })).toValue(), is_ref); } if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = resolved_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); const field_ptr = try sema.unionFieldPtr(block, field_src, alloc, field_name, field_src, resolved_ty, true); try sema.storePtr(block, src, field_ptr, init_inst); - const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(), tag_val); + const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(mod), tag_val); _ = try block.addBinOp(.set_union_tag, alloc, new_tag); return sema.makePtrConst(block, alloc); } @@ -17870,7 +18290,7 @@ fn zirStructInit( try sema.requireRuntimeBlock(block, src, null); try sema.queueFullTypeResolution(resolved_ty); return block.addUnionInit(resolved_ty, field_index, init_inst); - } else if (resolved_ty.isAnonStruct()) { + } else if (resolved_ty.isAnonStruct(mod)) { return sema.fail(block, src, "TODO anon struct init validation", .{}); } unreachable; @@ -17885,76 +18305,70 @@ fn finishStructInit( struct_ty: Type, is_ref: bool, ) CompileError!Air.Inst.Ref { - const gpa = sema.gpa; + const mod = sema.mod; + const ip = &mod.intern_pool; var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - if (struct_ty.isAnonStruct()) { - const struct_obj = struct_ty.castTag(.anon_struct).?.data; - for (struct_obj.values, 0..) |default_val, i| { - if (field_inits[i] != .none) continue; + switch (ip.indexToKey(struct_ty.toIntern())) { + .anon_struct_type => |anon_struct| { + for (anon_struct.types, anon_struct.values, 0..) 
|field_ty, default_val, i| { + if (field_inits[i] != .none) continue; - if (default_val.tag() == .unreachable_value) { - const field_name = struct_obj.names[i]; - const template = "missing struct field: {s}"; - const args = .{field_name}; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, args); + if (default_val == .none) { + if (anon_struct.names.len == 0) { + const template = "missing tuple field with index {d}"; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, .{i}); + } else { + root_msg = try sema.errMsg(block, init_src, template, .{i}); + } + } else { + const field_name = anon_struct.names[i]; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, args); + } else { + root_msg = try sema.errMsg(block, init_src, template, args); + } + } } else { - root_msg = try sema.errMsg(block, init_src, template, args); + field_inits[i] = try sema.addConstant(field_ty.toType(), default_val.toValue()); } - } else { - field_inits[i] = try sema.addConstant(struct_obj.types[i], default_val); } - } - } else if (struct_ty.isTuple()) { - var i: u32 = 0; - const len = struct_ty.structFieldCount(); - while (i < len) : (i += 1) { - if (field_inits[i] != .none) continue; + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + for (struct_obj.fields.values(), 0..) |field, i| { + if (field_inits[i] != .none) continue; - const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { - const template = "missing tuple field with index {d}"; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, .{i}); + if (field.default_val == .none) { + const field_name = struct_obj.fields.keys()[i]; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, args); + } else { + root_msg = try sema.errMsg(block, init_src, template, args); + } } else { - root_msg = try sema.errMsg(block, init_src, template, .{i}); + field_inits[i] = try sema.addConstant(field.ty, field.default_val.toValue()); } - } else { - field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i), default_val); } - } - } else { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - for (struct_obj.fields.values(), 0..) 
|field, i| { - if (field_inits[i] != .none) continue; - - if (field.default_val.tag() == .unreachable_value) { - const field_name = struct_obj.fields.keys()[i]; - const template = "missing struct field: {s}"; - const args = .{field_name}; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, args); - } else { - root_msg = try sema.errMsg(block, init_src, template, args); - } - } else { - field_inits[i] = try sema.addConstant(field.ty, field.default_val); - } - } + }, + else => unreachable, } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); - try sema.mod.errNoteNonLazy( - struct_obj.data.srcLoc(sema.mod), + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(mod); + try mod.errNoteNonLazy( + struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -17969,18 +18383,22 @@ fn finishStructInit( } else null; const runtime_index = opt_runtime_index orelse { - const values = try sema.arena.alloc(Value, field_inits.len); - for (field_inits, 0..) |field_init, i| { - values[i] = (sema.resolveMaybeUndefVal(field_init) catch unreachable).?; + const elems = try sema.arena.alloc(InternPool.Index, field_inits.len); + for (elems, field_inits, 0..) |*elem, field_init, field_i| { + elem.* = try (sema.resolveMaybeUndefVal(field_init) catch unreachable).? + .intern(struct_ty.structFieldType(field_i, mod), mod); } - const struct_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, struct_ty, struct_val, is_ref); + const struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.toIntern(), + .storage = .{ .elems = elems }, + } }); + return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref); }; if (is_ref) { try sema.resolveStructLayout(struct_ty); const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = struct_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -17997,8 +18415,8 @@ fn finishStructInit( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(dest_src.node_offset.x, sema.gpa, decl, runtime_index); + const decl = mod.declPtr(block.src_decl); + const field_src = mod.initSrc(dest_src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, dest_src, field_src); unreachable; }, @@ -18014,79 +18432,85 @@ fn zirStructInitAnon( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); - const types = try sema.arena.alloc(Type, extra.data.fields_len); - const values = try sema.arena.alloc(Value, types.len); - var fields = std.StringArrayHashMapUnmanaged(u32){}; - defer fields.deinit(sema.gpa); - try fields.ensureUnusedCapacity(sema.gpa, types.len); + const types = try sema.arena.alloc(InternPool.Index, extra.data.fields_len); + const values = try sema.arena.alloc(InternPool.Index, types.len); + var fields = 
std.AutoArrayHashMap(InternPool.NullTerminatedString, u32).init(sema.arena); + try fields.ensureUnusedCapacity(types.len); // Find which field forces the expression to be runtime, if any. const opt_runtime_index = rs: { var runtime_index: ?usize = null; var extra_index = extra.end; - for (types, 0..) |*field_ty, i| { + for (types, 0..) |*field_ty, i_usize| { + const i = @intCast(u32, i_usize); const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; const name = sema.code.nullTerminatedString(item.data.field_name); - const gop = fields.getOrPutAssumeCapacity(name); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + const gop = fields.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { const msg = msg: { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const decl = mod.declPtr(block.src_decl); + const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); - const prev_source = Module.initSrc(src.node_offset.x, sema.gpa, decl, gop.value_ptr.*); + const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*); try sema.errNote(block, prev_source, msg, "other field here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - gop.value_ptr.* = @intCast(u32, i); + gop.value_ptr.* = i; const init = try sema.resolveInst(item.data.init); - field_ty.* = sema.typeOf(init); - if (types[i].zigTypeTag() == .Opaque) { + field_ty.* = sema.typeOf(init).toIntern(); + if (field_ty.toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const decl = mod.declPtr(block.src_decl); + const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i]); + try sema.addDeclaredHereNote(msg, field_ty.toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(init)) |init_val| { - values[i] = init_val; + values[i] = try init_val.intern(field_ty.toType(), mod); } else { - values[i] = Value.initTag(.unreachable_value); + values[i] = .none; runtime_index = i; } } break :rs runtime_index; }; - const tuple_ty = try Type.Tag.anon_struct.create(sema.arena, .{ - .names = try sema.arena.dupe([]const u8, fields.keys()), + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ + .names = fields.keys(), .types = types, .values = values, - }); + } }); const runtime_index = opt_runtime_index orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .storage = .{ .elems = values }, + } }); + return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, runtime_index); + const decl = mod.declPtr(block.src_decl); + const field_src = 
mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, field_src); unreachable; }, @@ -18094,9 +18518,9 @@ fn zirStructInitAnon( }; if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = tuple_ty, + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ + .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -18106,12 +18530,12 @@ fn zirStructInitAnon( const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; - const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const field_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = field_ty, + .pointee_type = field_ty.toType(), }); - if (values[i].tag() == .unreachable_value) { + if (values[i] == .none) { const init = try sema.resolveInst(item.data.init); const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, init); @@ -18129,7 +18553,7 @@ fn zirStructInitAnon( element_refs[i] = try sema.resolveInst(item.data.init); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayInit( @@ -18138,6 +18562,7 @@ fn zirArrayInit( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -18147,20 +18572,20 @@ fn zirArrayInit( assert(args.len >= 2); // array_ty + at least one element const array_ty = try sema.resolveType(block, src, args[0]); - const sentinel_val = array_ty.sentinel(); + const sentinel_val = array_ty.sentinel(mod); const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null)); defer gpa.free(resolved_args); for (args[1..], 0..) |arg, i| { const resolved_arg = try sema.resolveInst(arg); - const elem_ty = if (array_ty.zigTypeTag() == .Struct) - array_ty.structFieldType(i) + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) + array_ty.structFieldType(i, mod) else - array_ty.elemType2(); + array_ty.elemType2(mod); resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const elem_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const decl = mod.declPtr(block.src_decl); + const elem_src = mod.initSrc(src.node_offset.x, decl, i); _ = try sema.coerce(block, elem_ty, resolved_arg, elem_src); unreachable; }, @@ -18169,7 +18594,7 @@ fn zirArrayInit( } if (sentinel_val) |some| { - resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some); + resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(mod), some); } const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| { @@ -18178,21 +18603,25 @@ fn zirArrayInit( } else null; const runtime_index = opt_runtime_index orelse { - const elem_vals = try sema.arena.alloc(Value, resolved_args.len); - - for (resolved_args, 0..) |arg, i| { + const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len); + for (elem_vals, resolved_args, 0..) 
|*val, arg, i| { + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) + array_ty.structFieldType(i, mod) + else + array_ty.elemType2(mod); // We checked that all args are comptime above. - elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?; + val.* = try ((sema.resolveMaybeUndefVal(arg) catch unreachable).?).intern(elem_ty, mod); } - - const array_val = try Value.Tag.aggregate.create(sema.arena, elem_vals); - return sema.addConstantMaybeRef(block, array_ty, array_val, is_ref); + return sema.addConstantMaybeRef(block, array_ty, (try mod.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .elems = elem_vals }, + } })).toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const elem_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, runtime_index); + const decl = mod.declPtr(block.src_decl); + const elem_src = mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, elem_src); unreachable; }, @@ -18201,19 +18630,19 @@ fn zirArrayInit( try sema.queueFullTypeResolution(array_ty); if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = array_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); - if (array_ty.isTuple()) { + if (array_ty.isTuple(mod)) { for (resolved_args, 0..) |arg, i| { - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.structFieldType(i), + .pointee_type = array_ty.structFieldType(i, mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18224,10 +18653,10 @@ fn zirArrayInit( return sema.makePtrConst(block, alloc); } - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.elemType2(), + .pointee_type = array_ty.elemType2(mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18252,44 +18681,49 @@ fn zirArrayInitAnon( const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const operands = sema.code.refSlice(extra.end, extra.data.operands_len); + const mod = sema.mod; - const types = try sema.arena.alloc(Type, operands.len); - const values = try sema.arena.alloc(Value, operands.len); + const types = try sema.arena.alloc(InternPool.Index, operands.len); + const values = try sema.arena.alloc(InternPool.Index, operands.len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (operands, 0..) 
|operand, i| { const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); - types[i] = sema.typeOf(elem); - if (types[i].zigTypeTag() == .Opaque) { + types[i] = sema.typeOf(elem).toIntern(); + if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i]); + try sema.addDeclaredHereNote(msg, types[i].toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(elem)) |val| { - values[i] = val; + values[i] = val.toIntern(); } else { - values[i] = Value.initTag(.unreachable_value); + values[i] = .none; runtime_src = operand_src; } } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .storage = .{ .elems = values }, + } }); + return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -18297,7 +18731,7 @@ fn zirArrayInitAnon( if (is_ref) { const target = sema.mod.getTarget(); const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = tuple_ty, + .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -18306,9 +18740,9 @@ fn zirArrayInitAnon( const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = types[i], + .pointee_type = types[i].toType(), }); - if (values[i].tag() == .unreachable_value) { + if (values[i] == .none) { const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand)); } @@ -18322,7 +18756,7 @@ fn zirArrayInitAnon( element_refs[i] = try sema.resolveInst(operand); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn addConstantMaybeRef( @@ -18337,8 +18771,8 @@ fn addConstantMaybeRef( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( - try ty.copy(anon_decl.arena()), - try val.copy(anon_decl.arena()), + ty, + val, 0, // default alignment ); return sema.analyzeDeclRef(decl); @@ -18350,11 +18784,13 @@ fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const ty_src = inst_data.src(); const field_src = inst_data.src(); const aggregate_ty = try sema.resolveType(block, ty_src, extra.container_type); - const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "field name must be comptime-known"); return sema.fieldType(block, aggregate_ty, field_name, field_src, ty_src); } fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = 
sema.mod; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const ty_src = inst_data.src(); @@ -18367,7 +18803,8 @@ fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A error.GenericPoison => return Air.Inst.Ref.generic_poison_type, else => |e| return e, }; - const field_name = sema.code.nullTerminatedString(extra.name_start); + const zir_field_name = sema.code.nullTerminatedString(extra.name_start); + const field_name = try ip.getOrPutString(sema.gpa, zir_field_name); return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src); } @@ -18375,41 +18812,43 @@ fn fieldType( sema: *Sema, block: *Block, aggregate_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ty_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; var cur_ty = aggregate_ty; while (true) { const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; - switch (cur_ty.zigTypeTag()) { - .Struct => { - if (cur_ty.isAnonStruct()) { + switch (cur_ty.zigTypeTag(mod)) { + .Struct => switch (mod.intern_pool.indexToKey(cur_ty.toIntern())) { + .anon_struct_type => |anon_struct| { const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); - return sema.addType(cur_ty.tupleFields().types[field_index]); - } - const struct_obj = cur_ty.castTag(.@"struct").?.data; - const field = struct_obj.fields.get(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); - return sema.addType(field.ty); + return sema.addType(anon_struct.types[field_index].toType()); + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field = struct_obj.fields.get(field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); + return sema.addType(field.ty); + }, + else => unreachable, }, .Union => { - const union_obj = cur_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(cur_ty).?; const field = union_obj.fields.get(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); return sema.addType(field.ty); }, .Optional => { - if (cur_ty.castTag(.optional)) |some| { - // Struct/array init through optional requires the child type to not be a pointer. - // If the child of .optional is a pointer it'll error on the next loop. - cur_ty = some.data; - continue; - } + // Struct/array init through optional requires the child type to not be a pointer. + // If the child of .optional is a pointer it'll error on the next loop. 
+ cur_ty = mod.intern_pool.indexToKey(cur_ty.toIntern()).opt_type.toType(); + continue; }, .ErrorUnion => { - cur_ty = cur_ty.errorUnionPayload(); + cur_ty = cur_ty.errorUnionPayload(mod); continue; }, else => {}, @@ -18425,18 +18864,23 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { } fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { + const mod = sema.mod; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const opt_ptr_stack_trace_ty = try Type.Tag.optional_single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); + const opt_ptr_stack_trace_ty = try Type.optional(sema.arena, ptr_stack_trace_ty, mod); if (sema.owner_func != null and sema.owner_func.?.calls_or_awaits_errorable_fn and - sema.mod.comp.bin_file.options.error_return_tracing and - sema.mod.backendSupportsFeature(.error_return_trace)) + mod.comp.bin_file.options.error_return_tracing and + mod.backendSupportsFeature(.error_return_trace)) { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); } - return sema.addConstant(opt_ptr_stack_trace_ty, Value.null); + return sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_ptr_stack_trace_ty.toIntern(), + .val = .none, + } })).toValue()); } fn zirFrame( @@ -18449,27 +18893,28 @@ fn zirFrame( } fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); - if (ty.isNoReturn()) { + if (ty.isNoReturn(mod)) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } - const target = sema.mod.getTarget(); - const val = try ty.lazyAbiAlignment(target, sema.arena); - if (val.tag() == .lazy_align) { + const val = try ty.lazyAbiAlignment(mod); + if (val.isLazyAlign(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); } fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(Type.u1); - if (val.toBool()) return sema.addConstant(Type.u1, Value.one); - return sema.addConstant(Type.u1, Value.zero); + if (val.isUndef(mod)) return sema.addConstUndef(Type.u1); + if (val.toBool()) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); + return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } return block.addUnOp(.bool_to_int, operand); } @@ -18480,8 +18925,8 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const bytes = val.castTag(.@"error").?.data.name; - return sema.addStrLit(block, bytes); + const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name; + return sema.addStrLit(block, sema.mod.intern_pool.stringToSlice(err_name)); } // Similar to zirTagName, we 
have a special AIR instruction for the error name in case an optimization pass @@ -18499,16 +18944,17 @@ fn zirUnaryMath( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, .Vector => { - const scalar_ty = operand_ty.scalarType(); - switch (scalar_ty.zigTypeTag()) { + const scalar_ty = operand_ty.scalarType(mod); + switch (scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}), } @@ -18516,25 +18962,27 @@ fn zirUnaryMath( else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}), } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Vector => { - const scalar_ty = operand_ty.scalarType(); - const vec_len = operand_ty.vectorLen(); - const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty); + const scalar_ty = operand_ty.scalarType(mod); + const vec_len = operand_ty.vectorLen(mod); + const result_ty = try mod.vectorType(.{ + .len = vec_len, + .child = scalar_ty.toIntern(), + }); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - var elem_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod); + const elem_val = try val.elemValue(sema.mod, i); + elem.* = try (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).intern(scalar_ty, mod); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -18542,7 +18990,7 @@ fn zirUnaryMath( }, .ComptimeFloat, .Float => { if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - if (operand_val.isUndef()) + if (operand_val.isUndef(mod)) return sema.addConstUndef(operand_ty); const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod); return sema.addConstant(operand_ty, result_val); @@ -18562,16 +19010,17 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const mod = sema.mod; + const ip = &mod.intern_pool; try sema.resolveTypeLayout(operand_ty); - const enum_ty = switch (operand_ty.zigTypeTag()) { + const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const bytes = val.castTag(.enum_literal).?.data; - return sema.addStrLit(block, bytes); + const tag_name = ip.indexToKey(val.toIntern()).enum_literal; + return sema.addStrLit(block, ip.stringToSlice(tag_name)); }, .Enum => operand_ty, - .Union => operand_ty.unionTagType() orelse { + .Union => operand_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "union '{}' is untagged", .{ operand_ty.fmt(sema.mod), @@ -18586,30 +19035,31 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air operand_ty.fmt(mod), }), }; - if (enum_ty.enumFieldCount() == 0) { + if (enum_ty.enumFieldCount(mod) == 0) { // TODO I don't think this is the correct way to handle this but // it prevents a crash. 
return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{ enum_ty.fmt(mod), }); } - const enum_decl_index = enum_ty.getOwnerDecl(); + const enum_decl_index = enum_ty.getOwnerDecl(mod); const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| { const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse { const enum_decl = mod.declPtr(enum_decl_index); const msg = msg: { - const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{s}'", .{ - val.fmtValue(enum_ty, sema.mod), enum_decl.name, + const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{}'", .{ + val.fmtValue(enum_ty, sema.mod), enum_decl.name.fmt(ip), }); errdefer msg.destroy(sema.gpa); - try mod.errNoteNonLazy(enum_decl.srcLoc(), msg, "declared here", .{}); + try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; - const field_name = enum_ty.enumFieldName(field_index); - return sema.addStrLit(block, field_name); + // TODO: write something like getCoercedInts to avoid needing to dupe + const field_name = enum_ty.enumFieldName(field_index, mod); + return sema.addStrLit(block, ip.stringToSlice(field_name)); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) { @@ -18622,8 +19072,15 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return block.addUnOp(.tag_name, casted_operand); } -fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { +fn zirReify( + sema: *Sema, + block: *Block, + extended: Zir.Inst.Extended.InstData, + inst: Zir.Inst.Index, +) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -18632,10 +19089,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstValue(block, operand_src, type_info, "operand to @Type must be comptime-known"); - const union_val = val.cast(Value.Payload.Union).?.data; + const union_val = ip.indexToKey(val.toIntern()).un; const target = mod.getTarget(); - const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?; - if (union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src); + if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src); + const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?; switch (@intToEnum(std.builtin.TypeId, tag_index)) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, @@ -18648,41 +19105,48 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const signedness_val = struct_val[0]; - 
const bits_val = struct_val[1]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const signedness_val = try union_val.val.toValue().fieldValue( + mod, + fields.getIndex(try ip.getOrPutString(gpa, "signedness")).?, + ); + const bits_val = try union_val.val.toValue().fieldValue( + mod, + fields.getIndex(try ip.getOrPutString(gpa, "bits")).?, + ); - const signedness = signedness_val.toEnum(std.builtin.Signedness); - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); - const ty = switch (signedness) { - .signed => try Type.Tag.int_signed.create(sema.arena, bits), - .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits), - }; + const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const ty = try mod.intType(signedness, bits); return sema.addType(ty); }, .Vector => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const len_val = struct_val[0]; - const child_val = struct_val[1]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "len"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = child_val.toType(&buffer); + const len = @intCast(u32, len_val.toUnsignedInt(mod)); + const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); - const ty = try Type.vector(sema.arena, len, try child_ty.copy(sema.arena)); + const ty = try mod.vectorType(.{ + .len = len, + .child = child_ty.toIntern(), + }); return sema.addType(ty); }, .Float => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // bits: comptime_int, - const bits_val = struct_val[0]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "bits"), + ).?); - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -18694,25 +19158,42 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.addType(ty); }, .Pointer => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const size_val = struct_val[0]; - const is_const_val = struct_val[1]; - const is_volatile_val = struct_val[2]; - const alignment_val = struct_val[3]; - const address_space_val = struct_val[4]; - const child_val = struct_val[5]; - const is_allowzero_val = struct_val[6]; - const sentinel_val = struct_val[7]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "size"), + ).?); + const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_const"), + ).?); + const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_volatile"), + ).?); + const alignment_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); + const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "address_space"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); + const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_allowzero"), + ).?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "sentinel"), + ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?); - var buffer: Value.ToTypeBuffer = undefined; - const unresolved_elem_ty = child_val.toType(&buffer); - const elem_ty = if (abi_align == 0) + const abi_align = InternPool.Alignment.fromByteUnits( + (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?, + ); + + const unresolved_elem_ty = child_val.toType(); + const elem_ty = if (abi_align == .none) unresolved_elem_ty else t: { const elem_ty = try sema.resolveTypeFields(unresolved_elem_ty); @@ -18720,301 +19201,282 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in break :t elem_ty; }; - const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size); + const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); - var actual_sentinel: ?Value = null; - if (!sentinel_val.isNull()) { - if (ptr_size == .One or ptr_size == .C) { - return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); + const actual_sentinel: InternPool.Index = s: { + if (!sentinel_val.isNull(mod)) { + if (ptr_size == .One or ptr_size == .C) { + return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); + } + const sentinel_ptr_val = sentinel_val.optionalValue(mod).?; + const ptr_ty = try Type.ptr(sema.arena, mod, .{ + .@"addrspace" = .generic, + .pointee_type = elem_ty, + }); + const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; + break :s sent_val.toIntern(); } - const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data; - const ptr_ty = try Type.ptr(sema.arena, mod, .{ - .@"addrspace" = .generic, - .pointee_type = try elem_ty.copy(sema.arena), - }); - actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; - } + break :s .none; + }; - if (elem_ty.zigTypeTag() == .NoReturn) { + if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; - if (abi_align != 0 and fn_align != 0 and + const fn_align = mod.typeToFunc(elem_ty).?.alignment; + if (abi_align != .none and fn_align != .none and abi_align != fn_align) { return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (ptr_size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "unknown-length pointer to opaque not 
allowed", .{}); } else if (ptr_size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)}); - errdefer msg.destroy(sema.gpa); + const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); + errdefer msg.destroy(gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), elem_ty, .other); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "C pointers cannot point to opaque types", .{}); } } - const ty = try Type.ptr(sema.arena, mod, .{ - .size = ptr_size, - .mutable = !is_const_val.toBool(), - .@"volatile" = is_volatile_val.toBool(), - .@"align" = abi_align, - .@"addrspace" = address_space_val.toEnum(std.builtin.AddressSpace), - .pointee_type = try elem_ty.copy(sema.arena), - .@"allowzero" = is_allowzero_val.toBool(), + const ty = try mod.ptrType(.{ + .child = elem_ty.toIntern(), .sentinel = actual_sentinel, + .flags = .{ + .size = ptr_size, + .is_const = is_const_val.toBool(), + .is_volatile = is_volatile_val.toBool(), + .alignment = abi_align, + .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val), + .is_allowzero = is_allowzero_val.toBool(), + }, }); return sema.addType(ty); }, .Array => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // len: comptime_int, - const len_val = struct_val[0]; - // child: type, - const child_val = struct_val[1]; - // sentinel: ?*const anyopaque, - const sentinel_val = struct_val[2]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "len"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "sentinel"), + ).?); - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); - const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { + const len = len_val.toUnsignedInt(mod); + const child_ty = child_val.toType(); + const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, .pointee_type = child_ty, }); - break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?; + break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?; } else null; - const ty = try Type.array(sema.arena, len, sentinel, child_ty, sema.mod); + const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod); return sema.addType(ty); }, .Optional => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // child: type, - const child_val = struct_val[0]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const child_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "child"),
+            ).?);
-            var buffer: Value.ToTypeBuffer = undefined;
-            const child_ty = try child_val.toType(&buffer).copy(sema.arena);
+            const child_ty = child_val.toType();
-            const ty = try Type.optional(sema.arena, child_ty);
+            const ty = try Type.optional(sema.arena, child_ty, mod);
             return sema.addType(ty);
         },
         .ErrorUnion => {
-            const struct_val = union_val.val.castTag(.aggregate).?.data;
-            // TODO use reflection instead of magic numbers here
-            // error_set: type,
-            const error_set_val = struct_val[0];
-            // payload: type,
-            const payload_val = struct_val[1];
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "error_set"),
+            ).?);
+            const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "payload"),
+            ).?);
-            var buffer: Value.ToTypeBuffer = undefined;
-            const error_set_ty = try error_set_val.toType(&buffer).copy(sema.arena);
-            const payload_ty = try payload_val.toType(&buffer).copy(sema.arena);
+            const error_set_ty = error_set_val.toType();
+            const payload_ty = payload_val.toType();
-            if (error_set_ty.zigTypeTag() != .ErrorSet) {
+            if (error_set_ty.zigTypeTag(mod) != .ErrorSet) {
                 return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
             }
-            const ty = try Type.Tag.error_union.create(sema.arena, .{
-                .error_set = error_set_ty,
-                .payload = payload_ty,
-            });
+            const ty = try mod.errorUnionType(error_set_ty, payload_ty);
             return sema.addType(ty);
         },
         .ErrorSet => {
-            const payload_val = union_val.val.optionalValue() orelse
-                return sema.addType(Type.initTag(.anyerror));
-            const slice_val = payload_val.castTag(.slice).?.data;
+            const payload_val = union_val.val.toValue().optionalValue(mod) orelse
+                return sema.addType(Type.anyerror);
-            const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod.getTarget()));
-            var names: Module.ErrorSet.NameMap = .{};
+            const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
+            var names: Module.Fn.InferredErrorSet.NameMap = .{};
             try names.ensureUnusedCapacity(sema.arena, len);
-            var i: usize = 0;
-            while (i < len) : (i += 1) {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = slice_val.ptr.elemValueBuffer(mod, i, &buf);
-                const struct_val = elem_val.castTag(.aggregate).?.data;
-                // TODO use reflection instead of magic numbers here
-                // error_set: type,
-                const name_val = struct_val[0];
-                const name_str = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, sema.mod);
+            for (0..len) |i| {
+                const elem_val = try payload_val.elemValue(mod, i);
+                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "name"),
+                ).?);
-                const kv = try mod.getErrorValue(name_str);
-                const gop = names.getOrPutAssumeCapacity(kv.key);
+                const name = try name_val.toIpString(Type.slice_const_u8, mod);
+                _ = try mod.getErrorValue(name);
+                const gop = names.getOrPutAssumeCapacity(name);
                 if (gop.found_existing) {
-                    return sema.fail(block, src, "duplicate error '{s}'", .{name_str});
+                    return sema.fail(block, src, "duplicate error '{}'", .{
+                        name.fmt(ip),
+                    });
                 }
             }
-            // names must be sorted
-            Module.ErrorSet.sortNames(&names);
-            const ty = try Type.Tag.error_set_merged.create(sema.arena, names);
+            const ty = try mod.errorSetFromUnsortedNames(names.keys());
             return sema.addType(ty);
         },
         .Struct => {
-            // TODO use reflection instead of magic numbers here
-            const struct_val = union_val.val.castTag(.aggregate).?.data;
-            // layout: containerlayout,
-            const layout_val = struct_val[0];
-            // backing_int: ?type,
-            const backing_int_val = struct_val[1];
-            // fields: []const enumfield,
-            const fields_val = struct_val[2];
-            // decls: []const declaration,
-            const decls_val = struct_val[3];
-            // is_tuple: bool,
-            const is_tuple_val = struct_val[4];
-            assert(struct_val.len == 5);
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "layout"),
+            ).?);
+            const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "backing_integer"),
+            ).?);
+            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "fields"),
+            ).?);
+            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "decls"),
+            ).?);
+            const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "is_tuple"),
+            ).?);
-            const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
+            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);

             // Decls
             if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified structs must have no decls", .{});
             }

-            if (layout != .Packed and !backing_int_val.isNull()) {
+            if (layout != .Packed and !backing_integer_val.isNull(mod)) {
                 return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
             }

-            return try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy, is_tuple_val.toBool());
+            return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
         },
         .Enum => {
-            const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data;
-            // TODO use reflection instead of magic numbers here
-            // tag_type: type,
-            const tag_type_val = struct_val[0];
-            // fields: []const EnumField,
-            const fields_val = struct_val[1];
-            // decls: []const Declaration,
-            const decls_val = struct_val[2];
-            // is_exhaustive: bool,
-            const is_exhaustive_val = struct_val[3];
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "tag_type"),
+            ).?);
+            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "fields"),
+            ).?);
+            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "decls"),
+            ).?);
+            const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "is_exhaustive"),
+            ).?);

             // Decls
             if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified enums must have no decls", .{});
             }

-            const gpa = sema.gpa;
-            var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
-            errdefer new_decl_arena.deinit();
-            const new_decl_arena_allocator = new_decl_arena.allocator();
+            const int_tag_ty = tag_type_val.toType();
+            if (int_tag_ty.zigTypeTag(mod) != .Int) {
+                return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
+            }
+
+            // Because these things each reference each other, `undefined`
+            // placeholders are used before being set after the enum type gains
+            // an InternPool index.
-            // Define our empty enum decl
-            const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull);
-            const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull);
-            enum_ty_payload.* = .{
-                .base = .{
-                    .tag = if (!is_exhaustive_val.toBool())
-                        .enum_nonexhaustive
-                    else
-                        .enum_full,
-                },
-                .data = enum_obj,
-            };
-            const enum_ty = Type.initPayload(&enum_ty_payload.base);
-            const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
             const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
-                .ty = Type.type,
-                .val = enum_val,
+                .ty = Type.noreturn,
+                .val = Value.@"unreachable",
             }, name_strategy, "enum", inst);
             const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl_index);
-
-            enum_obj.* = .{
-                .owner_decl = new_decl_index,
-                .tag_ty = Type.null,
-                .tag_ty_inferred = false,
-                .fields = .{},
-                .values = .{},
-                .namespace = .{
-                    .parent = block.namespace,
-                    .ty = enum_ty,
-                    .file_scope = block.getFileScope(),
-                },
-            };
-
-            // Enum tag type
-            var buffer: Value.ToTypeBuffer = undefined;
-            const int_tag_ty = try tag_type_val.toType(&buffer).copy(new_decl_arena_allocator);
-
-            if (int_tag_ty.zigTypeTag() != .Int) {
-                return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
+            errdefer {
+                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
+                mod.abortAnonDecl(new_decl_index);
             }
-            enum_obj.tag_ty = int_tag_ty;

-            // Fields
-            const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
-            try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
-            try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
-                .ty = enum_obj.tag_ty,
-                .mod = mod,
+            // Define our empty enum decl
+            const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
+            const incomplete_enum = try ip.getIncompleteEnum(gpa, .{
+                .decl = new_decl_index,
+                .namespace = .none,
+                .fields_len = fields_len,
+                .has_values = true,
+                .tag_mode = if (!is_exhaustive_val.toBool())
+                    .nonexhaustive
+                else
+                    .explicit,
+                .tag_ty = int_tag_ty.toIntern(),
             });
+            // TODO: figure out InternPool removals for incremental compilation
+            //errdefer ip.remove(incomplete_enum.index);

-            var field_i: usize = 0;
-            while (field_i < fields_len) : (field_i += 1) {
-                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, field_i);
-                const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data;
-                // TODO use reflection instead of magic numbers here
-                // name: []const u8
-                const name_val = field_struct_val[0];
-                // value: comptime_int
-                const value_val = field_struct_val[1];
+            new_decl.ty = Type.type;
+            new_decl.val = incomplete_enum.index.toValue();

-                const field_name = try name_val.toAllocatedBytes(
-                    Type.initTag(.const_slice_u8),
-                    new_decl_arena_allocator,
-                    sema.mod,
-                );
+            for (0..fields_len) |field_i| {
+                const elem_val = try fields_val.elemValue(mod, field_i);
+                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "name"),
+                ).?);
+                const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "value"),
+                ).?);
-                if (!try sema.intFitsInType(value_val, enum_obj.tag_ty, null)) {
+                const field_name = try name_val.toIpString(Type.slice_const_u8, mod);
+
+                if (!try sema.intFitsInType(value_val, int_tag_ty, null)) {
                     // TODO: better source location
-                    return sema.fail(block, src, "field '{s}' with enumeration value '{}' is too large for backing int type '{}'", .{
-                        field_name,
+                    return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
+                        field_name.fmt(ip),
                         value_val.fmtValue(Type.comptime_int, mod),
-                        enum_obj.tag_ty.fmt(mod),
+                        int_tag_ty.fmt(mod),
                     });
                 }

-                const gop_field = enum_obj.fields.getOrPutAssumeCapacity(field_name);
-                if (gop_field.found_existing) {
+                if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "duplicate enum field '{s}'", .{field_name});
+                        const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{
+                            field_name.fmt(ip),
+                        });
                         errdefer msg.destroy(gpa);
+                        _ = other_index; // TODO: this note is incorrect
                         try sema.errNote(block, src, msg, "other field here", .{});
                         break :msg msg;
                     };
                     return sema.failWithOwnedErrorMsg(msg);
                 }

-                const copied_tag_val = try value_val.copy(new_decl_arena_allocator);
-                const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{
-                    .ty = enum_obj.tag_ty,
-                    .mod = mod,
-                });
-                if (gop_val.found_existing) {
+                if (try incomplete_enum.addFieldValue(ip, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
                         errdefer msg.destroy(gpa);
+                        _ = other; // TODO: this note is incorrect
                         try sema.errNote(block, src, msg, "other enum tag value here", .{});
                         break :msg msg;
                     };
@@ -19022,182 +19484,209 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 }
             }

-            try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl_index);
+            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
+            try mod.finalizeAnonDecl(new_decl_index);
+            return decl_val;
         },
         .Opaque => {
-            const struct_val = union_val.val.castTag(.aggregate).?.data;
-            // decls: []const Declaration,
-            const decls_val = struct_val[0];
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "decls"),
+            ).?);

             // Decls
             if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified opaque must have no decls", .{});
             }

-            var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
-            errdefer new_decl_arena.deinit();
-            const new_decl_arena_allocator = new_decl_arena.allocator();
+            // Because these three things each reference each other,
+            // `undefined` placeholders are used in two places before being set
+            // after the opaque type gains an InternPool index.
-            const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque);
-            const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque);
-            opaque_ty_payload.* = .{
-                .base = .{ .tag = .@"opaque" },
-                .data = opaque_obj,
-            };
-            const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
-            const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
             const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
-                .ty = Type.type,
-                .val = opaque_val,
+                .ty = Type.noreturn,
+                .val = Value.@"unreachable",
             }, name_strategy, "opaque", inst);
             const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl_index);
+            errdefer {
+                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
+                mod.abortAnonDecl(new_decl_index);
+            }

-            opaque_obj.* = .{
-                .owner_decl = new_decl_index,
-                .namespace = .{
-                    .parent = block.namespace,
-                    .ty = opaque_ty,
-                    .file_scope = block.getFileScope(),
-                },
-            };
+            const new_namespace_index = try mod.createNamespace(.{
+                .parent = block.namespace.toOptional(),
+                .ty = undefined,
+                .file_scope = block.getFileScope(mod),
+            });
+            const new_namespace = mod.namespacePtr(new_namespace_index);
+            errdefer mod.destroyNamespace(new_namespace_index);

-            try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl_index);
+            const opaque_ty = try mod.intern(.{ .opaque_type = .{
+                .decl = new_decl_index,
+                .namespace = new_namespace_index,
+            } });
+            // TODO: figure out InternPool removals for incremental compilation
+            //errdefer ip.remove(opaque_ty);
+
+            new_decl.ty = Type.type;
+            new_decl.val = opaque_ty.toValue();
+            new_namespace.ty = opaque_ty.toType();
+
+            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
+            try mod.finalizeAnonDecl(new_decl_index);
+            return decl_val;
         },
         .Union => {
-            // TODO use reflection instead of magic numbers here
-            const struct_val = union_val.val.castTag(.aggregate).?.data;
-            // layout: containerlayout,
-            const layout_val = struct_val[0];
-            // tag_type: ?type,
-            const tag_type_val = struct_val[1];
-            // fields: []const enumfield,
-            const fields_val = struct_val[2];
-            // decls: []const declaration,
-            const decls_val = struct_val[3];
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "layout"),
+            ).?);
+            const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "tag_type"),
+            ).?);
+            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "fields"),
+            ).?);
+            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "decls"),
+            ).?);

             // Decls
             if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified unions must have no decls", .{});
             }
-            const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
+            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);

-            var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
-            errdefer new_decl_arena.deinit();
-            const new_decl_arena_allocator = new_decl_arena.allocator();
+            // Because these three things each reference each other, `undefined`
+            // placeholders are used before being set after the union type gains an
+            // InternPool index.
-            const union_obj = try new_decl_arena_allocator.create(Module.Union);
-            const type_tag = if (!tag_type_val.isNull())
-                Type.Tag.union_tagged
-            else if (layout != .Auto)
-                Type.Tag.@"union"
-            else switch (block.sema.mod.optimizeMode()) {
-                .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged,
-                .ReleaseFast, .ReleaseSmall => Type.Tag.@"union",
-            };
-            const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
-            union_payload.* = .{
-                .base = .{ .tag = type_tag },
-                .data = union_obj,
-            };
-            const union_ty = Type.initPayload(&union_payload.base);
-            const new_union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
             const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
-                .ty = Type.type,
-                .val = new_union_val,
+                .ty = Type.noreturn,
+                .val = Value.@"unreachable",
             }, name_strategy, "union", inst);
             const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl_index);
-            union_obj.* = .{
+            errdefer {
+                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
+                mod.abortAnonDecl(new_decl_index);
+            }
+
+            const new_namespace_index = try mod.createNamespace(.{
+                .parent = block.namespace.toOptional(),
+                .ty = undefined,
+                .file_scope = block.getFileScope(mod),
+            });
+            const new_namespace = mod.namespacePtr(new_namespace_index);
+            errdefer mod.destroyNamespace(new_namespace_index);
+
+            const union_index = try mod.createUnion(.{
                 .owner_decl = new_decl_index,
-                .tag_ty = Type.initTag(.null),
+                .tag_ty = Type.null,
                 .fields = .{},
                 .zir_index = inst,
                 .layout = layout,
                 .status = .have_field_types,
-                .namespace = .{
-                    .parent = block.namespace,
-                    .ty = union_ty,
-                    .file_scope = block.getFileScope(),
+                .namespace = new_namespace_index,
+            });
+            const union_obj = mod.unionPtr(union_index);
+            errdefer mod.destroyUnion(union_index);
+
+            const union_ty = try ip.get(gpa, .{ .union_type = .{
+                .index = union_index,
+                .runtime_tag = if (!tag_type_val.isNull(mod))
+                    .tagged
+                else if (layout != .Auto)
+                    .none
+                else switch (mod.optimizeMode()) {
+                    .Debug, .ReleaseSafe => .safety,
+                    .ReleaseFast, .ReleaseSmall => .none,
                 },
-            };
+            } });
+            // TODO: figure out InternPool removals for incremental compilation
+            //errdefer ip.remove(union_ty);
+
+            new_decl.ty = Type.type;
+            new_decl.val = union_ty.toValue();
+            new_namespace.ty = union_ty.toType();

             // Tag type
-            var tag_ty_field_names: ?Module.EnumFull.NameMap = null;
-            var enum_field_names: ?*Module.EnumNumbered.NameMap = null;
             const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
-            if (tag_type_val.optionalValue()) |payload_val| {
-                var buffer: Value.ToTypeBuffer = undefined;
-                union_obj.tag_ty = try payload_val.toType(&buffer).copy(new_decl_arena_allocator);
+            var explicit_tags_seen: []bool = &.{};
+            var enum_field_names: []InternPool.NullTerminatedString = &.{};
+            if (tag_type_val.optionalValue(mod)) |payload_val| {
+                union_obj.tag_ty = payload_val.toType();

-                if (union_obj.tag_ty.zigTypeTag() != .Enum) {
-                    return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{});
-                }
-                tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena);
+                const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) {
+                    .enum_type => |x| x,
+                    else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
+                };
+
+                explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
+                @memset(explicit_tags_seen, false);
             } else {
-                union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, fields_len, null);
-                enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields;
+                enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
             }

             // Fields
-            try union_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
+            try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);

-            var i: usize = 0;
-            while (i < fields_len) : (i += 1) {
-                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
-                const field_struct_val = elem_val.castTag(.aggregate).?.data;
-                // TODO use reflection instead of magic numbers here
-                // name: []const u8
-                const name_val = field_struct_val[0];
-                // type: type,
-                const type_val = field_struct_val[1];
-                // alignment: comptime_int,
-                const alignment_val = field_struct_val[2];
+            for (0..fields_len) |i| {
+                const elem_val = try fields_val.elemValue(mod, i);
+                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "name"),
+                ).?);
+                const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "type"),
+                ).?);
+                const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "alignment"),
+                ).?);

-                const field_name = try name_val.toAllocatedBytes(
-                    Type.initTag(.const_slice_u8),
-                    new_decl_arena_allocator,
-                    sema.mod,
-                );
+                const field_name = try name_val.toIpString(Type.slice_const_u8, mod);

-                if (enum_field_names) |set| {
-                    set.putAssumeCapacity(field_name, {});
+                if (enum_field_names.len != 0) {
+                    enum_field_names[i] = field_name;
                 }

-                if (tag_ty_field_names) |*names| {
-                    const enum_has_field = names.orderedRemove(field_name);
-                    if (!enum_has_field) {
+                if (explicit_tags_seen.len > 0) {
+                    const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
+                    const enum_index = tag_info.nameIndex(ip, field_name) orelse {
                         const msg = msg: {
-                            const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) });
-                            errdefer msg.destroy(sema.gpa);
+                            const msg = try sema.errMsg(block, src, "no field named '{}' in enum '{}'", .{
+                                field_name.fmt(ip),
+                                union_obj.tag_ty.fmt(mod),
+                            });
+                            errdefer msg.destroy(gpa);
                             try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                             break :msg msg;
                         };
                         return sema.failWithOwnedErrorMsg(msg);
-                    }
+                    };
+                    // No check for duplicate because the check already happened in order
+                    // to create the enum type in the first place.
+                    assert(!explicit_tags_seen[enum_index]);
+                    explicit_tags_seen[enum_index] = true;
                 }

                 const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
                 if (gop.found_existing) {
                     // TODO: better source location
-                    return sema.fail(block, src, "duplicate union field {s}", .{field_name});
+                    return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)});
                 }

-                var buffer: Value.ToTypeBuffer = undefined;
-                const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator);
+                const field_ty = type_val.toType();
                 gop.value_ptr.* = .{
                     .ty = field_ty,
-                    .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?),
+                    .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?),
                 };

-                if (field_ty.zigTypeTag() == .Opaque) {
+                if (field_ty.zigTypeTag(mod) == .Opaque) {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
-                        errdefer msg.destroy(sema.gpa);
+                        errdefer msg.destroy(gpa);

                         try sema.addDeclaredHereNote(msg, field_ty);
                         break :msg msg;
@@ -19206,23 +19695,23 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 }
                 if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                        errdefer msg.destroy(sema.gpa);
+                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
+                        errdefer msg.destroy(gpa);

-                        const src_decl = sema.mod.declPtr(block.src_decl);
-                        try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .union_field);
+                        const src_decl = mod.declPtr(block.src_decl);
+                        try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .union_field);

                         try sema.addDeclaredHereNote(msg, field_ty);
                         break :msg msg;
                     };
                     return sema.failWithOwnedErrorMsg(msg);
-                } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) {
+                } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                        errdefer msg.destroy(sema.gpa);
+                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
+                        errdefer msg.destroy(gpa);

-                        const src_decl = sema.mod.declPtr(block.src_decl);
-                        try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty);
+                        const src_decl = mod.declPtr(block.src_decl);
+                        try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);

                         try sema.addDeclaredHereNote(msg, field_ty);
                         break :msg msg;
@@ -19231,47 +19720,61 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 }
             }

-            if (tag_ty_field_names) |names| {
-                if (names.count() > 0) {
+            if (explicit_tags_seen.len > 0) {
+                const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
+                if (tag_info.names.len > fields_len) {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{});
-                        errdefer msg.destroy(sema.gpa);
+                        errdefer msg.destroy(gpa);

                         const enum_ty = union_obj.tag_ty;
-                        for (names.keys()) |field_name| {
-                            const field_index = enum_ty.enumFieldIndex(field_name).?;
-                            try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{field_name});
+                        for (tag_info.names, 0..) |field_name, field_index| {
+                            if (explicit_tags_seen[field_index]) continue;
+                            try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{}' missing, declared here", .{
+                                field_name.fmt(ip),
+                            });
                         }
                         try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                         break :msg msg;
                     };
                     return sema.failWithOwnedErrorMsg(msg);
                 }
+            } else {
+                union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, null);
             }

-            try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl_index);
+            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
+            try mod.finalizeAnonDecl(new_decl_index);
+            return decl_val;
         },
         .Fn => {
-            const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data;
-            // TODO use reflection instead of magic numbers here
-            // calling_convention: CallingConvention,
-            const cc = struct_val[0].toEnum(std.builtin.CallingConvention);
-            // alignment: comptime_int,
-            const alignment_val = struct_val[1];
-            // is_generic: bool,
-            const is_generic = struct_val[2].toBool();
-            // is_var_args: bool,
-            const is_var_args = struct_val[3].toBool();
-            // return_type: ?type,
-            const return_type_val = struct_val[4];
-            // args: []const Param,
-            const args_val = struct_val[5];
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "calling_convention"),
+            ).?);
+            const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "alignment"),
+            ).?);
+            const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "is_generic"),
+            ).?);
+            const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "is_var_args"),
+            ).?);
+            const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "return_type"),
+            ).?);
+            const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "params"),
+            ).?);

+            const is_generic = is_generic_val.toBool();
             if (is_generic) {
                 return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{});
             }

+            const is_var_args = is_var_args_val.toBool();
+            const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val);
             if (is_var_args and cc != .C) {
                 return sema.fail(block, src, "varargs functions must have C calling convention", .{});
             }
@@ -19280,63 +19783,55 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
                     return sema.fail(block, src, "alignment must fit in 'u32'", .{});
                 }
-                const alignment = @intCast(u29, alignment_val.toUnsignedInt(target));
+                const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod));
                 if (alignment == target_util.defaultFunctionAlignment(target)) {
-                    break :alignment 0;
+                    break :alignment .none;
                 } else {
-                    break :alignment alignment;
+                    break :alignment InternPool.Alignment.fromByteUnits(alignment);
                 }
             };
-            const return_type = return_type_val.optionalValue() orelse
+            const return_type = return_type_val.optionalValue(mod) orelse
                 return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});

-            var buf: Value.ToTypeBuffer = undefined;
-
-            const args_slice_val = args_val.castTag(.slice).?.data;
-            const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod.getTarget()));
-
-            const param_types = try sema.arena.alloc(Type, args_len);
-            const comptime_params = try sema.arena.alloc(bool, args_len);
+            const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod));
+            const param_types = try sema.arena.alloc(InternPool.Index, args_len);

             var noalias_bits: u32 = 0;
-            var i: usize = 0;
-            while (i < args_len) : (i += 1) {
-                var arg_buf: Value.ElemValueBuffer = undefined;
-                const arg = args_slice_val.ptr.elemValueBuffer(mod, i, &arg_buf);
-                const arg_val = arg.castTag(.aggregate).?.data;
-                // TODO use reflection instead of magic numbers here
-                // is_generic: bool,
-                const arg_is_generic = arg_val[0].toBool();
-                // is_noalias: bool,
-                const arg_is_noalias = arg_val[1].toBool();
-                // type: ?type,
-                const param_type_opt_val = arg_val[2];
+            for (param_types, 0..) |*param_type, i| {
+                const elem_val = try params_val.elemValue(mod, i);
+                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+                const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "is_generic"),
+                ).?);
+                const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "is_noalias"),
+                ).?);
+                const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "type"),
+                ).?);

-                if (arg_is_generic) {
+                if (param_is_generic_val.toBool()) {
                     return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{});
                 }

-                const param_type_val = param_type_opt_val.optionalValue() orelse
+                const param_type_val = opt_param_type_val.optionalValue(mod) orelse
                     return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{});
-                const param_type = try param_type_val.toType(&buf).copy(sema.arena);
+                param_type.* = param_type_val.toIntern();

-                if (arg_is_noalias) {
-                    if (!param_type.isPtrAtRuntime()) {
+                if (param_is_noalias_val.toBool()) {
+                    if (!param_type.toType().isPtrAtRuntime(mod)) {
                         return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
                     }
                     noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse
                         return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{}));
                 }
-
-                param_types[i] = param_type;
-                comptime_params[i] = false;
             }

-            var fn_info = Type.Payload.Function.Data{
+            const ty = try mod.funcType(.{
                 .param_types = param_types,
-                .comptime_params = comptime_params.ptr,
+                .comptime_bits = 0,
                 .noalias_bits = noalias_bits,
-                .return_type = try return_type.toType(&buf).copy(sema.arena),
+                .return_type = return_type.toIntern(),
                 .alignment = alignment,
                 .cc = cc,
                 .is_var_args = is_var_args,
@@ -19346,9 +19841,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 .cc_is_generic = false,
                 .section_is_generic = false,
                 .addrspace_is_generic = false,
-            };
-
-            const ty = try Type.Tag.function.create(sema.arena, fn_info);
+            });
             return sema.addType(ty);
         },
         .Frame => return sema.failWithUseOfAsync(block, src),
@@ -19366,22 +19859,34 @@ fn reifyStruct(
     name_strategy: Zir.Inst.NameStrategy,
     is_tuple: bool,
 ) CompileError!Air.Inst.Ref {
-    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
-    errdefer new_decl_arena.deinit();
-    const new_decl_arena_allocator = new_decl_arena.allocator();
-
-    const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
-    const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
-    const new_struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
     const mod = sema.mod;
+    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
+
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the struct type gains an
+    // InternPool index.
+
     const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
-        .ty = Type.type,
-        .val = new_struct_val,
+        .ty = Type.noreturn,
+        .val = Value.@"unreachable",
     }, name_strategy, "struct", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
-    struct_obj.* = .{
+    errdefer {
+        new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
+        mod.abortAnonDecl(new_decl_index);
    }
+
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const struct_index = try mod.createStruct(.{
         .owner_decl = new_decl_index,
         .fields = .{},
         .zir_index = inst,
@@ -19389,38 +19894,49 @@ fn reifyStruct(
         .status = .have_field_types,
         .known_non_opv = false,
         .is_tuple = is_tuple,
-        .namespace = .{
-            .parent = block.namespace,
-            .ty = struct_ty,
-            .file_scope = block.getFileScope(),
-        },
-    };
+        .namespace = new_namespace_index,
+    });
+    const struct_obj = mod.structPtr(struct_index);
+    errdefer mod.destroyStruct(struct_index);

-    const target = mod.getTarget();
+    const struct_ty = try ip.get(gpa, .{ .struct_type = .{
+        .index = struct_index.toOptional(),
+        .namespace = new_namespace_index.toOptional(),
+    } });
+    // TODO: figure out InternPool removals for incremental compilation
+    //errdefer ip.remove(struct_ty);
+
+    new_decl.ty = Type.type;
+    new_decl.val = struct_ty.toValue();
+    new_namespace.ty = struct_ty.toType();

     // Fields
     const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
-    try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
+    try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);

     var i: usize = 0;
     while (i < fields_len) : (i += 1) {
-        const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
-        const field_struct_val = elem_val.castTag(.aggregate).?.data;
-        // TODO use reflection instead of magic numbers here
-        // name: []const u8
-        const name_val = field_struct_val[0];
-        // type: type,
-        const type_val = field_struct_val[1];
-        // default_value: ?*const anyopaque,
-        const default_value_val = field_struct_val[2];
-        // is_comptime: bool,
-        const is_comptime_val = field_struct_val[3];
-        // alignment: comptime_int,
-        const alignment_val = field_struct_val[4];
+        const elem_val = try fields_val.elemValue(mod, i);
+        const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+        const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "name"),
+        ).?);
+        const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "type"),
+        ).?);
+        const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "default_value"),
+        ).?);
+        const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "is_comptime"),
+        ).?);
+        const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "alignment"),
+        ).?);

         if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
             return sema.fail(block, src, "alignment must fit in 'u32'", .{});
         }
-        const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?);
+        const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?);

         if (layout == .Packed) {
             if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
@@ -19430,21 +19946,15 @@ fn reifyStruct(
             return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{});
         }

-        const field_name = try name_val.toAllocatedBytes(
-            Type.initTag(.const_slice_u8),
-            new_decl_arena_allocator,
-            mod,
-        );
+        const field_name = try name_val.toIpString(Type.slice_const_u8, mod);

         if (is_tuple) {
-            const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch {
-                return sema.fail(
-                    block,
-                    src,
-                    "tuple cannot have non-numeric field '{s}'",
-                    .{field_name},
-                );
-            };
+            const field_index = field_name.toUnsigned(ip) orelse return sema.fail(
+                block,
+                src,
+                "tuple cannot have non-numeric field '{}'",
+                .{field_name.fmt(ip)},
+            );

             if (field_index >= fields_len) {
                 return sema.fail(
@@ -19458,22 +19968,19 @@ fn reifyStruct(
         const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
         if (gop.found_existing) {
             // TODO: better source location
-            return sema.fail(block, src, "duplicate struct field {s}", .{field_name});
+            return sema.fail(block, src, "duplicate struct field {}", .{field_name.fmt(ip)});
         }

-        const default_val = if (default_value_val.optionalValue()) |opt_val| blk: {
-            const payload_val = if (opt_val.pointerDecl()) |opt_decl|
-                mod.declPtr(opt_decl).val
-            else
-                opt_val;
-            break :blk try payload_val.copy(new_decl_arena_allocator);
-        } else Value.initTag(.unreachable_value);
-        if (is_comptime_val.toBool() and default_val.tag() == .unreachable_value) {
+        const field_ty = type_val.toType();
+        const default_val = if (default_value_val.optionalValue(mod)) |opt_val|
+            (try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse
+                return sema.failWithNeededComptime(block, src, "struct field default value must be comptime-known")).toIntern()
+        else
+            .none;
+        if (is_comptime_val.toBool() and default_val == .none) {
             return sema.fail(block, src, "comptime field without default initialization value", .{});
         }

-        var buffer: Value.ToTypeBuffer = undefined;
-        const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator);
         gop.value_ptr.* = .{
             .ty = field_ty,
             .abi_align = abi_align,
@@ -19482,20 +19989,20 @@ fn reifyStruct(
             .offset = undefined,
         };

-        if (field_ty.zigTypeTag() == .Opaque) {
+        if (field_ty.zigTypeTag(mod) == .Opaque) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);

                 try sema.addDeclaredHereNote(msg, field_ty);
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(msg);
         }
-        if (field_ty.zigTypeTag() == .NoReturn) {
+        if (field_ty.zigTypeTag(mod) == .NoReturn) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);

                 try sema.addDeclaredHereNote(msg, field_ty);
                 break :msg msg;
@@ -19505,22 +20012,22 @@ fn reifyStruct(
         if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
             const msg = msg: {
                const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);

                const src_decl = sema.mod.declPtr(block.src_decl);
-                try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .struct_field);
+                try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(msg);
-        } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty))) {
+        } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
            const msg = msg: {
                const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);

                const src_decl = sema.mod.declPtr(block.src_decl);
-                try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty);
+                try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);

                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
@@ -19536,7 +20043,7 @@ fn reifyStruct(
        sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
            error.AnalysisFail => {
                const msg = sema.err orelse return err;
-                try sema.addFieldErrNote(struct_ty, index, msg, "while checking this field", .{});
+                try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{});
                return err;
            },
            else => return err,
@@ -19545,30 +20052,27 @@ fn reifyStruct(
        var fields_bit_sum: u64 = 0;
        for (struct_obj.fields.values()) |field| {
-            fields_bit_sum += field.ty.bitSize(target);
+            fields_bit_sum += field.ty.bitSize(mod);
        }

-        if (backing_int_val.optionalValue()) |payload| {
-            var buf: Value.ToTypeBuffer = undefined;
-            const backing_int_ty = payload.toType(&buf);
+        if (backing_int_val.optionalValue(mod)) |payload| {
+            const backing_int_ty = payload.toType();
            try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
-            struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator);
+            struct_obj.backing_int_ty = backing_int_ty;
        } else {
-            var buf: Type.Payload.Bits = .{
-                .base = .{ .tag = .int_unsigned },
-                .data = @intCast(u16, fields_bit_sum),
-            };
-            struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator);
+            struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
        }

        struct_obj.status = .have_layout;
    }

-    try new_decl.finalizeNewArena(&new_decl_arena);
-    return sema.analyzeDeclVal(block, src, new_decl_index);
+    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
+    try mod.finalizeAnonDecl(new_decl_index);
+    return decl_val;
 }

 fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -19580,7 +20084,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
    try sema.checkPtrOperand(block, ptr_src, ptr_ty);

-    var ptr_info = ptr_ty.ptrInfo().data;
+    var ptr_info = ptr_ty.ptrInfo(mod);
    const src_addrspace = ptr_info.@"addrspace";
    if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) {
        const msg = msg: {
@@ -19594,8 +20098,8 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
    ptr_info.@"addrspace" = dest_addrspace;
    const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
-    const dest_ty = if (ptr_ty.zigTypeTag() == .Optional)
-        try Type.optional(sema.arena, dest_ptr_ty)
+    const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional)
+        try Type.optional(sema.arena, dest_ptr_ty, mod)
    else
        dest_ptr_ty;
@@ -19624,6 +20128,7 @@ fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.In
 }

 fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -19638,7 +20143,7 @@ fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
            errdefer msg.destroy(sema.gpa);

            const src_decl = sema.mod.declPtr(block.src_decl);
-            try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), arg_ty, .param_ty);
+            try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), arg_ty, .param_ty);

            try sema.addDeclaredHereNote(msg, arg_ty);
            break :msg msg;
@@ -19685,6 +20190,7 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
 }

 fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].un_node;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ty = try sema.resolveType(block, ty_src, inst_data.operand);
@@ -19692,11 +20198,19 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    var anon_decl = try block.startAnonDecl();
    defer anon_decl.deinit();

-    const bytes = try ty.nameAllocArena(anon_decl.arena(), sema.mod);
+    const bytes = try ty.nameAllocArena(sema.arena, mod);
+
+    const decl_ty = try mod.arrayType(.{
+        .len = bytes.len,
+        .child = .u8_type,
+        .sentinel = .zero_u8,
+    });
    const new_decl = try anon_decl.finish(
-        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
-        try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
+        decl_ty,
+        (try mod.intern(.{ .aggregate = .{
+            .ty = decl_ty.toIntern(),
+            .storage = .{ .bytes = bytes },
+        } })).toValue(),
        0, // default alignment
    );
@@ -19716,6 +20230,7 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 }

 fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19730,24 +20245,24 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        const result_val = try sema.floatToInt(block, operand_src, val, operand_ty, dest_ty);
        return sema.addConstant(dest_ty, result_val);
-    } else if (dest_ty.zigTypeTag() == .ComptimeInt) {
+    } else if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
        return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_int' must be comptime-known");
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
-    if (dest_ty.intInfo(sema.mod.getTarget()).bits == 0) {
+    if (dest_ty.intInfo(mod).bits == 0) {
        if (block.wantSafety()) {
-            const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero));
+            const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)));
            try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds);
        }
-        return sema.addConstant(dest_ty, Value.zero);
+        return sema.addConstant(dest_ty, try mod.intValue(dest_ty, 0));
    }
    const result = try block.addTyOp(if (block.float_mode == .Optimized) .float_to_int_optimized else .float_to_int, dest_ty, operand);
    if (block.wantSafety()) {
        const back = try block.addTyOp(.int_to_float, operand_ty, result);
        const diff = try block.addBinOp(.sub, operand, back);
-        const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, Value.one));
-        const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, Value.negative_one));
+        const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.floatValue(operand_ty, 1.0)));
+        const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.floatValue(operand_ty, -1.0)));
        const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
        try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds);
    }
@@ -19755,6 +20270,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
 }

 fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19769,7 +20285,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
    if (try sema.resolveMaybeUndefVal(operand)) |val| {
        const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema);
        return sema.addConstant(dest_ty, result_val);
-    } else if (dest_ty.zigTypeTag() == .ComptimeFloat) {
+    } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
        return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known");
    }
@@ -19778,6 +20294,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
 }

 fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
@@ -19790,11 +20307,10 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const ptr_ty = try sema.resolveType(block, src, extra.lhs);
    try sema.checkPtrType(block, type_src, ptr_ty);
-    const elem_ty = ptr_ty.elemType2();
-    const target = sema.mod.getTarget();
-    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(target, sema);
+    const elem_ty = ptr_ty.elemType2(mod);
+    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);

-    if (ptr_ty.isSlice()) {
+    if (ptr_ty.isSlice(mod)) {
        const msg = msg: {
            const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)});
            errdefer msg.destroy(sema.gpa);
@@ -19805,36 +20321,26 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    }

    if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
-        const addr = val.toUnsignedInt(target);
-        if (!ptr_ty.isAllowzeroPtr() and addr == 0)
+        const addr = val.toUnsignedInt(mod);
+        if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0)
            return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
        if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
            return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});

-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = addr,
-        };
-        return sema.addConstant(ptr_ty, Value.initPayload(&val_payload.base));
+        return sema.addConstant(ptr_ty, try mod.ptrIntValue(ptr_ty, addr));
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
-    if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag() == .Fn)) {
-        if (!ptr_ty.isAllowzeroPtr()) {
+    if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
+        if (!ptr_ty.isAllowzeroPtr(mod)) {
            const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
            try sema.addSafetyCheck(block, is_non_zero, .cast_to_null);
        }

        if (ptr_align > 1) {
-            const val_payload = try sema.arena.create(Value.Payload.U64);
-            val_payload.* = .{
-                .base = .{ .tag = .int_u64 },
-                .data = ptr_align - 1,
-            };
            const align_minus_1 = try sema.addConstant(
                Type.usize,
-                Value.initPayload(&val_payload.base),
+                try mod.intValue(Type.usize, ptr_align - 1),
            );
            const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
            const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -19845,6 +20351,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
    const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -19860,22 +20368,27 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
    if (disjoint: {
        // Try avoiding resolving inferred error sets if we can
-        if (!dest_ty.isAnyError() and dest_ty.errorSetNames().len == 0) break :disjoint true;
-        if (!operand_ty.isAnyError() and operand_ty.errorSetNames().len == 0) break :disjoint true;
-        if (dest_ty.isAnyError()) break :disjoint false;
-        if (operand_ty.isAnyError()) break :disjoint false;
-        for (dest_ty.errorSetNames()) |dest_err_name|
-            if (operand_ty.errorSetHasField(dest_err_name))
+        if (!dest_ty.isAnyError(mod) and dest_ty.errorSetNames(mod).len == 0) break :disjoint true;
+        if (!operand_ty.isAnyError(mod) and operand_ty.errorSetNames(mod).len == 0) break :disjoint true;
+        if (dest_ty.isAnyError(mod)) break :disjoint false;
+        if (operand_ty.isAnyError(mod)) break :disjoint false;
+        for (dest_ty.errorSetNames(mod)) |dest_err_name| {
+            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                break :disjoint false;
+        }

-        if (dest_ty.tag() != .error_set_inferred and operand_ty.tag() != .error_set_inferred)
+        if (!ip.isInferredErrorSetType(dest_ty.toIntern()) and
+            !ip.isInferredErrorSetType(operand_ty.toIntern()))
+        {
            break :disjoint true;
+        }

        try sema.resolveInferredErrorSetTy(block, dest_ty_src, dest_ty);
        try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty);
-        for (dest_ty.errorSetNames()) |dest_err_name|
-            if (operand_ty.errorSetHasField(dest_err_name))
+        for (dest_ty.errorSetNames(mod)) |dest_err_name| {
+            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                break :disjoint false;
+        }

        break :disjoint true;
    }) {
@@ -19895,15 +20408,15 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
    }

    if (maybe_operand_val) |val| {
-        if (!dest_ty.isAnyError()) {
-            const error_name = val.castTag(.@"error").?.data.name;
-            if (!dest_ty.errorSetHasField(error_name)) {
+        if (!dest_ty.isAnyError(mod)) {
+            const error_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
+            if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), error_name)) {
                const msg = msg: {
                    const msg = try sema.errMsg(
                        block,
                        src,
-                        "'error.{s}' not a member of error set '{}'",
-                        .{ error_name, dest_ty.fmt(sema.mod) },
+                        "'error.{}' not a member of error set '{}'",
+                        .{ error_name.fmt(ip), dest_ty.fmt(sema.mod) },
                    );
                    errdefer msg.destroy(sema.gpa);
                    try sema.addDeclaredHereNote(msg, dest_ty);
@@ -19913,11 +20426,11 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
            }
        }

-        return sema.addConstant(dest_ty, val);
+        return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
-    if (block.wantSafety() and !dest_ty.isAnyError() and sema.mod.backendSupportsFeature(.error_set_has_value)) {
+    if (block.wantSafety() and !dest_ty.isAnyError(mod) and sema.mod.backendSupportsFeature(.error_set_has_value)) {
        const err_int_inst = try block.addBitCast(Type.err_int, operand);
        const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst);
        try sema.addSafetyCheck(block, ok, .invalid_error_code);
@@ -19926,6 +20439,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
 }

 fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19934,13 +20448,12 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
    const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
    const operand = try sema.resolveInst(extra.rhs);
    const operand_ty = sema.typeOf(operand);
-    const target = sema.mod.getTarget();

    try sema.checkPtrType(block, dest_ty_src, dest_ty);
    try sema.checkPtrOperand(block, operand_src, operand_ty);

-    const operand_info = operand_ty.ptrInfo().data;
-    const dest_info = dest_ty.ptrInfo().data;
+    const operand_info = operand_ty.ptrInfo(mod);
+    const dest_info = dest_ty.ptrInfo(mod);
    if (!operand_info.mutable and dest_info.mutable) {
        const msg = msg: {
            const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
@@ -19972,8 +20485,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
        return sema.failWithOwnedErrorMsg(msg);
    }

-    const dest_is_slice = dest_ty.isSlice();
-    const operand_is_slice = operand_ty.isSlice();
+    const dest_is_slice = dest_ty.isSlice(mod);
+    const operand_is_slice = operand_ty.isSlice(mod);
    if (dest_is_slice and !operand_is_slice) {
        return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{});
    }
@@ -19982,32 +20495,31 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
    else
        operand;

-    const dest_elem_ty = dest_ty.elemType2();
+    const dest_elem_ty = dest_ty.elemType2(mod);
    try sema.resolveTypeLayout(dest_elem_ty);
-    const dest_align = dest_ty.ptrAlignment(target);
+    const dest_align = dest_ty.ptrAlignment(mod);

-    const operand_elem_ty = operand_ty.elemType2();
+    const operand_elem_ty = operand_ty.elemType2(mod);
    try sema.resolveTypeLayout(operand_elem_ty);
-    const operand_align = operand_ty.ptrAlignment(target);
+    const operand_align = operand_ty.ptrAlignment(mod);

    // If the destination is less aligned than the source, preserve the source alignment
    const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
        // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result
-        if (dest_ty.zigTypeTag() == .Optional) {
-            var buf: Type.Payload.ElemType = undefined;
-            var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data;
+        if (dest_ty.zigTypeTag(mod) == .Optional) {
+            var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod);
            dest_ptr_info.@"align" = operand_align;
-            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info));
+            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, mod, dest_ptr_info), mod);
        } else {
-            var dest_ptr_info = dest_ty.ptrInfo().data;
+            var dest_ptr_info = dest_ty.ptrInfo(mod);
            dest_ptr_info.@"align" = operand_align;
-            break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
+            break :blk try Type.ptr(sema.arena, mod, dest_ptr_info);
        }
    };

    if (dest_is_slice) {
-        const operand_elem_size = operand_elem_ty.abiSize(target);
-        const dest_elem_size = dest_elem_ty.abiSize(target);
+        const operand_elem_size = operand_elem_ty.abiSize(mod);
+        const dest_elem_size = dest_elem_ty.abiSize(mod);
        if (operand_elem_size != dest_elem_size) {
            return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{});
        }
@@ -20019,10 +20531,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
            errdefer msg.destroy(sema.gpa);

            try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
-                operand_ty.fmt(sema.mod), operand_align,
+                operand_ty.fmt(mod), operand_align,
            });
            try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{
-                dest_ty.fmt(sema.mod), dest_align,
+                dest_ty.fmt(mod), dest_align,
            });
            try sema.errNote(block, src, msg, "consider using '@alignCast'", .{});
@@ -20032,21 +20544,18 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
    }

    if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| {
-        if (!dest_ty.ptrAllowsZero() and operand_val.isUndef()) {
+        if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef(mod)) {
            return sema.failWithUseOfUndef(block, operand_src);
        }
-        if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) {
-            return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
+        if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) {
+            return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
        }
-        if (dest_ty.zigTypeTag() == .Optional and sema.typeOf(ptr).zigTypeTag() != .Optional) {
-            return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val));
-        }
-        return sema.addConstant(aligned_dest_ty, operand_val);
+        return sema.addConstant(aligned_dest_ty, try mod.getCoerced(operand_val, aligned_dest_ty));
    }

    try sema.requireRuntimeBlock(block, src, null);
-    if (block.wantSafety() and operand_ty.ptrAllowsZero() and !dest_ty.ptrAllowsZero() and
-        (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn))
+    if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and
+        (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
    {
        const ptr_int = try block.addUnOp(.ptrtoint, ptr);
        const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
@@ -20062,6 +20571,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 }

 fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20069,12 +20579,12 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
    const operand_ty = sema.typeOf(operand);
    try sema.checkPtrOperand(block, operand_src, operand_ty);

-    var ptr_info = operand_ty.ptrInfo().data;
+    var ptr_info = operand_ty.ptrInfo(mod);
    ptr_info.mutable = true;
-    const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
+    const dest_ty = try Type.ptr(sema.arena, mod, ptr_info);

    if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
-        return sema.addConstant(dest_ty, operand_val);
+        return sema.addConstant(dest_ty, try mod.getCoerced(operand_val, dest_ty));
    }

    try sema.requireRuntimeBlock(block, src, null);
@@ -20082,6 +20592,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
 }

 fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    const src = LazySrcLoc.nodeOffset(extra.node);
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20089,9 +20600,9 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
    const operand_ty = sema.typeOf(operand);
    try sema.checkPtrOperand(block, operand_src, operand_ty);

-    var ptr_info = operand_ty.ptrInfo().data;
+    var ptr_info = operand_ty.ptrInfo(mod);
    ptr_info.@"volatile" = false;
-    const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
+    const dest_ty = try Type.ptr(sema.arena, mod, ptr_info);

    if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
        return sema.addConstant(dest_ty, operand_val);
@@ -20102,6 +20613,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
 }

 fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const src = inst_data.src();
    const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -20112,9 +20624,12 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty);
    const operand_ty = sema.typeOf(operand);
    const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
-    const is_vector = operand_ty.zigTypeTag() == .Vector;
+    const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
    const dest_ty = if (is_vector)
-        try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty)
+        try mod.vectorType(.{
+            .len = operand_ty.vectorLen(mod),
+            .child = dest_scalar_ty.toIntern(),
+        })
    else
        dest_scalar_ty;
@@ -20122,22 +20637,21 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
        return sema.coerce(block, dest_ty, operand, operand_src);
    }

-    const target = sema.mod.getTarget();
-    const dest_info = dest_scalar_ty.intInfo(target);
+    const dest_info = dest_scalar_ty.intInfo(mod);

    if (try sema.typeHasOnePossibleValue(dest_ty)) |val| {
        return sema.addConstant(dest_ty, val);
    }

-    if (operand_scalar_ty.zigTypeTag() != .ComptimeInt) {
-        const operand_info = operand_ty.intInfo(target);
+    if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
+        const operand_info = operand_ty.intInfo(mod);
        if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
            return sema.addConstant(operand_ty, val);
        }

        if (operand_info.signedness != dest_info.signedness) {
            return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
-                @tagName(dest_info.signedness), operand_ty.fmt(sema.mod),
+                @tagName(dest_info.signedness), operand_ty.fmt(mod),
            });
        }

        if (operand_info.bits < dest_info.bits) {
@@ -20146,7 +20660,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                block,
                src,
                "destination type '{}' has more bits than source type '{}'",
-                .{ dest_ty.fmt(sema.mod), operand_ty.fmt(sema.mod) },
+                .{ dest_ty.fmt(mod), operand_ty.fmt(mod) },
            );
            errdefer msg.destroy(sema.gpa);
            try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{
@@ -20162,23 +20676,22 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    }

    if (try sema.resolveMaybeUndefValIntable(operand)) |val| {
-        if (val.isUndef()) return sema.addConstUndef(dest_ty);
+        if (val.isUndef(mod)) return sema.addConstUndef(dest_ty);
        if (!is_vector) {
-            return sema.addConstant(
+            return sema.addConstant(dest_ty, try mod.getCoerced(
+                try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod),
                dest_ty,
-                try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod),
-            );
+            ));
        }
-        var elem_buf: Value.ElemValueBuffer = undefined;
-        const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
+        const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod));
        for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
-            elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
+            const elem_val = try val.elemValue(mod, i);
+            elem.* = try (try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod)).intern(dest_scalar_ty, mod);
        }
-        return sema.addConstant(
-            dest_ty,
-            try Value.Tag.aggregate.create(sema.arena, elems),
-        );
+        return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{
+            .ty = dest_ty.toIntern(),
+            .storage = .{ .elems = elems },
+        } })).toValue());
    }

    try sema.requireRuntimeBlock(block, src, operand_src);
@@ -20186,6 +20699,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
    const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -20196,43 +20710,38 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
    try sema.checkPtrOperand(block, ptr_src, ptr_ty);

-    var ptr_info = ptr_ty.ptrInfo().data;
+    var ptr_info = ptr_ty.ptrInfo(mod);
    ptr_info.@"align" = dest_align;
-    var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
-    if (ptr_ty.zigTypeTag() == .Optional) {
-        dest_ty = try Type.Tag.optional.create(sema.arena, dest_ty);
+    var dest_ty = try Type.ptr(sema.arena, mod, ptr_info);
+    if (ptr_ty.zigTypeTag(mod) == .Optional) {
+        dest_ty = try mod.optionalType(dest_ty.toIntern());
    }

    if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| {
-        if (try val.getUnsignedIntAdvanced(sema.mod.getTarget(), null)) |addr| {
+        if (try val.getUnsignedIntAdvanced(mod, null)) |addr| {
            if (addr % dest_align != 0) {
                return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align });
            }
        }
-        return sema.addConstant(dest_ty, val);
+        return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
    }

    try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
    if (block.wantSafety() and dest_align > 1 and
        try sema.typeHasRuntimeBits(ptr_info.pointee_type))
    {
-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = dest_align - 1,
-        };
        const align_minus_1 = try sema.addConstant(
            Type.usize,
-            Value.initPayload(&val_payload.base),
+            try mod.intValue(Type.usize, dest_align - 1),
        );
-        const actual_ptr = if (ptr_ty.isSlice())
+        const actual_ptr = if (ptr_ty.isSlice(mod))
            try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty)
        else
            ptr;
        const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr);
        const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
        const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
-        const ok = if (ptr_ty.isSlice()) ok: {
+        const ok = if (ptr_ty.isSlice(mod)) ok: {
            const len = try sema.analyzeSliceLen(block, ptr_src, ptr);
            const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
            break :ok try block.addBinOp(.bit_or, len_zero, is_aligned);
@@ -20247,51 +20756,52 @@ fn zirBitCount(
    block: *Block,
    inst: Zir.Inst.Index,
    air_tag: Air.Inst.Tag,
-    comptime comptimeOp: fn (val: Value, ty: Type, target: std.Target) u64,
+    comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64,
 ) CompileError!Air.Inst.Ref {
+    const
mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); _ = try sema.checkIntOrVector(block, operand, operand_src); - const target = sema.mod.getTarget(); - const bits = operand_ty.intInfo(target).bits; + const bits = operand_ty.intInfo(mod).bits; if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return sema.addConstant(operand_ty, val); } - const result_scalar_ty = try Type.smallestUnsignedInt(sema.arena, bits); - switch (operand_ty.zigTypeTag()) { + const result_scalar_ty = try mod.smallestUnsignedInt(bits); + switch (operand_ty.zigTypeTag(mod)) { .Vector => { - const vec_len = operand_ty.vectorLen(); - const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty); + const vec_len = operand_ty.vectorLen(mod); + const result_ty = try mod.vectorType(.{ + .len = vec_len, + .child = result_scalar_ty.toIntern(), + }); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_ty); + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - var elem_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, vec_len); - const scalar_ty = operand_ty.scalarType(); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); + const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) |*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - const count = comptimeOp(elem_val, scalar_ty, target); - elem.* = try Value.Tag.int_u64.create(sema.arena, count); + const elem_val = try val.elemValue(mod, i); + const count = comptimeOp(elem_val, scalar_ty, mod); + elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern(); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_ty, operand); } }, .Int => { - if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_scalar_ty); - try sema.resolveLazyValue(val); - return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, target)); + if (try sema.resolveMaybeUndefLazyVal(operand)) |val| { + if (val.isUndef(mod)) return sema.addConstUndef(result_scalar_ty); + return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod)); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_scalar_ty, operand); @@ -20302,20 +20812,20 @@ fn zirBitCount( } fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src); - const target = sema.mod.getTarget(); - const bits = scalar_ty.intInfo(target).bits; + const bits = scalar_ty.intInfo(mod).bits; if (bits % 8 != 0) { return sema.fail( 
block, operand_src, "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits", - .{ scalar_ty.fmt(sema.mod), bits }, + .{ scalar_ty.fmt(mod), bits }, ); } @@ -20323,11 +20833,11 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant(operand_ty, val); } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try val.byteSwap(operand_ty, target, sema.arena); + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); + const result_val = try val.byteSwap(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20336,20 +20846,19 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Vector => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); - const vec_len = operand_ty.vectorLen(); - var elem_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, vec_len); + const vec_len = operand_ty.vectorLen(mod); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod); } - return sema.addConstant( - operand_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{ + .ty = operand_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -20371,12 +20880,12 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return sema.addConstant(operand_ty, val); } - const target = sema.mod.getTarget(); - switch (operand_ty.zigTypeTag()) { + const mod = sema.mod; + switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try val.bitReverse(operand_ty, target, sema.arena); + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); + const result_val = try val.bitReverse(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20385,20 +20894,19 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! }, .Vector => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); - const vec_len = operand_ty.vectorLen(); - var elem_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, vec_len); + const vec_len = operand_ty.vectorLen(mod); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod); } - return sema.addConstant( - operand_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{ + .ty = operand_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -20428,15 +20936,15 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty = try sema.resolveType(block, lhs_src, extra.lhs); - const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known"); - const target = sema.mod.getTarget(); + const field_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "name of field must be comptime-known"); + const mod = sema.mod; try sema.resolveTypeLayout(ty); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => {}, else => { const msg = msg: { - const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, ty); break :msg msg; @@ -20445,45 +20953,47 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 }, } - const field_index = if (ty.isTuple()) blk: { - if (mem.eql(u8, field_name, "len")) { + const field_index = if (ty.isTuple(mod)) blk: { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { return sema.fail(block, src, "no offset available for 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src); } else try sema.structFieldIndex(block, ty, field_name, rhs_src); - if (ty.structFieldIsComptime(field_index)) { + if (ty.structFieldIsComptime(field_index, mod)) { return sema.fail(block, src, "no offset available for comptime field", .{}); } - switch (ty.containerLayout()) { + switch (ty.containerLayout(mod)) { .Packed => { var bit_sum: u64 = 0; - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values(), 0..) |field, i| { if (i == field_index) { return bit_sum; } - bit_sum += field.ty.bitSize(target); + bit_sum += field.ty.bitSize(mod); } else unreachable; }, - else => return ty.structFieldOffset(field_index, target) * 8, + else => return ty.structFieldOffset(field_index, mod) * 8, } } fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Struct, .Enum, .Union, .Opaque => return, - else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)}), } } /// Returns `true` if the type was a comptime_int. 
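The packed branch of bitOffsetOf above accumulates field.ty.bitSize(mod) over every field declared before the requested one, while non-packed layouts defer to structFieldOffset(field_index, mod) * 8. As a minimal sketch of the user-facing semantics this computes (an illustrative test, not part of this patch):

const std = @import("std");

test "packed struct bit offsets accumulate preceding field bit sizes" {
    const Flags = packed struct {
        a: u1,
        b: u3,
        c: u4,
    };
    try std.testing.expect(@bitOffsetOf(Flags, "a") == 0);
    try std.testing.expect(@bitOffsetOf(Flags, "b") == 1); // after the 1 bit of `a`
    try std.testing.expect(@bitOffsetOf(Flags, "c") == 4); // after 1 + 3 bits
}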
fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { - switch (try ty.zigTypeTagOrPoison()) { + const mod = sema.mod; + switch (try ty.zigTypeTagOrPoison(mod)) { .ComptimeInt => return true, .Int => return false, - else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)}), } } @@ -20493,8 +21003,9 @@ fn checkInvalidPtrArithmetic( src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (try ty.zigTypeTagOrPoison()) { - .Pointer => switch (ty.ptrSize()) { + const mod = sema.mod; + switch (try ty.zigTypeTagOrPoison(mod)) { + .Pointer => switch (ty.ptrSize(mod)) { .One, .Slice => return, .Many, .C => return sema.fail( block, @@ -20532,7 +21043,8 @@ fn checkPtrOperand( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => return, .Fn => { const msg = msg: { @@ -20540,7 +21052,7 @@ fn checkPtrOperand( block, ty_src, "expected pointer, found '{}'", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); @@ -20550,10 +21062,10 @@ fn checkPtrOperand( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional()) return, + .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } - return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); } fn checkPtrType( @@ -20562,7 +21074,8 @@ fn checkPtrType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => return, .Fn => { const msg = msg: { @@ -20570,7 +21083,7 @@ fn checkPtrType( block, ty_src, "expected pointer type, found '{}'", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); @@ -20580,10 +21093,10 @@ fn checkPtrType( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional()) return, + .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } - return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); } fn checkVectorElemType( @@ -20592,11 +21105,12 @@ fn checkVectorElemType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Int, .Float, .Bool => return, - else => if (ty.isPtrAtRuntime()) return, + else => if (ty.isPtrAtRuntime(mod)) return, } - return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)}); } fn checkFloatType( @@ -20605,9 +21119,10 @@ fn checkFloatType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ComptimeInt, .ComptimeFloat, .Float => {}, - else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(mod)}), } } @@ -20617,13 +21132,14 @@ fn checkNumericType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch 
(ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, - else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}), } } @@ -20637,9 +21153,10 @@ fn checkAtomicPtrOperand( ptr_src: LazySrcLoc, ptr_const: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); - var diag: target_util.AtomicPtrAlignmentDiagnostics = .{}; - const alignment = target_util.atomicPtrAlignment(target, elem_ty, &diag) catch |err| switch (err) { + const mod = sema.mod; + var diag: Module.AtomicPtrAlignmentDiagnostics = .{}; + const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.FloatTooBig => return sema.fail( block, elem_ty_src, @@ -20656,7 +21173,7 @@ fn checkAtomicPtrOperand( block, elem_ty_src, "expected bool, integer, float, enum, or pointer type; found '{}'", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ), }; @@ -20668,10 +21185,10 @@ fn checkAtomicPtrOperand( }; const ptr_ty = sema.typeOf(ptr); - const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison()) { - .Pointer => ptr_ty.ptrInfo().data, + const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { + .Pointer => ptr_ty.ptrInfo(mod), else => { - const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); + const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); unreachable; }, @@ -20681,7 +21198,7 @@ fn checkAtomicPtrOperand( wanted_ptr_data.@"allowzero" = ptr_data.@"allowzero"; wanted_ptr_data.@"volatile" = ptr_data.@"volatile"; - const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); + const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data); const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); return casted_ptr; @@ -20695,7 +21212,7 @@ fn checkPtrIsNotComptimeMutable( operand_src: LazySrcLoc, ) CompileError!void { _ = operand_src; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(sema.mod)) { return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{}); } } @@ -20704,7 +21221,7 @@ fn checkComptimeVarStore( sema: *Sema, block: *Block, src: LazySrcLoc, - decl_ref_mut: Value.Payload.DeclRefMut.Data, + decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, ) CompileError!void { if (@enumToInt(decl_ref_mut.runtime_index) < @enumToInt(block.runtime_index)) { if (block.runtime_cond) |cond_src| { @@ -20735,20 +21252,21 @@ fn checkIntOrVector( operand: Air.Inst.Ref, operand_src: LazySrcLoc, ) CompileError!Type { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - switch (try operand_ty.zigTypeTagOrPoison()) { + switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int => return operand_ty, .Vector => { - const elem_ty = operand_ty.childType(); - switch (try elem_ty.zigTypeTagOrPoison()) { + const elem_ty = operand_ty.childType(mod); + switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ - 
elem_ty.fmt(sema.mod), + elem_ty.fmt(mod), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } } @@ -20759,27 +21277,29 @@ fn checkIntOrVectorAllowComptime( operand_ty: Type, operand_src: LazySrcLoc, ) CompileError!Type { - switch (try operand_ty.zigTypeTagOrPoison()) { + const mod = sema.mod; + switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return operand_ty, .Vector => { - const elem_ty = operand_ty.childType(); - switch (try elem_ty.zigTypeTagOrPoison()) { + const elem_ty = operand_ty.childType(mod); + switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ - elem_ty.fmt(sema.mod), + elem_ty.fmt(mod), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } } fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ErrorSet => return, - else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(mod)}), } } @@ -20805,11 +21325,12 @@ fn checkSimdBinOp( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!SimdBinOp { + const mod = sema.mod; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - var vec_len: ?usize = if (lhs_ty.zigTypeTag() == .Vector) lhs_ty.vectorLen() else null; + var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null; const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); @@ -20823,7 +21344,7 @@ fn checkSimdBinOp( .lhs_val = try sema.resolveMaybeUndefVal(lhs), .rhs_val = try sema.resolveMaybeUndefVal(rhs), .result_ty = result_ty, - .scalar_ty = result_ty.scalarType(), + .scalar_ty = result_ty.scalarType(mod), }; } @@ -20836,8 +21357,9 @@ fn checkVectorizableBinaryOperands( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!void { - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const mod = sema.mod; + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return; const lhs_is_vector = switch (lhs_zig_ty_tag) { @@ -20850,8 +21372,8 @@ fn checkVectorizableBinaryOperands( }; if (lhs_is_vector and rhs_is_vector) { - const lhs_len = lhs_ty.arrayLen(); - const rhs_len = rhs_ty.arrayLen(); + const lhs_len = lhs_ty.arrayLen(mod); + const rhs_len = rhs_ty.arrayLen(mod); if (lhs_len != rhs_len) { const msg = msg: { const msg = try sema.errMsg(block, src, "vector length mismatch", .{}); @@ -20865,7 +21387,7 @@ fn checkVectorizableBinaryOperands( } else { const msg = msg: { const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: '{}' and '{}'", .{ - lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod), + lhs_ty.fmt(mod), rhs_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (lhs_is_vector) { @@ -20883,7 +21405,8 @@ fn 
checkVectorizableBinaryOperands( fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { if (base_src == .unneeded) return .unneeded; - return Module.optionsSrc(sema.gpa, sema.mod.declPtr(block.src_decl), base_src, wanted); + const mod = sema.mod; + return mod.optionsSrc(mod.declPtr(block.src_decl), base_src, wanted); } fn resolveExportOptions( @@ -20891,7 +21414,10 @@ fn resolveExportOptions( block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, -) CompileError!std.builtin.ExportOptions { +) CompileError!Module.Export.Options { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const export_options_ty = try sema.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -20901,26 +21427,26 @@ fn resolveExportOptions( const section_src = sema.maybeOptionsSrc(block, src, "section"); const visibility_src = sema.maybeOptionsSrc(block, src, "visibility"); - const name_operand = try sema.fieldVal(block, src, options, "name", name_src); + const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); - const name_ty = Type.initTag(.const_slice_u8); - const name = try name_val.toAllocatedBytes(name_ty, sema.arena, sema.mod); + const name_ty = Type.slice_const_u8; + const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod); - const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); + const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known"); - const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage); + const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const section_operand = try sema.fieldVal(block, src, options, "section", section_src); + const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section"), section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); - const section_ty = Type.initTag(.const_slice_u8); - const section = if (section_opt_val.optionalValue()) |section_val| - try section_val.toAllocatedBytes(section_ty, sema.arena, sema.mod) + const section_ty = Type.slice_const_u8; + const section = if (section_opt_val.optionalValue(mod)) |section_val| + try section_val.toAllocatedBytes(section_ty, sema.arena, mod) else null; - const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src); + const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility"), visibility_src); const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known"); - const visibility = visibility_val.toEnum(std.builtin.SymbolVisibility); + const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val); if (name.len < 1) { return sema.fail(block, name_src, "exported symbol name cannot be empty", .{}); @@ -20932,10 +21458,10 @@ fn resolveExportOptions( }); } - return std.builtin.ExportOptions{ - .name = name, + return .{ + 
.name = try ip.getOrPutString(gpa, name), .linkage = linkage, - .section = section, + .section = try ip.getOrPutStringOpt(gpa, section), .visibility = visibility, }; } @@ -20948,11 +21474,12 @@ fn resolveBuiltinEnum( comptime name: []const u8, reason: []const u8, ) CompileError!@field(std.builtin, name) { + const mod = sema.mod; const ty = try sema.getBuiltinType(name); const air_ref = try sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced, reason); - return val.toEnum(@field(std.builtin, name)); + return mod.toEnum(@field(std.builtin, name), val); } fn resolveAtomicOrder( @@ -20979,6 +21506,7 @@ fn zirCmpxchg( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data; const air_tag: Air.Inst.Tag = switch (extended.small) { 0 => .cmpxchg_weak, @@ -20996,12 +21524,12 @@ fn zirCmpxchg( // zig fmt: on const expected_value = try sema.resolveInst(extra.expected_value); const elem_ty = sema.typeOf(expected_value); - if (elem_ty.zigTypeTag() == .Float) { + if (elem_ty.zigTypeTag(mod) == .Float) { return sema.fail( block, elem_ty_src, "expected bool, integer, enum, or pointer type; found '{}'", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ); } const uncasted_ptr = try sema.resolveInst(extra.ptr); @@ -21023,29 +21551,34 @@ fn zirCmpxchg( return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{}); } - const result_ty = try Type.optional(sema.arena, elem_ty); + const result_ty = try Type.optional(sema.arena, elem_ty, mod); // special case zero bit types if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) { - return sema.addConstant(result_ty, Value.null); + return sema.addConstant(result_ty, (try mod.intern(.{ .opt = .{ + .ty = result_ty.toIntern(), + .val = .none, + } })).toValue()); } const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { if (try sema.resolveMaybeUndefVal(expected_value)) |expected_val| { if (try sema.resolveMaybeUndefVal(new_value)) |new_val| { - if (expected_val.isUndef() or new_val.isUndef()) { + if (expected_val.isUndef(mod) or new_val.isUndef(mod)) { // TODO: this should probably cause the memory stored at the pointer // to become undef as well return sema.addConstUndef(result_ty); } const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; - const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { - try sema.storePtr(block, src, ptr, new_value); - break :blk Value.null; - } else try Value.Tag.opt_payload.create(sema.arena, stored_val); - - return sema.addConstant(result_ty, result_val); + const result_val = try mod.intern(.{ .opt = .{ + .ty = result_ty.toIntern(), + .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: { + try sema.storePtr(block, src, ptr, new_value); + break :blk .none; + } else stored_val.toIntern(), + } }); + return sema.addConstant(result_ty, result_val.toValue()); } else break :rs new_value_src; } else break :rs expected_src; } else ptr_src; @@ -21069,6 +21602,7 @@ fn zirCmpxchg( } fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const 
len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -21077,17 +21611,13 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const scalar = try sema.resolveInst(extra.rhs); const scalar_ty = sema.typeOf(scalar); try sema.checkVectorElemType(block, scalar_src, scalar_ty); - const vector_ty = try Type.Tag.vector.create(sema.arena, .{ + const vector_ty = try mod.vectorType(.{ .len = len, - .elem_type = scalar_ty, + .child = scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { - if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty); - - return sema.addConstant( - vector_ty, - try Value.Tag.repeated.create(sema.arena, scalar_val), - ); + if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty); + return sema.addConstant(vector_ty, try sema.splat(vector_ty, scalar_val)); } try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src); @@ -21102,31 +21632,31 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", "@reduce operation must be comptime-known"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); + const mod = sema.mod; - if (operand_ty.zigTypeTag() != .Vector) { - return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(sema.mod)}); + if (operand_ty.zigTypeTag(mod) != .Vector) { + return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)}); } - const scalar_ty = operand_ty.childType(); + const scalar_ty = operand_ty.childType(mod); // Type-check depending on operation. switch (operation) { - .And, .Or, .Xor => switch (scalar_ty.zigTypeTag()) { + .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Bool => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{ - @tagName(operation), operand_ty.fmt(sema.mod), + @tagName(operation), operand_ty.fmt(mod), }), }, - .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag()) { + .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Float => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{ - @tagName(operation), operand_ty.fmt(sema.mod), + @tagName(operation), operand_ty.fmt(mod), }), }, } - const vec_len = operand_ty.vectorLen(); + const vec_len = operand_ty.vectorLen(mod); if (vec_len == 0) { // TODO re-evaluate if we should introduce a "neutral value" for some operations, // e.g. zero for add and one for mul. @@ -21134,21 +21664,20 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
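zirSplat and the zirReduce checks above now build vector types through mod.vectorType instead of arena-allocated Type payloads; the comptime fold for @reduce (the accumulator loop in the next hunk) walks the elements with bitwiseAnd/Or/Xor, numberMin/numberMax, or the wrapping add/mul helpers. A sketch of the behavior being implemented, assuming the two-argument @splat builtin of this compiler era:

const std = @import("std");

test "@reduce folds a vector to a scalar" {
    const v: @Vector(4, i32) = .{ 1, 2, 3, 4 };
    try std.testing.expect(@reduce(.Add, v) == 10);
    try std.testing.expect(@reduce(.Max, v) == 4);

    // Every lane of the splat gets the scalar value.
    const ones = @splat(4, @as(i32, 1));
    try std.testing.expect(@reduce(.Add, ones) == 4);
}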
} if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty); + if (operand_val.isUndef(mod)) return sema.addConstUndef(scalar_ty); - var accum: Value = try operand_val.elemValue(sema.mod, sema.arena, 0); - var elem_buf: Value.ElemValueBuffer = undefined; + var accum: Value = try operand_val.elemValue(mod, 0); var i: u32 = 1; while (i < vec_len) : (i += 1) { - const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf); + const elem_val = try operand_val.elemValue(mod, i); switch (operation) { - .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod), - .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod), - .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod), - .Min => accum = accum.numberMin(elem_val, target), - .Max => accum = accum.numberMax(elem_val, target), + .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod), + .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod), + .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod), + .Min => accum = accum.numberMin(elem_val, mod), + .Max => accum = accum.numberMax(elem_val, mod), .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty), - .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod), + .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod), } } return sema.addConstant(scalar_ty, accum); @@ -21165,6 +21694,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data; const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -21177,13 +21707,13 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air var mask = try sema.resolveInst(extra.mask); var mask_ty = sema.typeOf(mask); - const mask_len = switch (sema.typeOf(mask).zigTypeTag()) { - .Array, .Vector => sema.typeOf(mask).arrayLen(), + const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) { + .Array, .Vector => sema.typeOf(mask).arrayLen(mod), else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}), }; - mask_ty = try Type.Tag.vector.create(sema.arena, .{ - .len = mask_len, - .elem_type = Type.i32, + mask_ty = try mod.vectorType(.{ + .len = @intCast(u32, mask_len), + .child = .i32_type, }); mask = try sema.coerce(block, mask_ty, mask, mask_src); const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known"); @@ -21200,27 +21730,28 @@ fn analyzeShuffle( mask: Value, mask_len: u32, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node }; const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node }; const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node }; var a = a_arg; var b = b_arg; - const res_ty = try Type.Tag.vector.create(sema.arena, .{ + const res_ty = try mod.vectorType(.{ .len = mask_len, - .elem_type = elem_ty, + .child = elem_ty.toIntern(), }); - var maybe_a_len = switch (sema.typeOf(a).zigTypeTag()) { - .Array, .Vector => 
sema.typeOf(a).arrayLen(), + var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) { + .Array, .Vector => sema.typeOf(a).arrayLen(mod), .Undefined => null, else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{ elem_ty.fmt(sema.mod), sema.typeOf(a).fmt(sema.mod), }), }; - var maybe_b_len = switch (sema.typeOf(b).zigTypeTag()) { - .Array, .Vector => sema.typeOf(b).arrayLen(), + var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) { + .Array, .Vector => sema.typeOf(b).arrayLen(mod), .Undefined => null, else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{ elem_ty.fmt(sema.mod), @@ -21230,16 +21761,16 @@ fn analyzeShuffle( if (maybe_a_len == null and maybe_b_len == null) { return sema.addConstUndef(res_ty); } - const a_len = maybe_a_len orelse maybe_b_len.?; - const b_len = maybe_b_len orelse a_len; + const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?); + const b_len = @intCast(u32, maybe_b_len orelse a_len); - const a_ty = try Type.Tag.vector.create(sema.arena, .{ + const a_ty = try mod.vectorType(.{ .len = a_len, - .elem_type = elem_ty, + .child = elem_ty.toIntern(), }); - const b_ty = try Type.Tag.vector.create(sema.arena, .{ + const b_ty = try mod.vectorType(.{ .len = b_len, - .elem_type = elem_ty, + .child = elem_ty.toIntern(), }); if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src); @@ -21250,12 +21781,10 @@ fn analyzeShuffle( .{ b_len, b_src, b_ty }, }; - var i: usize = 0; - while (i < mask_len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const elem = mask.elemValueBuffer(sema.mod, i, &buf); - if (elem.isUndef()) continue; - const int = elem.toSignedInt(sema.mod.getTarget()); + for (0..@intCast(usize, mask_len)) |i| { + const elem = try mask.elemValue(sema.mod, i); + if (elem.isUndef(mod)) continue; + const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; var chosen: u32 = undefined; if (int >= 0) { @@ -21287,26 +21816,21 @@ fn analyzeShuffle( if (try sema.resolveMaybeUndefVal(a)) |a_val| { if (try sema.resolveMaybeUndefVal(b)) |b_val| { - const values = try sema.arena.alloc(Value, mask_len); - - i = 0; - while (i < mask_len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const mask_elem_val = mask.elemValueBuffer(sema.mod, i, &buf); - if (mask_elem_val.isUndef()) { - values[i] = Value.undef; + const values = try sema.arena.alloc(InternPool.Index, mask_len); + for (values, 0..) 
|*value, i| { + const mask_elem_val = try mask.elemValue(sema.mod, i); + if (mask_elem_val.isUndef(mod)) { + value.* = try mod.intern(.{ .undef = elem_ty.toIntern() }); continue; } - const int = mask_elem_val.toSignedInt(sema.mod.getTarget()); + const int = mask_elem_val.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); - if (int >= 0) { - values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned); - } else { - values[i] = try b_val.elemValue(sema.mod, sema.arena, unsigned); - } + values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod); } - const res_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(res_ty, res_val); + return sema.addConstant(res_ty, (try mod.intern(.{ .aggregate = .{ + .ty = res_ty.toIntern(), + .storage = .{ .elems = values }, + } })).toValue()); } } @@ -21320,27 +21844,27 @@ fn analyzeShuffle( const max_src = if (a_len > b_len) a_src else b_src; const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len)); - const expand_mask_values = try sema.arena.alloc(Value, max_len); - i = 0; - while (i < min_len) : (i += 1) { - expand_mask_values[i] = try Value.Tag.int_u64.create(sema.arena, i); + const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); + for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| { + expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern(); } - while (i < max_len) : (i += 1) { - expand_mask_values[i] = Value.negative_one; + for (@intCast(usize, min_len)..@intCast(usize, max_len)) |i| { + expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern(); } - const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values); + const expand_mask = try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = @intCast(u32, max_len), .child = .comptime_int_type })).toIntern(), + .storage = .{ .elems = expand_mask_values }, + } }); if (a_len < b_len) { const undef = try sema.addConstUndef(a_ty); - a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask, @intCast(u32, max_len)); + a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(u32, max_len)); } else { const undef = try sema.addConstUndef(b_ty); - b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask, @intCast(u32, max_len)); + b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(u32, max_len)); } } - const mask_index = @intCast(u32, sema.air_values.items.len); - try sema.air_values.append(sema.gpa, mask); return block.addInst(.{ .tag = .shuffle, .data = .{ .ty_pl = .{ @@ -21348,7 +21872,7 @@ fn analyzeShuffle( .payload = try block.sema.addExtra(Air.Shuffle{ .a = a, .b = b, - .mask = mask_index, + .mask = mask.toIntern(), .mask_len = mask_len, }), } }, @@ -21356,6 +21880,7 @@ fn analyzeShuffle( } fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -21369,16 +21894,22 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const pred_uncoerced = try sema.resolveInst(extra.pred); const pred_ty = sema.typeOf(pred_uncoerced); - const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison()) { - .Vector, .Array => pred_ty.arrayLen(), - 
else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}), + const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) { + .Vector, .Array => pred_ty.arrayLen(mod), + else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}), }; - const vec_len = try sema.usizeCast(block, pred_src, vec_len_u64); + const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64)); - const bool_vec_ty = try Type.vector(sema.arena, vec_len, Type.bool); + const bool_vec_ty = try mod.vectorType(.{ + .len = vec_len, + .child = .bool_type, + }); const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src); - const vec_ty = try Type.vector(sema.arena, vec_len, elem_ty); + const vec_ty = try mod.vectorType(.{ + .len = vec_len, + .child = elem_ty.toIntern(), + }); const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src); const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src); @@ -21387,45 +21918,40 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const maybe_b = try sema.resolveMaybeUndefVal(b); const runtime_src = if (maybe_pred) |pred_val| rs: { - if (pred_val.isUndef()) return sema.addConstUndef(vec_ty); + if (pred_val.isUndef(mod)) return sema.addConstUndef(vec_ty); if (maybe_a) |a_val| { - if (a_val.isUndef()) return sema.addConstUndef(vec_ty); + if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty); if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); - var buf: Value.ElemValueBuffer = undefined; - const elems = try sema.gpa.alloc(Value, vec_len); + const elems = try sema.gpa.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf); + const pred_elem_val = try pred_val.elemValue(mod, i); const should_choose_a = pred_elem_val.toBool(); - if (should_choose_a) { - elem.* = a_val.elemValueBuffer(sema.mod, i, &buf); - } else { - elem.* = b_val.elemValueBuffer(sema.mod, i, &buf); - } + elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod); } - return sema.addConstant( - vec_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(vec_ty, (try mod.intern(.{ .aggregate = .{ + .ty = vec_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { break :rs b_src; } } else { if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } break :rs a_src; } } else rs: { if (maybe_a) |a_val| { - if (a_val.isUndef()) return sema.addConstUndef(vec_ty); + if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } break :rs pred_src; }; @@ -21489,6 +22015,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
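zirSelect above coerces the predicate to @Vector(len, bool) and both branches to @Vector(len, elem_ty); when all three operands are comptime-known it interns a new aggregate, picking each lane from a or b according to the predicate element. A minimal sketch of those semantics (illustrative, not from this patch):

const std = @import("std");

test "@select chooses lanes by a bool vector" {
    const pred: @Vector(4, bool) = .{ true, false, true, false };
    const a: @Vector(4, i32) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, i32) = .{ -1, -2, -3, -4 };
    const r = @select(i32, pred, a, b);
    try std.testing.expect(r[0] == 1); // lane 0: pred true, take `a`
    try std.testing.expect(r[1] == -2); // lane 1: pred false, take `b`
}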
} fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data; const src = inst_data.src(); @@ -21505,7 +22032,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Enum => if (op != .Xchg) { return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{}); }, @@ -21535,8 +22062,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { - const target = sema.mod.getTarget(); + if (ptr_val.isComptimeMutablePtr(mod)) { const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const new_val = switch (op) { @@ -21544,12 +22070,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Xchg => operand_val, .Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty), .Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty), - .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, sema.mod), - .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, sema.mod), - .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, sema.mod), - .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, sema.mod), - .Max => stored_val.numberMax (operand_val, target), - .Min => stored_val.numberMin (operand_val, target), + .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, mod), + .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, mod), + .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, mod), + .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, mod), + .Max => stored_val.numberMax (operand_val, mod), + .Min => stored_val.numberMin (operand_val, mod), // zig fmt: on }; try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty); @@ -21623,18 +22149,19 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
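When the pointer is comptime-mutable, the zirAtomicRmw hunk above folds the whole read-modify-write at compile time: it dereferences the stored value, applies the op (Xchg, wrapping Add/Sub, the bitwise ops, or numberMax/numberMin, all now taking mod instead of target), and stores the result back with storePtrVal; the old value is what the builtin returns. A sketch of that contract, assuming the capitalized atomic-ordering spelling of this compiler era:

const std = @import("std");

test "@atomicRmw applies the operation and returns the previous value" {
    var x: u32 = 5;
    const prev = @atomicRmw(u32, &x, .Add, 2, .SeqCst);
    try std.testing.expect(prev == 5);
    try std.testing.expect(x == 7);
}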
const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1); const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2); const maybe_addend = try sema.resolveMaybeUndefVal(addend); + const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .Vector => {}, else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}), } const runtime_src = if (maybe_mulend1) |mulend1_val| rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef()) return sema.addConstUndef(ty); + if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty); if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod); return sema.addConstant(ty, result_val); } else { @@ -21642,16 +22169,16 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } } else { if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); } break :rs mulend2_src; } } else rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef()) return sema.addConstUndef(ty); + if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty); } if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); } break :rs mulend1_src; }; @@ -21673,6 +22200,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const modifier_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const func_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -21686,7 +22214,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstValue(block, modifier_src, modifier_ref, "call modifier must be comptime-known"); - var modifier = modifier_val.toEnum(std.builtin.CallModifier); + var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val); switch (modifier) { // These can be upgraded to comptime or nosuspend calls. .auto, .never_tail, .no_async => { @@ -21732,18 +22260,17 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); - if (!args_ty.isTuple() and args_ty.tag() != .empty_struct_literal) { + if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)}); } - var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount()); + var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod)); for (resolved_args, 0..) 
|*resolved, i| { resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty); } const callee_ty = sema.typeOf(func); const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false); - const ensure_result_used = extra.flags.ensure_result_used; return sema.analyzeCall(block, func, func_ty, func_src, call_src, modifier, ensure_result_used, resolved_args, null, null); } @@ -21757,19 +22284,21 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type); - const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, name_src, extra.field_name, "field name must be comptime-known"); const field_ptr = try sema.resolveInst(extra.field_ptr); const field_ptr_ty = sema.typeOf(field_ptr); + const mod = sema.mod; + const ip = &mod.intern_pool; - if (parent_ty.zigTypeTag() != .Struct and parent_ty.zigTypeTag() != .Union) { + if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) { return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)}); } try sema.resolveTypeLayout(parent_ty); - const field_index = switch (parent_ty.zigTypeTag()) { + const field_index = switch (parent_ty.zigTypeTag(mod)) { .Struct => blk: { - if (parent_ty.isTuple()) { - if (mem.eql(u8, field_name, "len")) { + if (parent_ty.isTuple(mod)) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src); @@ -21781,27 +22310,27 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr else => unreachable, }; - if (parent_ty.zigTypeTag() == .Struct and parent_ty.structFieldIsComptime(field_index)) { + if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) { return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{}); } try sema.checkPtrOperand(block, ptr_src, field_ptr_ty); - const field_ptr_ty_info = field_ptr_ty.ptrInfo().data; + const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod); var ptr_ty_data: Type.Payload.Pointer.Data = .{ - .pointee_type = parent_ty.structFieldType(field_index), + .pointee_type = parent_ty.structFieldType(field_index, mod), .mutable = field_ptr_ty_info.mutable, .@"addrspace" = field_ptr_ty_info.@"addrspace", }; - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{}); } else { ptr_ty_data.@"align" = blk: { - if (parent_ty.castTag(.@"struct")) |struct_obj| { - break :blk struct_obj.data.fields.values()[field_index].abi_align; - } else if (parent_ty.cast(Type.Payload.Union)) |union_obj| { - break :blk union_obj.data.fields.values()[field_index].abi_align; + if (mod.typeToStruct(parent_ty)) |struct_obj| { + break :blk struct_obj.fields.values()[field_index].abi_align; + } else if (mod.typeToUnion(parent_ty)) |union_obj| { + break :blk union_obj.fields.values()[field_index].abi_align; } else { break :blk 0; } @@ -21815,19 +22344,24 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileEr
     const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);

     if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
-        const payload = field_ptr_val.castTag(.field_ptr) orelse {
-            return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{});
-        };
-        if (payload.data.field_index != field_index) {
+        const field = switch (ip.indexToKey(field_ptr_val.toIntern())) {
+            .ptr => |ptr| switch (ptr.addr) {
+                .field => |field| field,
+                else => null,
+            },
+            else => null,
+        } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{});
+
+        if (field.index != field_index) {
             const msg = msg: {
                 const msg = try sema.errMsg(
                     block,
                     src,
-                    "field '{s}' has index '{d}' but pointer value is index '{d}' of struct '{}'",
+                    "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'",
                     .{
-                        field_name,
+                        field_name.fmt(ip),
                         field_index,
-                        payload.data.field_index,
+                        field.index,
                         parent_ty.fmt(sema.mod),
                     },
                 );
@@ -21837,7 +22371,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
             };
             return sema.failWithOwnedErrorMsg(msg);
         }
-        return sema.addConstant(result_ptr, payload.data.container_ptr);
+        return sema.addConstant(result_ptr, field.base.toValue());
     }

     try sema.requireRuntimeBlock(block, src, ptr_src);
@@ -21913,15 +22447,14 @@ fn analyzeMinMax(
 ) CompileError!Air.Inst.Ref {
     assert(operands.len == operand_srcs.len);
     assert(operands.len > 0);
+    const mod = sema.mod;

     if (operands.len == 1) return operands[0];

-    const mod = sema.mod;
-    const target = mod.getTarget();
     const opFunc = switch (air_tag) {
         .min => Value.numberMin,
         .max => Value.numberMax,
         else => unreachable,
     };

     // First, find all comptime-known arguments, and get their min/max
@@ -21939,32 +22472,30 @@ fn analyzeMinMax(
             runtime_known.unset(operand_idx);

-            if (cur_val.isUndef()) continue; // result is also undef
-            if (operand_val.isUndef()) {
+            if (cur_val.isUndef(mod)) continue; // result is also undef
+            if (operand_val.isUndef(mod)) {
                 cur_minmax = try sema.addConstUndef(simd_op.result_ty);
                 continue;
             }
-            try sema.resolveLazyValue(cur_val);
-            try sema.resolveLazyValue(operand_val);
+            const resolved_cur_val = try sema.resolveLazyValue(cur_val);
+            const resolved_operand_val = try sema.resolveLazyValue(operand_val);
             const vec_len = simd_op.len orelse {
-                const result_val = opFunc(cur_val, operand_val, target);
+                const result_val = opFunc(resolved_cur_val, resolved_operand_val, mod);
                 cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
                 continue;
             };
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const elems = try sema.arena.alloc(Value, vec_len);
+            const elems = try sema.arena.alloc(InternPool.Index, vec_len);
             for (elems, 0..)
|*elem, i| { - const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf); - elem.* = opFunc(lhs_elem_val, rhs_elem_val, target); + const lhs_elem_val = try resolved_cur_val.elemValue(mod, i); + const rhs_elem_val = try resolved_operand_val.elemValue(mod, i); + elem.* = try opFunc(lhs_elem_val, rhs_elem_val, mod).intern(simd_op.scalar_ty, mod); } - cur_minmax = try sema.addConstant( - simd_op.result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + cur_minmax = try sema.addConstant(simd_op.result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = simd_op.result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { runtime_known.unset(operand_idx); cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val); @@ -21984,28 +22515,31 @@ fn analyzeMinMax( break :refined orig_ty; } - const refined_ty = if (orig_ty.zigTypeTag() == .Vector) blk: { - const elem_ty = orig_ty.childType(); - const len = orig_ty.vectorLen(); + const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: { + const elem_ty = orig_ty.childType(mod); + const len = orig_ty.vectorLen(mod); if (len == 0) break :blk orig_ty; if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats - var cur_min: Value = try val.elemValue(mod, sema.arena, 0); + var cur_min: Value = try val.elemValue(mod, 0); var cur_max: Value = cur_min; for (1..len) |idx| { - const elem_val = try val.elemValue(mod, sema.arena, idx); - if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef - if (Value.order(elem_val, cur_min, target).compare(.lt)) cur_min = elem_val; - if (Value.order(elem_val, cur_max, target).compare(.gt)) cur_max = elem_val; + const elem_val = try val.elemValue(mod, idx); + if (elem_val.isUndef(mod)) break :blk orig_ty; // can't refine undef + if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val; + if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val; } - const refined_elem_ty = try Type.intFittingRange(target, sema.arena, cur_min, cur_max); - break :blk try Type.vector(sema.arena, len, refined_elem_ty); + const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max); + break :blk try mod.vectorType(.{ + .len = len, + .child = refined_elem_ty.toIntern(), + }); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats - if (val.isUndef()) break :blk orig_ty; // can't refine undef - break :blk try Type.intFittingRange(target, sema.arena, val, val); + if (val.isUndef(mod)) break :blk orig_ty; // can't refine undef + break :blk try mod.intFittingRange(val, val); }; // Apply the refined type to the current value - this isn't strictly necessary in the @@ -22016,7 +22550,7 @@ fn analyzeMinMax( if (std.debug.runtime_safety) { assert(try sema.intFitsInType(val, refined_ty, null)); } - cur_minmax = try sema.addConstant(refined_ty, val); + cur_minmax = try sema.coerceInMemory(block, val, orig_ty, refined_ty, src); } break :refined refined_ty; @@ -22032,7 +22566,7 @@ fn analyzeMinMax( // If the comptime-known part is undef we can avoid emitting actual instructions later const known_undef = if (cur_minmax) |operand| blk: { const val = (try sema.resolveMaybeUndefVal(operand)).?; - break :blk val.isUndef(); + break :blk val.isUndef(mod); } else false; if (cur_minmax == null) { @@ -22061,29 +22595,32 @@ fn analyzeMinMax( // Finally, refine the type based on the comptime-known bound. 
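For context on the refinement this block performs: when one operand of @min/@max is comptime-known, the result type can shrink to fit the tighter bound. A minimal user-level sketch of the behavior being implemented (assuming 0.11-dev builtin semantics; illustrative, not part of the patch):

const std = @import("std");

test "@min refines the result type from a comptime-known bound" {
    var x: u32 = 1000; // runtime-known operand
    const y = @min(x, 200); // result is bounded by [0, 200], which fits in u8
    try std.testing.expect(@TypeOf(y) == u8);
}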
if (known_undef) break :refine; // can't refine undef const unrefined_ty = sema.typeOf(cur_minmax.?); - const is_vector = unrefined_ty.zigTypeTag() == .Vector; - const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty; - const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty; + const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector; + const comptime_elem_ty = if (is_vector) comptime_ty.childType(mod) else comptime_ty; + const unrefined_elem_ty = if (is_vector) unrefined_ty.childType(mod) else unrefined_ty; if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(sema.arena, target), - .max => try comptime_elem_ty.minInt(sema.arena, target), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(mod, unrefined_elem_ty), + .max => try comptime_elem_ty.minInt(mod, comptime_elem_ty), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, target), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, target), + .min => try comptime_elem_ty.maxInt(mod, comptime_elem_ty), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(mod, unrefined_elem_ty), else => unreachable, }; // Find the smallest type which can contain these bounds - const final_elem_ty = try Type.intFittingRange(target, sema.arena, min_val, max_val); + const final_elem_ty = try mod.intFittingRange(min_val, max_val); const final_ty = if (is_vector) - try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty) + try mod.vectorType(.{ + .len = unrefined_ty.vectorLen(mod), + .child = final_elem_ty.toIntern(), + }) else final_elem_ty; @@ -22098,7 +22635,7 @@ fn analyzeMinMax( fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref { const mod = sema.mod; - const info = sema.typeOf(ptr).ptrInfo().data; + const info = sema.typeOf(ptr).ptrInfo(mod); if (info.size == .One) { // Already an array pointer. return ptr; @@ -22132,8 +22669,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr); const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr); const target = sema.mod.getTarget(); + const mod = sema.mod; - if (dest_ty.isConstPtr()) { + if (dest_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); } @@ -22194,9 +22732,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { - if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!dest_ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { - const len_u64 = (try len_val.?.getUnsignedIntAdvanced(target, sema)).?; + const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22239,12 +22777,12 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // lowering. The AIR instruction requires pointers with element types of // equal ABI size. 
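At the language level, the checks in this function enforce the contract of the @memcpy builtin it lowers; a small sketch of conforming usage (assumed contract: matching lengths, non-overlapping regions; illustrative, not part of the patch):

const std = @import("std");

test "@memcpy copies equal-length, non-overlapping regions" {
    var src = [_]u8{ 1, 2, 3, 4 };
    var dst: [4]u8 = undefined;
    @memcpy(&dst, &src); // lengths must match; regions must not alias
    try std.testing.expectEqualSlices(u8, &src, &dst);
}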
- if (dest_ty.zigTypeTag() != .Pointer or src_ty.zigTypeTag() != .Pointer) { + if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{}); } - const dest_elem_ty = dest_ty.elemType2(); - const src_elem_ty = src_ty.elemType2(); + const dest_elem_ty = dest_ty.elemType2(mod); + const src_elem_ty = src_ty.elemType2(mod); if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{}); } @@ -22255,7 +22793,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { - const len = val.toUnsignedInt(target); + const len = val.toUnsignedInt(mod); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. return; @@ -22268,7 +22806,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // Change the src from slice to a many pointer, to avoid multiple ptr // slice extractions in AIR instructions. const new_src_ptr_ty = sema.typeOf(new_src_ptr); - if (new_src_ptr_ty.isSlice()) { + if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } else if (dest_len == .none and len_val == null) { @@ -22276,7 +22814,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr); new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, .unneeded, dest_src, dest_src, dest_src, false); const new_src_ptr_ty = sema.typeOf(new_src_ptr); - if (new_src_ptr_ty.isSlice()) { + if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } @@ -22295,14 +22833,30 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // Extract raw pointer from dest slice. The AIR instructions could support them, but // it would cause redundant machine code instructions. 
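The runtime safety check assembled below reduces to an address-range disjointness test (ok1: dest >= src + len, ok2: src >= dest + len, per the comments that follow). A standalone sketch of that predicate, for illustration only:

const std = @import("std");

// Two same-length regions are disjoint only if one starts at or past
// the end of the other.
fn regionsDisjoint(dest: usize, src: usize, len: usize) bool {
    return dest >= src + len or src >= dest + len;
}

test "disjointness predicate" {
    try std.testing.expect(regionsDisjoint(32, 0, 16)); // well separated
    try std.testing.expect(!regionsDisjoint(8, 0, 16)); // overlapping
}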
const new_dest_ptr_ty = sema.typeOf(new_dest_ptr); - const raw_dest_ptr = if (new_dest_ptr_ty.isSlice()) + const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty) - else - new_dest_ptr; + else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: { + var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type; + assert(dest_manyptr_ty_key.flags.size == .One); + dest_manyptr_ty_key.child = dest_elem_ty.toIntern(); + dest_manyptr_ty_key.flags.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src); + } else new_dest_ptr; + + const new_src_ptr_ty = sema.typeOf(new_src_ptr); + const raw_src_ptr = if (new_src_ptr_ty.isSlice(mod)) + try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty) + else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: { + var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type; + assert(src_manyptr_ty_key.flags.size == .One); + src_manyptr_ty_key.child = src_elem_ty.toIntern(); + src_manyptr_ty_key.flags.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(src_manyptr_ty_key), new_src_ptr, src_src); + } else new_src_ptr; // ok1: dest >= src + len // ok2: src >= dest + len - const src_plus_len = try sema.analyzePtrArithmetic(block, src, new_src_ptr, len, .ptr_add, src_src, src); + const src_plus_len = try sema.analyzePtrArithmetic(block, src, raw_src_ptr, len, .ptr_add, src_src, src); const dest_plus_len = try sema.analyzePtrArithmetic(block, src, raw_dest_ptr, len, .ptr_add, dest_src, src); const ok1 = try block.addBinOp(.cmp_gte, raw_dest_ptr, src_plus_len); const ok2 = try block.addBinOp(.cmp_gte, new_src_ptr, dest_plus_len); @@ -22320,6 +22874,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -22330,25 +22887,24 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ty = sema.typeOf(dest_ptr); try checkMemOperand(sema, block, dest_src, dest_ptr_ty); - if (dest_ptr_ty.isConstPtr()) { + if (dest_ptr_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memset constant pointer", .{}); } - const dest_elem_ty = dest_ptr_ty.elemType2(); - const target = sema.mod.getTarget(); + const dest_elem_ty = dest_ptr_ty.elemType2(mod); const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: { - const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src); + const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len"), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; - const len_u64 = (try len_val.getUnsignedIntAdvanced(target, sema)).?; + const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
return; } - if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| { for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22426,6 +22982,7 @@ fn zirVarExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 }; const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 }; @@ -22461,47 +23018,33 @@ fn zirVarExtended( else uncasted_init; - break :blk (try sema.resolveMaybeUndefVal(init)) orelse - return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known"); - } else Value.initTag(.unreachable_value); + break :blk ((try sema.resolveMaybeUndefVal(init)) orelse + return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known")).toIntern(); + } else .none; try sema.validateVarType(block, ty_src, var_ty, small.is_extern); - const new_var = try sema.gpa.create(Module.Var); - errdefer sema.gpa.destroy(new_var); - - log.debug("created variable {*} owner_decl: {*} ({s})", .{ - new_var, sema.owner_decl, sema.owner_decl.name, - }); - - new_var.* = .{ - .owner_decl = sema.owner_decl_index, + return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{ + .ty = var_ty.toIntern(), .init = init_val, + .decl = sema.owner_decl_index, + .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString( + sema.gpa, + try sema.handleExternLibName(block, ty_src, lname), + )).toOptional() else .none, .is_extern = small.is_extern, - .is_mutable = true, .is_threadlocal = small.is_threadlocal, - .is_weak_linkage = false, - .lib_name = null, - }; - - if (lib_name) |lname| { - new_var.lib_name = try sema.handleExternLibName(block, ty_src, lname); - } - - const result = try sema.addConstant( - var_ty, - try Value.Tag.variable.create(sema.arena, new_var), - ); - return result; + } })).toValue()); } fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node }; const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node }; @@ -22532,10 +23075,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += body.len; const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29, "alignment must be comptime-known"); - if (val.tag() == .generic_poison) { + if (val.isGenericPoison()) { break :blk null; } - const alignment = @intCast(u32, val.toUnsignedInt(target)); + const alignment = @intCast(u32, val.toUnsignedInt(mod)); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk 0; @@ -22551,7 +23094,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - const alignment = @intCast(u32, align_tv.val.toUnsignedInt(target)); + const alignment = @intCast(u32, 
align_tv.val.toUnsignedInt(mod));
             try sema.validateAlign(block, align_src, alignment);
             if (alignment == target_util.defaultFunctionAlignment(target)) {
                 break :blk 0;
@@ -22568,10 +23111,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const addrspace_ty = try sema.getBuiltinType("AddressSpace");
         const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, "addrspace must be comptime-known");
-        if (val.tag() == .generic_poison) {
+        if (val.isGenericPoison()) {
             break :blk null;
         }
-        break :blk val.toEnum(std.builtin.AddressSpace);
+        break :blk mod.toEnum(std.builtin.AddressSpace, val);
     } else if (extra.data.bits.has_addrspace_ref) blk: {
         const addrspace_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
         extra_index += 1;
@@ -22581,7 +23124,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             },
             else => |e| return e,
         };
-        break :blk addrspace_tv.val.toEnum(std.builtin.AddressSpace);
+        break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
     } else target_util.defaultAddressSpace(target, .function);

     const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: {
@@ -22590,16 +23133,16 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const body = sema.code.extra[extra_index..][0..body_len];
         extra_index += body.len;

-        const ty = Type.initTag(.const_slice_u8);
+        const ty = Type.slice_const_u8;
         const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
-        if (val.tag() == .generic_poison) {
+        if (val.isGenericPoison()) {
             break :blk FuncLinkSection{ .generic = {} };
         }
-        break :blk FuncLinkSection{ .explicit = try val.toAllocatedBytes(ty, sema.arena, sema.mod) };
+        break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) };
     } else if (extra.data.bits.has_section_ref) blk: {
         const section_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
         extra_index += 1;
-        const section_name = sema.resolveConstString(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) {
+        const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) {
             error.GenericPoison => {
                 break :blk FuncLinkSection{ .generic = {} };
             },
@@ -22616,10 +23159,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const cc_ty = try sema.getBuiltinType("CallingConvention");
         const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, "calling convention must be comptime-known");
-        if (val.tag() == .generic_poison) {
+        if (val.isGenericPoison()) {
             break :blk null;
         }
-        break :blk val.toEnum(std.builtin.CallingConvention);
+        break :blk mod.toEnum(std.builtin.CallingConvention, val);
     } else if (extra.data.bits.has_cc_ref) blk: {
         const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
         extra_index += 1;
@@ -22629,7 +23172,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             },
             else => |e| return e,
         };
-        break :blk cc_tv.val.toEnum(std.builtin.CallingConvention);
+        break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val);
     } else if (sema.owner_decl.is_exported and has_body)
         .C
     else
@@ -22642,20 +23185,18 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         extra_index += body.len;

         const val = try sema.resolveGenericBody(block, ret_src, body, inst,
Type.type, "return type must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - const ty = try val.toType(&buffer).copy(sema.arena); + const ty = val.toType(); break :blk ty; } else if (extra.data.bits.has_ret_ty_ref) blk: { const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) { error.GenericPoison => { - break :blk Type.initTag(.generic_poison); + break :blk Type.generic_poison; }, else => |e| return e, }; - var buffer: Value.ToTypeBuffer = undefined; - const ty = try ret_ty_tv.val.toType(&buffer).copy(sema.arena); + const ty = ret_ty_tv.val.toType(); break :blk ty; } else Type.void; @@ -22727,13 +23268,14 @@ fn zirCDefine( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; const name = try sema.resolveConstString(block, name_src, extra.lhs, "name of macro being undefined must be comptime-known"); const rhs = try sema.resolveInst(extra.rhs); - if (sema.typeOf(rhs).zigTypeTag() != .Void) { + if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) { const value = try sema.resolveConstString(block, val_src, extra.rhs, "value of macro being undefined must be comptime-known"); try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value }); } else { @@ -22799,27 +23341,29 @@ fn resolvePrefetchOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.PrefetchOptions { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const options_ty = try sema.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); - const target = sema.mod.getTarget(); const rw_src = sema.maybeOptionsSrc(block, src, "rw"); const locality_src = sema.maybeOptionsSrc(block, src, "locality"); const cache_src = sema.maybeOptionsSrc(block, src, "cache"); - const rw = try sema.fieldVal(block, src, options, "rw", rw_src); + const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw"), rw_src); const rw_val = try sema.resolveConstValue(block, rw_src, rw, "prefetch read/write must be comptime-known"); - const locality = try sema.fieldVal(block, src, options, "locality", locality_src); + const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality"), locality_src); const locality_val = try sema.resolveConstValue(block, locality_src, locality, "prefetch locality must be comptime-known"); - const cache = try sema.fieldVal(block, src, options, "cache", cache_src); + const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache"), cache_src); const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known"); return std.builtin.PrefetchOptions{ - .rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw), - .locality = @intCast(u2, locality_val.toUnsignedInt(target)), - .cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache), + .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), + .locality = @intCast(u2, locality_val.toUnsignedInt(mod)), + .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -22862,34 
+23406,40 @@ fn resolveExternOptions( block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, -) CompileError!std.builtin.ExternOptions { +) CompileError!struct { + name: InternPool.NullTerminatedString, + library_name: InternPool.OptionalNullTerminatedString = .none, + linkage: std.builtin.GlobalLinkage = .Strong, + is_thread_local: bool = false, +} { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const options_inst = try sema.resolveInst(zir_ref); const extern_options_ty = try sema.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); - const mod = sema.mod; const name_src = sema.maybeOptionsSrc(block, src, "name"); const library_src = sema.maybeOptionsSrc(block, src, "library"); const linkage_src = sema.maybeOptionsSrc(block, src, "linkage"); const thread_local_src = sema.maybeOptionsSrc(block, src, "thread_local"); - const name_ref = try sema.fieldVal(block, src, options, "name", name_src); + const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src); const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known"); - const name = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); + const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); - const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src); + const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name"), library_src); const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known"); - const linkage_ref = try sema.fieldVal(block, src, options, "linkage", linkage_src); + const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known"); - const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage); + const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); + const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local"), thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); - const library_name = if (!library_name_val.isNull()) blk: { - const payload = library_name_val.castTag(.opt_payload).?.data; - const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); + const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: { + const library_name = try payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); if (library_name.len == 0) { return sema.fail(block, library_src, "library name cannot be empty", .{}); } @@ -22904,9 +23454,9 @@ fn resolveExternOptions( return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{}); } - return std.builtin.ExternOptions{ - .name = name, - .library_name = library_name, + return .{ + .name = try ip.getOrPutString(gpa, name), + .library_name = try ip.getOrPutStringOpt(gpa, library_name), .linkage = linkage, .is_thread_local = 
is_thread_local_val.toBool(), }; @@ -22917,21 +23467,21 @@ fn zirBuiltinExtern( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; var ty = try sema.resolveType(block, ty_src, extra.lhs); - if (!ty.isPtrAtRuntime()) { + if (!ty.isPtrAtRuntime(mod)) { return sema.fail(block, ty_src, "expected (optional) pointer", .{}); } - if (!try sema.validateExternType(ty.childType(), .other)) { + if (!try sema.validateExternType(ty.childType(mod), .other)) { const msg = msg: { - const mod = sema.mod; const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -22945,52 +23495,51 @@ fn zirBuiltinExtern( else => |e| return e, }; - if (options.linkage == .Weak and !ty.ptrAllowsZero()) { - ty = try Type.optional(sema.arena, ty); + if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) { + ty = try Type.optional(sema.arena, ty, mod); } // TODO check duplicate extern - const new_decl_index = try sema.mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); - errdefer sema.mod.destroyDecl(new_decl_index); - const new_decl = sema.mod.declPtr(new_decl_index); - new_decl.name = try sema.gpa.dupeZ(u8, options.name); + const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); + errdefer mod.destroyDecl(new_decl_index); + const new_decl = mod.declPtr(new_decl_index); + new_decl.name = options.name; { - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const new_var = try new_decl_arena_allocator.create(Module.Var); - new_var.* = .{ - .owner_decl = sema.owner_decl_index, - .init = Value.initTag(.unreachable_value), + const new_var = try mod.intern(.{ .variable = .{ + .ty = ty.toIntern(), + .init = .none, + .decl = sema.owner_decl_index, .is_extern = true, - .is_mutable = false, + .is_const = true, .is_threadlocal = options.is_thread_local, .is_weak_linkage = options.linkage == .Weak, - .lib_name = null, - }; + } }); new_decl.src_line = sema.owner_decl.src_line; // We only access this decl through the decl_ref with the correct type created // below, so this type doesn't matter - new_decl.ty = Type.Tag.init(.anyopaque); - new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var); + new_decl.ty = ty; + new_decl.val = new_var.toValue(); new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; - new_decl.generation = sema.mod.generation; - - try new_decl.finalizeNewArena(&new_decl_arena); + new_decl.generation = mod.generation; } - try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); try sema.ensureDeclAnalyzed(new_decl_index); - const ref = try Value.Tag.decl_ref.create(sema.arena, 
new_decl_index); - return sema.addConstant(ty, ref); + return sema.addConstant(ty, try mod.getCoerced((try mod.intern(.{ .ptr = .{ + .ty = switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => ty.toIntern(), + .opt_type => |child_type| child_type, + else => unreachable, + }, + .addr = .{ .decl = new_decl_index }, + } })).toValue(), ty)); } fn zirWorkItem( @@ -23073,7 +23622,7 @@ fn validateVarType( const msg = try sema.errMsg(block, src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), var_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), var_ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -23086,8 +23635,8 @@ fn validateVarType( errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), var_ty); - if (var_ty.zigTypeTag() == .ComptimeInt or var_ty.zigTypeTag() == .ComptimeFloat) { + try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), var_ty); + if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) { try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}); } @@ -23101,8 +23650,9 @@ fn validateRunTimeType( var_ty: Type, is_extern: bool, ) CompileError!bool { + const mod = sema.mod; var ty = var_ty; - while (true) switch (ty.zigTypeTag()) { + while (true) switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, @@ -23125,23 +23675,22 @@ fn validateRunTimeType( => return false, .Pointer => { - const elem_ty = ty.childType(); - switch (elem_ty.zigTypeTag()) { + const elem_ty = ty.childType(mod); + switch (elem_ty.zigTypeTag(mod)) { .Opaque => return true, - .Fn => return elem_ty.isFnOrHasRuntimeBits(), + .Fn => return elem_ty.isFnOrHasRuntimeBits(mod), else => ty = elem_ty, } }, .Opaque => return is_extern, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); return sema.validateRunTimeType(child_ty, is_extern); }, - .Array, .Vector => ty = ty.elemType(), + .Array, .Vector => ty = ty.childType(mod), - .ErrorUnion => ty = ty.errorUnionPayload(), + .ErrorUnion => ty = ty.errorUnionPayload(mod), .Struct, .Union => { const resolved_ty = try sema.resolveTypeFields(ty); @@ -23151,7 +23700,7 @@ fn validateRunTimeType( }; } -const TypeSet = std.HashMapUnmanaged(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage); +const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void); fn explainWhyTypeIsComptime( sema: *Sema, @@ -23174,7 +23723,7 @@ fn explainWhyTypeIsComptimeInner( type_set: *TypeSet, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, @@ -23208,12 +23757,12 @@ fn explainWhyTypeIsComptimeInner( }, .Array, .Vector => { - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set); }, .Pointer => { - const elem_ty = ty.elemType2(); - if (elem_ty.zigTypeTag() == .Fn) { - const fn_info = elem_ty.fnInfo(); + const elem_ty = ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Fn) { + const fn_info = mod.typeToFunc(elem_ty).?; if (fn_info.is_generic) { try mod.errNoteNonLazy(src_loc, 
msg, "function is generic", .{}); } @@ -23221,29 +23770,27 @@ fn explainWhyTypeIsComptimeInner( .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}), else => {}, } - if (fn_info.return_type.comptimeOnly()) { + if (fn_info.return_type.toType().comptimeOnly(mod)) { try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{}); } return; } - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(&buf), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set); }, .ErrorUnion => { - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set); }, .Struct => { - if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return; + if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(ty)) |struct_obj| { for (struct_obj.fields.values(), 0..) |field, i| { - const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{ + const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = i, .range = .type, }); @@ -23258,12 +23805,11 @@ fn explainWhyTypeIsComptimeInner( }, .Union => { - if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return; + if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; - if (ty.cast(Type.Payload.Union)) |payload| { - const union_obj = payload.data; + if (mod.typeToUnion(ty)) |union_obj| { for (union_obj.fields.values(), 0..) |field, i| { - const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{ + const field_src_loc = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = i, .range = .type, }); @@ -23295,7 +23841,8 @@ fn validateExternType( ty: Type, position: ExternPosition, ) !bool { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, @@ -23313,8 +23860,8 @@ fn validateExternType( .Float, .AnyFrame, => return true, - .Pointer => return !(ty.isSlice() or try sema.typeRequiresComptime(ty)), - .Int => switch (ty.intInfo(sema.mod.getTarget()).bits) { + .Pointer => return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty)), + .Int => switch (ty.intInfo(mod).bits) { 8, 16, 32, 64, 128 => return true, else => return false, }, @@ -23323,20 +23870,18 @@ fn validateExternType( const target = sema.mod.getTarget(); // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. // The goal is to experiment with more integrated CPU/GPU code. 
- if (ty.fnCallingConvention() == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { + if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { return true; } - return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention()); + return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod)); }, .Enum => { - var buf: Type.Payload.Bits = undefined; - return sema.validateExternType(ty.intTagType(&buf), position); + return sema.validateExternType(ty.intTagType(mod), position); }, - .Struct, .Union => switch (ty.containerLayout()) { + .Struct, .Union => switch (ty.containerLayout(mod)) { .Extern => return true, .Packed => { - const target = sema.mod.getTarget(); - const bit_size = try ty.bitSizeAdvanced(target, sema); + const bit_size = try ty.bitSizeAdvanced(mod, sema); switch (bit_size) { 8, 16, 32, 64, 128 => return true, else => return false, @@ -23346,10 +23891,10 @@ fn validateExternType( }, .Array => { if (position == .ret_ty or position == .param_ty) return false; - return sema.validateExternType(ty.elemType2(), .element); + return sema.validateExternType(ty.elemType2(mod), .element); }, - .Vector => return sema.validateExternType(ty.elemType2(), .element), - .Optional => return ty.isPtrLikeOptional(), + .Vector => return sema.validateExternType(ty.elemType2(mod), .element), + .Optional => return ty.isPtrLikeOptional(mod), } } @@ -23361,7 +23906,7 @@ fn explainWhyTypeIsNotExtern( position: ExternPosition, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Opaque, .Bool, .Float, @@ -23380,17 +23925,17 @@ fn explainWhyTypeIsNotExtern( => return, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); } else { - const pointee_ty = ty.childType(); + const pointee_ty = ty.childType(mod); try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsComptime(msg, src_loc, pointee_ty); } }, .Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}), .NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}), - .Int => if (!std.math.isPowerOfTwo(ty.intInfo(sema.mod.getTarget()).bits)) { + .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) { try mod.errNoteNonLazy(src_loc, msg, "only integers with power of two bits are extern compatible", .{}); } else { try mod.errNoteNonLazy(src_loc, msg, "only integers with 8, 16, 32, 64 and 128 bits are extern compatible", .{}); @@ -23401,7 +23946,7 @@ fn explainWhyTypeIsNotExtern( try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{}); return; } - switch (ty.fnCallingConvention()) { + switch (ty.fnCallingConvention(mod)) { .Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}), .Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}), .Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}), @@ -23409,8 +23954,7 @@ fn explainWhyTypeIsNotExtern( } }, .Enum => { - var buf: Type.Payload.Bits = undefined; - const tag_ty = ty.intTagType(&buf); + const tag_ty = ty.intTagType(mod); try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", 
.{tag_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, @@ -23422,17 +23966,17 @@ fn explainWhyTypeIsNotExtern( } else if (position == .param_ty) { return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{}); } - try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element); + try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element); }, - .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element), + .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element), .Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}), } } /// Returns true if `ty` is allowed in packed types. /// Does *NOT* require `ty` to be resolved in any way. -fn validatePackedType(ty: Type) bool { - switch (ty.zigTypeTag()) { +fn validatePackedType(ty: Type, mod: *Module) bool { + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, @@ -23448,7 +23992,7 @@ fn validatePackedType(ty: Type) bool { .Fn, .Array, => return false, - .Optional => return ty.isPtrLikeOptional(), + .Optional => return ty.isPtrLikeOptional(mod), .Void, .Bool, .Float, @@ -23456,8 +24000,8 @@ fn validatePackedType(ty: Type) bool { .Vector, .Enum, => return true, - .Pointer => return !ty.isSlice(), - .Struct, .Union => return ty.containerLayout() == .Packed, + .Pointer => return !ty.isSlice(mod), + .Struct, .Union => return ty.containerLayout(mod) == .Packed, } } @@ -23468,7 +24012,7 @@ fn explainWhyTypeIsNotPacked( ty: Type, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void, .Bool, .Float, @@ -23616,7 +24160,6 @@ fn panicWithMsg( msg_inst: Air.Inst.Ref, ) !void { const mod = sema.mod; - const arena = sema.arena; if (!mod.backendSupportsFeature(.panic_fn)) { _ = try block.addNoOp(.trap); @@ -23626,16 +24169,24 @@ fn panicWithMsg( const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const target = mod.getTarget(); - const ptr_stack_trace_ty = try Type.ptr(arena, mod, .{ - .pointee_type = stack_trace_ty, - .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic + const ptr_stack_trace_ty = try mod.ptrType(.{ + .child = stack_trace_ty.toIntern(), + .flags = .{ + .address_space = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic + }, }); - const null_stack_trace = try sema.addConstant( - try Type.optional(arena, ptr_stack_trace_ty), - Value.null, - ); - const args: [3]Air.Inst.Ref = .{ msg_inst, null_stack_trace, .null_value }; - try sema.callBuiltin(block, panic_fn, .auto, &args); + const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); + const null_stack_trace = try sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_ptr_stack_trace_ty.toIntern(), + .val = .none, + } })).toValue()); + + const opt_usize_ty = try mod.optionalType(.usize_type); + const null_ret_addr = try sema.addConstant(opt_usize_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_usize_ty.toIntern(), + .val = .none, + } })).toValue()); + try sema.callBuiltin(block, panic_fn, .auto, &.{ msg_inst, null_stack_trace, null_ret_addr }); } fn panicUnwrapError( @@ -23694,20 +24245,6 @@ fn panicIndexOutOfBounds( try 
sema.safetyCheckFormatted(parent_block, ok, "panicOutOfBounds", &.{ index, len }); } -fn panicStartGreaterThanEnd( - sema: *Sema, - parent_block: *Block, - start: Air.Inst.Ref, - end: Air.Inst.Ref, -) !void { - assert(!parent_block.is_comptime); - const ok = try parent_block.addBinOp(.cmp_lte, start, end); - if (!sema.mod.comp.formatted_panics) { - return sema.addSafetyCheck(parent_block, ok, .start_index_greater_than_end); - } - try sema.safetyCheckFormatted(parent_block, ok, "panicStartGreaterThanEnd", &.{ start, end }); -} - fn panicInactiveUnionField( sema: *Sema, parent_block: *Block, @@ -23731,11 +24268,12 @@ fn panicSentinelMismatch( sentinel_index: Air.Inst.Ref, ) !void { assert(!parent_block.is_comptime); + const mod = sema.mod; const expected_sentinel_val = maybe_sentinel orelse return; const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val); const ptr_ty = sema.typeOf(ptr); - const actual_sentinel = if (ptr_ty.isSlice()) + const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null); @@ -23743,7 +24281,7 @@ fn panicSentinelMismatch( break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; - const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: { + const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: { const eql = try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq); break :ok try parent_block.addInst(.{ @@ -23753,7 +24291,7 @@ fn panicSentinelMismatch( .operation = .And, } }, }); - } else if (sentinel_ty.isSelfComparable(true)) + } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { const panic_fn = try sema.getBuiltin("checkNonScalarSentinel"); @@ -23805,12 +24343,14 @@ fn safetyPanic( block: *Block, panic_id: PanicId, ) CompileError!void { + const mod = sema.mod; + const gpa = sema.gpa; const panic_messages_ty = try sema.getBuiltinType("panic_messages"); const msg_decl_index = (try sema.namespaceLookup( block, sema.src, - panic_messages_ty.getNamespace().?, - @tagName(panic_id), + panic_messages_ty.getNamespaceIndex(mod).unwrap().?, + try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)), )).?; const msg_inst = try sema.analyzeDeclVal(block, sema.src, msg_decl_index); @@ -23842,37 +24382,38 @@ fn fieldVal( block: *Block, src: LazySrcLoc, object: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. This function takes a value and returns a value. - const arena = sema.arena; + const mod = sema.mod; + const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); // Zig allows dereferencing a single pointer during field lookup. Note that // we don't actually need to generate the dereference some field lookups, like the // length of arrays and other comptime operations. 
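The comment above describes user-visible behavior like the following minimal sketch (illustrative, not part of the patch):

const std = @import("std");

test "field lookup derefs one level of single pointer" {
    const arr = [_]u8{ 1, 2, 3 };
    const ptr = &arr; // *const [3]u8
    // 'len' is resolved through the pointer without loading the array.
    try std.testing.expectEqual(@as(usize, 3), ptr.len);
}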
- const is_pointer_to = object_ty.isSinglePointer(); + const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) - object_ty.childType() + object_ty.childType(mod) else object_ty; - switch (inner_ty.zigTypeTag()) { + switch (inner_ty.zigTypeTag(mod)) { .Array => { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.addConstant( Type.usize, - try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()), + try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), ); - } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { - const ptr_info = object_ty.ptrInfo().data; - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = ptr_info.pointee_type.childType(), + } else if (ip.stringEqlSlice(field_name, "ptr") and is_pointer_to) { + const ptr_info = object_ty.ptrInfo(mod); + const result_ty = try Type.ptr(sema.arena, mod, .{ + .pointee_type = ptr_info.pointee_type.childType(mod), .sentinel = ptr_info.sentinel, .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", @@ -23889,21 +24430,21 @@ fn fieldVal( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, .Pointer => { - const ptr_info = inner_ty.ptrInfo().data; + const ptr_info = inner_ty.ptrInfo(mod); if (ptr_info.size == .Slice) { - if (mem.eql(u8, field_name, "ptr")) { + if (ip.stringEqlSlice(field_name, "ptr")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; return sema.analyzeSlicePtr(block, object_src, slice, inner_ty); - } else if (mem.eql(u8, field_name, "len")) { + } else if (ip.stringEqlSlice(field_name, "len")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else @@ -23913,8 +24454,8 @@ fn fieldVal( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } } @@ -23926,66 +24467,74 @@ fn fieldVal( object; const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; - var to_type_buffer: Value.ToTypeBuffer = undefined; - const child_type = val.toType(&to_type_buffer); + const child_type = val.toType(); - switch (try child_type.zigTypeTagOrPoison()) { + switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { - const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { - if (payload.data.names.getEntry(field_name)) |entry| { - break :blk entry.key_ptr.*; - } - const msg = msg: { - const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(sema.mod), - }); - errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, child_type); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } else (try sema.mod.getErrorValue(field_name)).key; + switch (ip.indexToKey(child_type.toIntern())) { + .error_set_type => |error_set_type| blk: { + if (error_set_type.nameIndex(ip, field_name) != null) break :blk; + const msg = msg: { + const msg = try sema.errMsg(block, src, "no error named '{}' in '{}'", .{ + field_name.fmt(ip), child_type.fmt(mod), + }); + errdefer msg.destroy(sema.gpa); + try sema.addDeclaredHereNote(msg, child_type); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, + .inferred_error_set_type => { + return 
sema.fail(block, src, "TODO handle inferred error sets here", .{}); + }, + .simple_type => |t| { + assert(t == .anyerror); + _ = try mod.getErrorValue(field_name); + }, + else => unreachable, + } - return sema.addConstant( - if (!child_type.isAnyError()) - try child_type.copy(arena) - else - try Type.Tag.error_set_single.create(arena, name), - try Value.Tag.@"error".create(arena, .{ .name = name }), - ); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetType(field_name); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.toIntern(), + .name = field_name, + } })).toValue()); }, .Union => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } } const union_ty = try sema.resolveTypeFields(child_type); - if (union_ty.unionTagType()) |enum_ty| { - if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| { + if (union_ty.unionTagType(mod)) |enum_ty| { + if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index = @intCast(u32, field_index_usize); return sema.addConstant( enum_ty, - try Value.Tag.enum_field_index.create(sema.arena, field_index), + try mod.enumValueFieldIndex(enum_ty, field_index), ); } } return sema.failWithBadMemberAccess(block, union_ty, field_name_src, field_name); }, .Enum => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } } - const field_index_usize = child_type.enumFieldIndex(field_name) orelse + const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index = @intCast(u32, field_index_usize); - const enum_val = try Value.Tag.enum_field_index.create(arena, field_index); - return sema.addConstant(try child_type.copy(arena), enum_val); + const enum_val = try mod.enumValueFieldIndex(child_type, field_index); + return sema.addConstant(child_type, enum_val); }, .Struct, .Opaque => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } @@ -23994,10 +24543,10 @@ fn fieldVal( }, else => { const msg = msg: { - const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(mod)}); errdefer msg.destroy(sema.gpa); - if (child_type.isSlice()) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); - if (child_type.zigTypeTag() == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); + if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); + if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -24028,50 +24577,52 @@ fn fieldPtr( block: *Block, src: LazySrcLoc, object_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, initializing: bool, ) CompileError!Air.Inst.Ref { // When editing 
this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. + const mod = sema.mod; + const ip = &mod.intern_pool; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); - const object_ty = switch (object_ptr_ty.zigTypeTag()) { - .Pointer => object_ptr_ty.elemType(), - else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}), + const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { + .Pointer => object_ptr_ty.childType(mod), + else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}), }; // Zig allows dereferencing a single pointer during field lookup. Note that // we don't actually need to generate the dereference some field lookups, like the // length of arrays and other comptime operations. - const is_pointer_to = object_ty.isSinglePointer(); + const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) - object_ty.childType() + object_ty.childType(mod) else object_ty; - switch (inner_ty.zigTypeTag()) { + switch (inner_ty.zigTypeTag(mod)) { .Array => { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( Type.usize, - try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()), + try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), 0, // default alignment )); } else { return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, - .Pointer => if (inner_ty.isSlice()) { + .Pointer => if (inner_ty.isSlice(mod)) { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else @@ -24079,47 +24630,44 @@ fn fieldPtr( const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty; - if (mem.eql(u8, field_name, "ptr")) { - const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const slice_ptr_ty = inner_ty.slicePtrFieldType(buf); + if (ip.stringEqlSlice(field_name, "ptr")) { + const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = slice_ptr_ty, - .mutable = attr_ptr_ty.ptrIsMutable(), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), - .@"addrspace" = attr_ptr_ty.ptrAddressSpace(), + .mutable = attr_ptr_ty.ptrIsMutable(mod), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.ptr_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.toIntern(), + .addr = .{ .field = .{ + .base = val.toIntern(), + .index = Value.slice_ptr_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); - } else if (mem.eql(u8, field_name, "len")) { - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + } else if 
(ip.stringEqlSlice(field_name, "len")) { + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = Type.usize, - .mutable = attr_ptr_ty.ptrIsMutable(), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), - .@"addrspace" = attr_ptr_ty.ptrAddressSpace(), + .mutable = attr_ptr_ty.ptrIsMutable(mod), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.len_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.toIntern(), + .addr = .{ .field = .{ + .base = val.toIntern(), + .index = Value.slice_len_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24128,8 +24676,8 @@ fn fieldPtr( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, @@ -24142,47 +24690,59 @@ fn fieldPtr( result; const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; - var to_type_buffer: Value.ToTypeBuffer = undefined; - const child_type = val.toType(&to_type_buffer); + const child_type = val.toType(); - switch (child_type.zigTypeTag()) { + switch (child_type.zigTypeTag(mod)) { .ErrorSet => { - // TODO resolve inferred error sets - const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { - if (payload.data.names.getEntry(field_name)) |entry| { - break :blk entry.key_ptr.*; - } - return sema.fail(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(sema.mod), - }); - } else (try sema.mod.getErrorValue(field_name)).key; + switch (ip.indexToKey(child_type.toIntern())) { + .error_set_type => |error_set_type| blk: { + if (error_set_type.nameIndex(ip, field_name) != null) { + break :blk; + } + return sema.fail(block, src, "no error named '{}' in '{}'", .{ + field_name.fmt(ip), child_type.fmt(mod), + }); + }, + .inferred_error_set_type => { + return sema.fail(block, src, "TODO handle inferred error sets here", .{}); + }, + .simple_type => |t| { + assert(t == .anyerror); + _ = try mod.getErrorValue(field_name); + }, + else => unreachable, + } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetType(field_name); return sema.analyzeDeclRef(try anon_decl.finish( - if (!child_type.isAnyError()) - try child_type.copy(anon_decl.arena()) - else - try Type.Tag.error_set_single.create(anon_decl.arena(), name), - try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }), + error_set_type, + (try mod.intern(.{ .err = .{ + .ty = error_set_type.toIntern(), + .name = field_name, + } })).toValue(), 0, // default alignment )); }, .Union => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } const union_ty = try sema.resolveTypeFields(child_type); - if (union_ty.unionTagType()) |enum_ty| { - if (enum_ty.enumFieldIndex(field_name)) |field_index| { + if (union_ty.unionTagType(mod)) |enum_ty| { + if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const 
field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try enum_ty.copy(anon_decl.arena()), - try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), + enum_ty, + try mod.enumValueFieldIndex(enum_ty, field_index_u32), 0, // default alignment )); } @@ -24190,32 +24750,32 @@ fn fieldPtr( return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } - const field_index = child_type.enumFieldIndex(field_name) orelse { + const field_index = child_type.enumFieldIndex(field_name, mod) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try child_type.copy(anon_decl.arena()), - try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), + child_type, + try mod.enumValueFieldIndex(child_type, field_index_u32), 0, // default alignment )); }, .Struct, .Opaque => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, - else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}), + else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}), } }, .Struct => { @@ -24252,22 +24812,24 @@ fn fieldCallBind( block: *Block, src: LazySrcLoc, raw_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!ResolvedFieldCallee { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. + const mod = sema.mod; + const ip = &mod.intern_pool; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); - const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C)) - raw_ptr_ty.childType() + const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) + raw_ptr_ty.childType(mod) else - return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)}); + return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)}); // Optionally dereference a second pointer to get the concrete type. 
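// Illustration only, not part of this patch: the `getNamespaceIndex(mod).unwrap()`
// lookups threaded through the hunks above are what resolve declarations accessed
// through a type (as opposed to enum/union fields). A minimal user-level sketch:
const std = @import("std");

test "decl lookup through a type's namespace" {
    const Color = enum {
        red,
        green,
        // Resolved via the namespace lookup path, not as an enum field.
        const favorite: Color = .green;
    };
    try std.testing.expectEqual(Color.green, Color.favorite);
}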
- const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One; - const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty; + const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One; + const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty; const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; const object_ptr = if (is_double_ptr) try sema.analyzeLoad(block, src, raw_ptr, src) @@ -24275,37 +24837,37 @@ fn fieldCallBind( raw_ptr; find_field: { - switch (concrete_ty.zigTypeTag()) { + switch (concrete_ty.zigTypeTag(mod)) { .Struct => { const struct_ty = try sema.resolveTypeFields(concrete_ty); - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const field_index_usize = struct_obj.data.fields.getIndex(field_name) orelse + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const field_index_usize = struct_obj.fields.getIndex(field_name) orelse break :find_field; const field_index = @intCast(u32, field_index_usize); - const field = struct_obj.data.fields.values()[field_index]; + const field = struct_obj.fields.values()[field_index]; return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); - } else if (struct_ty.isTuple()) { - if (mem.eql(u8, field_name, "len")) { - return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()) }; + } else if (struct_ty.isTuple(mod)) { + if (ip.stringEqlSlice(field_name, "len")) { + return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) }; + } + if (field_name.toUnsigned(ip)) |field_index| { + if (field_index >= struct_ty.structFieldCount(mod)) break :find_field; + return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr); } - if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { - if (field_index >= struct_ty.structFieldCount()) break :find_field; - return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index), field_index, object_ptr); - } else |_| {} } else { - const max = struct_ty.structFieldCount(); - var i: u32 = 0; - while (i < max) : (i += 1) { - if (mem.eql(u8, struct_ty.structFieldName(i), field_name)) { - return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i), i, object_ptr); + const max = struct_ty.structFieldCount(mod); + for (0..max) |i_usize| { + const i = @intCast(u32, i_usize); + if (field_name == struct_ty.structFieldName(i, mod)) { + return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr); } } } }, .Union => { const union_ty = try sema.resolveTypeFields(concrete_ty); - const fields = union_ty.unionFields(); + const fields = union_ty.unionFields(mod); const field_index_usize = fields.getIndex(field_name) orelse break :find_field; const field_index = @intCast(u32, field_index_usize); const field = fields.values()[field_index]; @@ -24321,24 +24883,23 @@ fn fieldCallBind( } // If we get here, we need to look for a decl in the struct type instead. 
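// Illustration only, not part of this patch: in the tuple branch above,
// `field_name.toUnsigned(ip)` replaces `std.fmt.parseUnsigned` for numeric field
// names, and the interned-string comparison covers the `len` member. Sketch:
const std = @import("std");

test "tuple 'len' and numeric field names" {
    const pair = .{ @as(i32, 1), true };
    try std.testing.expectEqual(@as(usize, 2), pair.len); // "len" member
    try std.testing.expectEqual(@as(i32, 1), pair.@"0"); // numeric field name
}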
- const found_decl = switch (concrete_ty.zigTypeTag()) { + const found_decl = switch (concrete_ty.zigTypeTag(mod)) { .Struct, .Opaque, .Union, .Enum => found_decl: { - if (concrete_ty.getNamespace()) |namespace| { + if (concrete_ty.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| { try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); - if (decl_type.zigTypeTag() == .Fn and - decl_type.fnParamLen() >= 1) - { - const first_param_type = decl_type.fnParamType(0); - const first_param_tag = first_param_type.tag(); + if (mod.typeToFunc(decl_type)) |func_type| f: { + if (func_type.param_types.len == 0) break :f; + + const first_param_type = func_type.param_types[0].toType(); // zig fmt: off - if (first_param_tag == .generic_poison or ( - first_param_type.zigTypeTag() == .Pointer and - (first_param_type.ptrSize() == .One or - first_param_type.ptrSize() == .C) and - first_param_type.childType().eql(concrete_ty, sema.mod))) + if (first_param_type.isGenericPoison() or ( + first_param_type.zigTypeTag(mod) == .Pointer and + (first_param_type.ptrSize(mod) == .One or + first_param_type.ptrSize(mod) == .C) and + first_param_type.childType(mod).eql(concrete_ty, mod))) { // zig fmt: on // Note that if the param type is generic poison, we know that it must @@ -24350,32 +24911,31 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = object_ptr, } }; - } else if (first_param_type.eql(concrete_ty, sema.mod)) { + } else if (first_param_type.eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (first_param_type.zigTypeTag() == .Optional) { - var opt_buf: Type.Payload.ElemType = undefined; - const child = first_param_type.optionalChild(&opt_buf); - if (child.eql(concrete_ty, sema.mod)) { + } else if (first_param_type.zigTypeTag(mod) == .Optional) { + const child = first_param_type.optionalChild(mod); + if (child.eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (child.zigTypeTag() == .Pointer and - child.ptrSize() == .One and - child.childType().eql(concrete_ty, sema.mod)) + } else if (child.zigTypeTag(mod) == .Pointer and + child.ptrSize(mod) == .One and + child.childType(mod).eql(concrete_ty, mod)) { return .{ .method = .{ .func_inst = decl_val, .arg0_inst = object_ptr, } }; } - } else if (first_param_type.zigTypeTag() == .ErrorUnion and - first_param_type.errorUnionPayload().eql(concrete_ty, sema.mod)) + } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and + first_param_type.errorUnionPayload(mod).eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ @@ -24393,12 +24953,15 @@ fn fieldCallBind( }; const msg = msg: { - const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(block, src, "no field or member function named '{}' in '{}'", .{ + field_name.fmt(ip), + concrete_ty.fmt(mod), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); if (found_decl) |decl_idx| { - const decl = sema.mod.declPtr(decl_idx); - try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "'{s}' is not a member function", 
.{field_name}); + const decl = mod.declPtr(decl_idx); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{}' is not a member function", .{field_name.fmt(ip)}); } break :msg msg; }; @@ -24414,29 +24977,29 @@ fn finishFieldCallBind( field_index: u32, object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { + const mod = sema.mod; const arena = sema.arena; - const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(arena, mod, .{ .pointee_type = field_ty, - .mutable = ptr_ty.ptrIsMutable(), - .@"addrspace" = ptr_ty.ptrAddressSpace(), + .mutable = ptr_ty.ptrIsMutable(mod), + .@"addrspace" = ptr_ty.ptrAddressSpace(mod), }); - const container_ty = ptr_ty.childType(); - if (container_ty.zigTypeTag() == .Struct) { - if (container_ty.structFieldValueComptime(field_index)) |default_val| { + const container_ty = ptr_ty.childType(mod); + if (container_ty.zigTypeTag(mod) == .Struct) { + if (try container_ty.structFieldValueComptime(mod, field_index)) |default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; } } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const pointer = try sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = container_ty, - .field_index = field_index, - }), - ); + const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = struct_ptr_val.toIntern(), + .index = field_index, + } }, + } })).toValue()); return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) }; } @@ -24449,19 +25012,20 @@ fn namespaceLookup( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, - decl_name: []const u8, + namespace: Namespace.Index, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Decl.Index { + const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { - const decl = sema.mod.declPtr(decl_index); - if (!decl.is_pub and decl.getFileScope() != block.getFileScope()) { + const decl = mod.declPtr(decl_index); + if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ - decl_name, + const msg = try sema.errMsg(block, src, "'{}' is not marked 'pub'", .{ + decl_name.fmt(&mod.intern_pool), }); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{}); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -24475,8 +25039,8 @@ fn namespaceLookupRef( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, - decl_name: []const u8, + namespace: Namespace.Index, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; try sema.addReferencedBy(block, src, decl); @@ -24487,8 +25051,8 @@ fn namespaceLookupVal( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, - decl_name: []const u8, + namespace: Namespace.Index, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; return try sema.analyzeDeclVal(block, src, decl); @@ -24499,29 +25063,30 @@ fn structFieldPtr( block: 
*Block, src: LazySrcLoc, struct_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const mod = sema.mod; + assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); try sema.resolveStructLayout(struct_ty); - if (struct_ty.isTuple()) { - if (mem.eql(u8, field_name, "len")) { - const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()); + if (struct_ty.isTuple(mod)) { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { + const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); - } else if (struct_ty.isAnonStruct()) { + } else if (struct_ty.isAnonStruct(mod)) { const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_big = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); @@ -24540,14 +25105,15 @@ fn structFieldPtrByIndex( struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - if (struct_ty.isAnonStruct()) { + const mod = sema.mod; + if (struct_ty.isAnonStruct(mod)) { return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing); } - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field = struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); - const struct_ptr_ty_info = struct_ptr_ty.ptrInfo().data; + const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod); var ptr_ty_data: Type.Payload.Pointer.Data = .{ .pointee_type = field.ty, @@ -24556,7 +25122,7 @@ fn structFieldPtrByIndex( .@"addrspace" = struct_ptr_ty_info.@"addrspace", }; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); if (struct_obj.layout == .Packed) { comptime assert(Type.packed_struct_layout_version == 2); @@ -24568,7 +25134,7 @@ fn structFieldPtrByIndex( if (i == field_index) { ptr_ty_data.bit_offset = running_bits; } - running_bits += @intCast(u16, f.ty.bitSize(target)); + running_bits += @intCast(u16, f.ty.bitSize(mod)); } ptr_ty_data.host_size = (running_bits + 7) / 8; @@ -24582,7 +25148,7 @@ fn structFieldPtrByIndex( const parent_align = if (struct_ptr_ty_info.@"align" != 0) struct_ptr_ty_info.@"align" else - struct_ptr_ty_info.pointee_type.abiAlignment(target); + struct_ptr_ty_info.pointee_type.abiAlignment(mod); ptr_ty_data.@"align" = parent_align; // If the field happens to be byte-aligned, simplify the pointer type. 
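// Illustration only, not part of this patch: the `bit_offset`/`host_size`
// computation above (now using `bitSize(mod)` and `abiAlignment(mod)`) is what
// backs pointers into packed struct fields, e.g.:
const std = @import("std");

test "pointer to a packed struct field carries its bit offset" {
    const Flags = packed struct {
        a: u1,
        b: u3,
        c: u4,
    };
    var flags = Flags{ .a = 1, .b = 5, .c = 9 };
    const b_ptr = &flags.b; // bit offset 1 within a one-byte host integer
    b_ptr.* = 2;
    try std.testing.expectEqual(@as(u3, 2), flags.b);
}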
@@ -24596,8 +25162,8 @@ fn structFieldPtrByIndex( if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0 and target.cpu.arch.endian() == .Little) { - const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(target); - const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target); + const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(mod); + const elem_size_bits = ptr_ty_data.pointee_type.bitSize(mod); if (elem_size_bytes * 8 == elem_size_bits) { const byte_offset = ptr_ty_data.bit_offset / 8; const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align)); @@ -24610,25 +25176,25 @@ fn structFieldPtrByIndex( ptr_ty_data.@"align" = field.abi_align; } - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); + const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data); if (field.is_comptime) { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = try field.ty.copy(sema.arena), - .field_val = try field.default_val.copy(sema.arena), - }); - return sema.addConstant(ptr_field_ty, val); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .comptime_field = field.default_val }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = struct_ptr_ty.childType(), - .field_index = field_index, - }), - ); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = try struct_ptr_val.intern(struct_ptr_ty, mod), + .index = field_index, + } }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24640,21 +25206,17 @@ fn structFieldVal( block: *Block, src: LazySrcLoc, struct_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { - assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const mod = sema.mod; + assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - switch (struct_ty.tag()) { - .tuple, .empty_struct_literal => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), - .anon_struct => { - const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); - return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); - }, - .@"struct" => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); const field_index_usize = struct_obj.fields.getIndex(field_name) orelse @@ -24663,22 +25225,28 @@ fn structFieldVal( const field = struct_obj.fields.values()[field_index]; if (field.is_comptime) { - return sema.addConstant(field.ty, field.default_val); + return sema.addConstant(field.ty, field.default_val.toValue()); } if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { - if (struct_val.isUndef()) return sema.addConstUndef(field.ty); + if (struct_val.isUndef(mod)) return 
sema.addConstUndef(field.ty); if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { return sema.addConstant(field.ty, opv); } - - const field_values = struct_val.castTag(.aggregate).?.data; - return sema.addConstant(field.ty, field_values[field_index]); + return sema.addConstant(field.ty, try struct_val.fieldValue(mod, field_index)); } try sema.requireRuntimeBlock(block, src, null); return block.addStructFieldVal(struct_byval, field_index, field.ty); }, + .anon_struct_type => |anon_struct| { + if (anon_struct.names.len == 0) { + return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); + } else { + const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); + return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); + } + }, else => unreachable, } } @@ -24688,12 +25256,13 @@ fn tupleFieldVal( block: *Block, src: LazySrcLoc, tuple_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, tuple_ty: Type, ) CompileError!Air.Inst.Ref { - if (mem.eql(u8, field_name, "len")) { - return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount()); + const mod = sema.mod; + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { + return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount(mod)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty); @@ -24704,19 +25273,20 @@ fn tupleFieldIndex( sema: *Sema, block: *Block, tuple_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!u32 { - assert(!std.mem.eql(u8, field_name, "len")); - if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { - if (field_index < tuple_ty.structFieldCount()) return field_index; - return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ - field_name, tuple_ty.fmt(sema.mod), + const mod = sema.mod; + assert(!mod.intern_pool.stringEqlSlice(field_name, "len")); + if (field_name.toUnsigned(&mod.intern_pool)) |field_index| { + if (field_index < tuple_ty.structFieldCount(mod)) return field_index; + return sema.fail(block, field_name_src, "index '{}' out of bounds of tuple '{}'", .{ + field_name.fmt(&mod.intern_pool), tuple_ty.fmt(mod), }); - } else |_| {} + } - return sema.fail(block, field_name_src, "no field named '{s}' in tuple '{}'", .{ - field_name, tuple_ty.fmt(sema.mod), + return sema.fail(block, field_name_src, "no field named '{}' in tuple '{}'", .{ + field_name.fmt(&mod.intern_pool), tuple_ty.fmt(mod), }); } @@ -24728,22 +25298,29 @@ fn tupleFieldValByIndex( field_index: u32, tuple_ty: Type, ) CompileError!Air.Inst.Ref { - const field_ty = tuple_ty.structFieldType(field_index); + const mod = sema.mod; + const field_ty = tuple_ty.structFieldType(field_index, mod); - if (tuple_ty.structFieldValueComptime(field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); } if (try sema.resolveMaybeUndefVal(tuple_byval)) |tuple_val| { - if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return sema.addConstant(field_ty, opv); } - const field_values = tuple_val.castTag(.aggregate).?.data; - return sema.addConstant(field_ty, field_values[field_index]); + return 
switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) { + .undef => sema.addConstUndef(field_ty), + .aggregate => |aggregate| sema.addConstant(field_ty, switch (aggregate.storage) { + .bytes => |bytes| try mod.intValue(Type.u8, bytes[0]), + .elems => |elems| elems[field_index].toValue(), + .repeated_elem => |elem| elem.toValue(), + }), + else => unreachable, + }; } - if (tuple_ty.structFieldValueComptime(field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(field_ty, default_val); } @@ -24756,33 +25333,38 @@ fn unionFieldPtr( block: *Block, src: LazySrcLoc, union_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_union_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { const arena = sema.arena; - assert(unresolved_union_ty.zigTypeTag() == .Union); + const mod = sema.mod; + const ip = &mod.intern_pool; + + assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ptr_ty = sema.typeOf(union_ptr); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(arena, mod, .{ .pointee_type = field.ty, - .mutable = union_ptr_ty.ptrIsMutable(), - .@"volatile" = union_ptr_ty.isVolatilePtr(), - .@"addrspace" = union_ptr_ty.ptrAddressSpace(), + .mutable = union_ptr_ty.ptrIsMutable(mod), + .@"volatile" = union_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod), }); - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); - if (initializing and field.ty.zigTypeTag() == .NoReturn) { + if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -24794,21 +25376,20 @@ fn unionFieldPtr( .Auto => if (!initializing) { const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse break :ct; - if (union_val.isUndef()) { + if (union_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); } - const tag_and_val = union_val.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = enum_field_index, - }; - const field_tag = Value.initPayload(&field_tag_buf.base); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); + const un = ip.indexToKey(union_val.toIntern()).un; + const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); + const tag_matches = un.tag == field_tag.toIntern(); if (!tag_matches) { const msg = msg: { - const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; - const active_field_name = union_obj.tag_ty.enumFieldName(active_index); - const 
msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); + const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; + const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); + const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{ + field_name.fmt(ip), + active_field_name.fmt(ip), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -24818,28 +25399,27 @@ fn unionFieldPtr( }, .Packed, .Extern => {}, } - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = union_ptr_val, - .container_ty = union_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = union_ptr_val.toIntern(), + .index = field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); if (!initializing and union_obj.layout == .Auto and block.wantSafety() and - union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1) + union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { - const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); // TODO would it be better if get_union_tag supported pointers to unions? const union_val = try block.addTyOp(.load, union_ty, union_ptr); const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); } - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -24851,37 +25431,37 @@ fn unionFieldVal( block: *Block, src: LazySrcLoc, union_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { - assert(unresolved_union_ty.zigTypeTag() == .Union); + const mod = sema.mod; + const ip = &mod.intern_pool; + assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { - if (union_val.isUndef()) return sema.addConstUndef(field.ty); + if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); - const tag_and_val = union_val.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = enum_field_index, - }; - const field_tag = Value.initPayload(&field_tag_buf.base); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); + const un = ip.indexToKey(union_val.toIntern()).un; + const field_tag = try 
mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); + const tag_matches = un.tag == field_tag.toIntern(); switch (union_obj.layout) { .Auto => { if (tag_matches) { - return sema.addConstant(field.ty, tag_and_val.val); + return sema.addConstant(field.ty, un.val.toValue()); } else { const msg = msg: { - const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; - const active_field_name = union_obj.tag_ty.enumFieldName(active_index); - const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); + const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; + const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); + const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{ + field_name.fmt(ip), active_field_name.fmt(ip), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -24891,10 +25471,10 @@ fn unionFieldVal( }, .Packed, .Extern => { if (tag_matches) { - return sema.addConstant(field.ty, tag_and_val.val); + return sema.addConstant(field.ty, un.val.toValue()); } else { - const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod); - if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| { + const old_ty = union_ty.unionFieldType(un.tag.toValue(), mod); + if (try sema.bitCastVal(block, src, un.val.toValue(), old_ty, field.ty, 0)) |new_val| { return sema.addConstant(field.ty, new_val); } } @@ -24904,14 +25484,14 @@ fn unionFieldVal( try sema.requireRuntimeBlock(block, src, null); if (union_obj.layout == .Auto and block.wantSafety() and - union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1) + union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { - const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); } - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -24928,22 +25508,22 @@ fn elemPtr( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const indexable_ptr_src = src; // TODO better source location const indexable_ptr_ty = sema.typeOf(indexable_ptr); - const target = sema.mod.getTarget(); - const indexable_ty = switch (indexable_ptr_ty.zigTypeTag()) { - .Pointer => indexable_ptr_ty.elemType(), - else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), + const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { + .Pointer => indexable_ptr_ty.childType(mod), + else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}), }; try checkIndexable(sema, block, src, indexable_ty); - switch (indexable_ty.zigTypeTag()) { + switch (indexable_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety), .Struct => { // Tuple field access. 
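// Illustration only, not part of this patch: the interned tag comparison above
// (`un.tag == field_tag.toIntern()`) backs the usual union field-access rules:
const std = @import("std");

test "accessing the active union field" {
    const U = union(enum) { int: i32, float: f32 };
    var u = U{ .int = 42 };
    try std.testing.expectEqual(@as(i32, 42), u.int);
    // Reading u.float here would trip the safety check:
    // "access of union field 'float' while field 'int' is active".
}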
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(target)); + const index = @intCast(u32, index_val.toUnsignedInt(mod)); return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -24966,11 +25546,11 @@ fn elemPtrOneLayerOnly( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const target = sema.mod.getTarget(); + const mod = sema.mod; try checkIndexable(sema, block, src, indexable_ty); - switch (indexable_ty.ptrSize()) { + switch (indexable_ty.ptrSize(mod)) { .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable); @@ -24978,9 +25558,9 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(target)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); const result_ty = try sema.elemPtrType(indexable_ty, index); + const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod); return sema.addConstant(result_ty, elem_ptr); }; const result_ty = try sema.elemPtrType(indexable_ty, null); @@ -24989,7 +25569,7 @@ fn elemPtrOneLayerOnly( return block.addPtrElemPtr(indexable, elem_index, result_ty); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable + assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety); }, } @@ -25006,7 +25586,7 @@ fn elemVal( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const target = sema.mod.getTarget(); + const mod = sema.mod; try checkIndexable(sema, block, src, indexable_ty); @@ -25014,8 +25594,8 @@ fn elemVal( // index is a scalar or vector instead of unconditionally casting to usize. 
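// Illustration only, not part of this patch: `elemPtrOneLayerOnly` above handles
// the pointer sizes that remain indexable; the `.One` case is the familiar
// single-pointer-to-array form asserted by `checkIndexable`:
const std = @import("std");

test "indexing through a single pointer to an array" {
    var array = [_]u8{ 10, 20, 30 };
    const ptr = &array; // *[3]u8, the .One case
    ptr[1] = 21;
    try std.testing.expectEqual(@as(u8, 21), array[1]);
}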
const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src); - switch (indexable_ty.zigTypeTag()) { - .Pointer => switch (indexable_ty.ptrSize()) { + switch (indexable_ty.zigTypeTag(mod)) { + .Pointer => switch (indexable_ty.ptrSize(mod)) { .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable); @@ -25024,10 +25604,14 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(target)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); - if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { - return sema.addConstant(indexable_ty.elemType2(), elem_val); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const elem_ty = indexable_ty.elemType2(mod); + const many_ptr_ty = try mod.manyConstPtrType(elem_ty); + const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); + const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); + const elem_ptr_val = try many_ptr_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { + return sema.addConstant(elem_ty, try mod.getCoerced(elem_val, elem_ty)); } break :rs indexable_src; }; @@ -25036,7 +25620,19 @@ fn elemVal( return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable + const array_ty = indexable_ty.childType(mod); // Guaranteed by checkIndexable + assert(array_ty.zigTypeTag(mod) == .Array); + + if (array_ty.sentinel(mod)) |sentinel| { + // index must be defined since it can access out of bounds + if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| { + const index = @intCast(usize, index_val.toUnsignedInt(mod)); + if (index == array_ty.arrayLen(mod)) { + return sema.addConstant(array_ty.childType(mod), sentinel); + } + } + } + const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety); return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src); }, @@ -25049,7 +25645,7 @@ fn elemVal( .Struct => { // Tuple field access. 
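// Illustration only, not part of this patch: the new sentinel branch in the
// `.One` case above makes comptime element access agree with this behavior:
const std = @import("std");

test "index == len yields the sentinel of a sentinel-terminated array" {
    const msg: [5:0]u8 = "hello".*;
    const ptr = &msg;
    try std.testing.expectEqual(@as(u8, 0), ptr[5]); // index == len reads the sentinel
}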
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(target)); + const index = @intCast(u32, index_val.toUnsignedInt(mod)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -25064,6 +25660,7 @@ fn validateRuntimeElemAccess( parent_ty: Type, parent_src: LazySrcLoc, ) CompileError!void { + const mod = sema.mod; const valid_rt = try sema.validateRunTimeType(elem_ty, false); if (!valid_rt) { const msg = msg: { @@ -25071,12 +25668,12 @@ fn validateRuntimeElemAccess( block, elem_index_src, "values of type '{}' must be comptime-known, but index value is runtime-known", - .{parent_ty.fmt(sema.mod)}, + .{parent_ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl), parent_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl, mod), parent_ty); break :msg msg; }; @@ -25093,10 +25690,11 @@ fn tupleFieldPtr( field_index: u32, init: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); - const tuple_ty = tuple_ptr_ty.childType(); + const tuple_ty = tuple_ptr_ty.childType(mod); _ = try sema.resolveTypeFields(tuple_ty); - const field_count = tuple_ty.structFieldCount(); + const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{}); @@ -25108,31 +25706,29 @@ fn tupleFieldPtr( }); } - const field_ty = tuple_ty.structFieldType(field_index); - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const field_ty = tuple_ty.structFieldType(field_index, mod); + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, - .mutable = tuple_ptr_ty.ptrIsMutable(), - .@"volatile" = tuple_ptr_ty.isVolatilePtr(), - .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(), + .mutable = tuple_ptr_ty.ptrIsMutable(mod), + .@"volatile" = tuple_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); - if (tuple_ty.structFieldValueComptime(field_index)) |default_val| { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = field_ty, - .field_val = default_val, - }); - return sema.addConstant(ptr_field_ty, val); + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .comptime_field = default_val.toIntern() }, + } })).toValue()); } if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = tuple_ptr_val, - .container_ty = tuple_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = tuple_ptr_val.toIntern(), + .index = field_index, + } }, + } })).toValue()); } if (!init) { @@ -25151,8 +25747,9 @@ fn tupleField( field_index_src: LazySrcLoc, field_index: u32, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple)); - const field_count = tuple_ty.structFieldCount(); + const field_count = 
tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{}); @@ -25164,15 +25761,15 @@ fn tupleField( }); } - const field_ty = tuple_ty.structFieldType(field_index); + const field_ty = tuple_ty.structFieldType(field_index, mod); - if (tuple_ty.structFieldValueComptime(field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); // comptime field } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { - if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, field_index)); + if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); + return sema.addConstant(field_ty, try tuple_val.fieldValue(mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25191,11 +25788,12 @@ fn elemValArray( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const array_ty = sema.typeOf(array); - const array_sent = array_ty.sentinel(); - const array_len = array_ty.arrayLen(); + const array_sent = array_ty.sentinel(mod); + const array_len = array_ty.arrayLen(mod); const array_len_s = array_len + @boolToInt(array_sent != null); - const elem_ty = array_ty.childType(); + const elem_ty = array_ty.childType(mod); if (array_len_s == 0) { return sema.fail(block, array_src, "indexing into empty array is not allowed", .{}); @@ -25204,10 +25802,9 @@ fn elemValArray( const maybe_undef_array_val = try sema.resolveMaybeUndefVal(array); // index must be defined since it can access out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); - const target = sema.mod.getTarget(); if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); if (array_sent) |s| { if (index == array_len) { return sema.addConstant(elem_ty, s); @@ -25219,12 +25816,12 @@ fn elemValArray( } } if (maybe_undef_array_val) |array_val| { - if (array_val.isUndef()) { + if (array_val.isUndef(mod)) { return sema.addConstUndef(elem_ty); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); - const elem_val = try array_val.elemValue(sema.mod, sema.arena, index); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const elem_val = try array_val.elemValue(mod, index); return sema.addConstant(elem_ty, elem_val); } } @@ -25255,11 +25852,11 @@ fn elemPtrArray( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); + const mod = sema.mod; const array_ptr_ty = sema.typeOf(array_ptr); - const array_ty = array_ptr_ty.childType(); - const array_sent = array_ty.sentinel() != null; - const array_len = array_ty.arrayLen(); + const array_ty = array_ptr_ty.childType(mod); + const array_sent = array_ty.sentinel(mod) != null; + const array_len = array_ty.arrayLen(mod); const array_len_s = array_len + @boolToInt(array_sent); if (array_len_s == 0) { @@ -25269,7 +25866,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveMaybeUndefVal(array_ptr); // The index must not be undefined since it can be out of bounds. 
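// Illustration only, not part of this patch: `structFieldValueComptime` (now
// failable and mod-threaded) is the path that resolves comptime-known fields
// without loading from memory. A hypothetical user-level sketch:
const std = @import("std");

test "comptime struct fields resolve to constants" {
    const S = struct {
        comptime version: u32 = 1,
        data: u8,
    };
    const s = S{ .data = 42 };
    try std.testing.expectEqual(@as(u32, 1), s.version); // comptime field
    try std.testing.expectEqual(@as(u8, 42), s.data);
}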
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target)); + const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); @@ -25280,17 +25877,17 @@ fn elemPtrArray( const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset); if (maybe_undef_array_ptr_val) |array_ptr_val| { - if (array_ptr_val.isUndef()) { + if (array_ptr_val.isUndef(mod)) { return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, sema.mod); + const elem_ptr = try array_ptr_val.elemPtr(elem_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } if (!init) { - try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(), array_ty, array_ptr_src); + try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src); } const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src; @@ -25316,32 +25913,33 @@ fn elemValSlice( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const slice_ty = sema.typeOf(slice); - const slice_sent = slice_ty.sentinel() != null; - const elem_ty = slice_ty.elemType2(); + const slice_sent = slice_ty.sentinel(mod) != null; + const elem_ty = slice_ty.elemType2(mod); var runtime_src = slice_src; // slice must be defined since it can dereferenced as null const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice); // index must be defined since it can index out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); - const target = sema.mod.getTarget(); if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; - const slice_len = slice_val.sliceLen(sema.mod); + const slice_len = slice_val.sliceLen(mod); const slice_len_s = slice_len + @boolToInt(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod); - if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { + const elem_ptr_ty = try sema.elemPtrType(slice_ty, index); + const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } runtime_src = slice_src; @@ -25353,7 +25951,7 @@ fn elemValSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)) + try sema.addIntUnsigned(Type.usize, 
slice_val.sliceLen(mod)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -25373,24 +25971,24 @@ fn elemPtrSlice( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); + const mod = sema.mod; const slice_ty = sema.typeOf(slice); - const slice_sent = slice_ty.sentinel() != null; + const slice_sent = slice_ty.sentinel(mod) != null; const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice); // The index must not be undefined since it can be out of bounds. const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target)); + const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod)); break :o index; } else null; const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset); if (maybe_undef_slice_val) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.addConstUndef(elem_ptr_ty); } - const slice_len = slice_val.sliceLen(sema.mod); + const slice_len = slice_val.sliceLen(mod); const slice_len_s = slice_len + @boolToInt(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -25400,7 +25998,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod); + const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25412,8 +26010,8 @@ fn elemPtrSlice( if (oob_safety and block.wantSafety()) { const len_inst = len: { if (maybe_undef_slice_val) |slice_val| - if (!slice_val.isUndef()) - break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)); + if (!slice_val.isUndef(mod)) + break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -25455,16 +26053,17 @@ const CoerceOpts = struct { fn get(info: @This(), sema: *Sema) !?Module.SrcLoc { if (info.func_inst == .none) return null; + const mod = sema.mod; const fn_decl = (try sema.funcDeclSrc(info.func_inst)) orelse return null; - const param_src = Module.paramSrc(0, sema.gpa, fn_decl, info.param_i); + const param_src = Module.paramSrc(0, mod, fn_decl, info.param_i); if (param_src == .node_offset_param) { return Module.SrcLoc{ - .file_scope = fn_decl.getFileScope(), + .file_scope = fn_decl.getFileScope(mod), .parent_decl_node = fn_decl.src_node, .lazy = LazySrcLoc.nodeOffset(param_src.node_offset_param), }; } - return param_src.toSrcLoc(fn_decl); + return param_src.toSrcLoc(fn_decl, mod); } } = .{}, }; @@ -25477,34 +26076,30 @@ fn coerceExtra( inst_src: LazySrcLoc, opts: CoerceOpts, ) CoersionError!Air.Inst.Ref { - switch (dest_ty_unresolved.tag()) { - .generic_poison => return inst, - else => {}, - } + if (dest_ty_unresolved.isGenericPoison()) return inst; + const mod = sema.mod; const dest_ty_src = inst_src; // TODO better source location const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst)); - const target = 
sema.mod.getTarget(); + const target = mod.getTarget(); // If the types are the same, we can return the operand. - if (dest_ty.eql(inst_ty, sema.mod)) + if (dest_ty.eql(inst_ty, mod)) return inst; - const arena = sema.arena; const maybe_inst_val = try sema.resolveMaybeUndefVal(inst); var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (maybe_inst_val) |val| { - // Keep the comptime Value representation; take the new type. - return sema.addConstant(dest_ty, val); + return sema.coerceInMemory(block, val, inst_ty, dest_ty, dest_ty_src); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); } - const is_undef = inst_ty.zigTypeTag() == .Undefined; + const is_undef = inst_ty.zigTypeTag(mod) == .Undefined; - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .Optional => optional: { // undefined sets the optional bit also to undefined. if (is_undef) { @@ -25512,18 +26107,22 @@ fn coerceExtra( } // null to ?T - if (inst_ty.zigTypeTag() == .Null) { - return sema.addConstant(dest_ty, Value.null); + if (inst_ty.zigTypeTag(mod) == .Null) { + return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ + .ty = dest_ty.toIntern(), + .val = .none, + } })).toValue()); } // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer - if (dest_ty.isPtrLikeOptional() and dest_ty.elemType2().tag() == .anyopaque and - inst_ty.isPtrAtRuntime()) + if (dest_ty.isPtrLikeOptional(mod) and + dest_ty.elemType2(mod).toIntern() == .anyopaque_type and + inst_ty.isPtrAtRuntime(mod)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; - const elem_ty = inst_ty.elemType2(); - if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) { + const elem_ty = inst_ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25532,12 +26131,12 @@ fn coerceExtra( } // Let the logic below handle wrapping the optional now that // it has been checked to correctly coerce. - if (!inst_ty.isPtrLikeOptional()) break :anyopaque_check; + if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check; return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } // T to ?T - const child_type = try dest_ty.optionalChildAlloc(sema.arena); + const child_type = dest_ty.optionalChild(mod); const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { error.NotCoercible => { if (in_memory_result == .no_match) { @@ -25551,12 +26150,12 @@ fn coerceExtra( return try sema.wrapOptional(block, dest_ty, intermediate, inst_src); }, .Pointer => pointer: { - const dest_info = dest_ty.ptrInfo().data; + const dest_info = dest_ty.ptrInfo(mod); // Function body to function pointer. 
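The `.Optional` prong above implements Zig's surface-level optional coercion rules. A minimal standalone sketch of the behavior it handles (illustrative only, not part of this diff, assuming a Zig toolchain contemporaneous with this change):

const std = @import("std");

test "optional coercions" {
    // null and a payload value both coerce to ?T.
    var a: ?u32 = null;
    a = 123;
    try std.testing.expectEqual(@as(?u32, 123), a);

    // A single-level ?*T coerces to ?*anyopaque; a double pointer
    // is rejected (the double_ptr_to_anyopaque case above).
    const x: u32 = 5;
    const p: ?*const u32 = &x;
    const q: ?*const anyopaque = p;
    try std.testing.expect(q != null);
}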
- if (inst_ty.zigTypeTag() == .Fn) { + if (inst_ty.zigTypeTag(mod) == .Fn) { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const fn_decl = fn_val.pointerDecl().?; + const fn_decl = fn_val.pointerDecl(mod).?; const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } @@ -25564,13 +26163,13 @@ fn coerceExtra( // *T to *[1]T single_item: { if (dest_info.size != .One) break :single_item; - if (!inst_ty.isSinglePointer()) break :single_item; + if (!inst_ty.isSinglePointer(mod)) break :single_item; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; - const ptr_elem_ty = inst_ty.childType(); + const ptr_elem_ty = inst_ty.childType(mod); const array_ty = dest_info.pointee_type; - if (array_ty.zigTypeTag() != .Array) break :single_item; - const array_elem_ty = array_ty.childType(); - if (array_ty.arrayLen() != 1) break :single_item; + if (array_ty.zigTypeTag(mod) != .Array) break :single_item; + const array_elem_ty = array_ty.childType(mod); + if (array_ty.arrayLen(mod) != 1) break :single_item; const dest_is_mut = dest_info.mutable; switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) { .ok => {}, @@ -25581,11 +26180,11 @@ fn coerceExtra( // Coercions where the source is a single pointer to an array. src_array_ptr: { - if (!inst_ty.isSinglePointer()) break :src_array_ptr; + if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; - const array_ty = inst_ty.childType(); - if (array_ty.zigTypeTag() != .Array) break :src_array_ptr; - const array_elem_type = array_ty.childType(); + const array_ty = inst_ty.childType(mod); + if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr; + const array_elem_type = array_ty.childType(mod); const dest_is_mut = dest_info.mutable; const dst_elem_type = dest_info.pointee_type; @@ -25603,8 +26202,8 @@ fn coerceExtra( } if (dest_info.sentinel) |dest_sent| { - if (array_ty.sentinel()) |inst_sent| { - if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) { + if (array_ty.sentinel(mod)) |inst_sent| { + if (!dest_sent.eql(inst_sent, dst_elem_type, mod)) { in_memory_result = .{ .ptr_sentinel = .{ .actual = inst_sent, .wanted = dest_sent, @@ -25614,7 +26213,7 @@ fn coerceExtra( } } else { in_memory_result = .{ .ptr_sentinel = .{ - .actual = Value.initTag(.unreachable_value), + .actual = Value.@"unreachable", .wanted = dest_sent, .ty = dst_elem_type, } }; @@ -25640,11 +26239,11 @@ fn coerceExtra( } // coercion from C pointer - if (inst_ty.isCPtr()) src_c_ptr: { + if (inst_ty.isCPtr(mod)) src_c_ptr: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr; // In this case we must add a safety check because the C pointer // could be null. 
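The `single_item` and `src_array_ptr` prongs above correspond to these user-facing pointer coercions; a sketch under the same toolchain assumption:

const std = @import("std");

test "single-item pointer and array pointer coercions" {
    var x: u8 = 42;
    const one: *[1]u8 = &x; // *T coerces to *[1]T (the single_item prong)
    try std.testing.expectEqual(@as(u8, 42), one[0]);

    var arr = [_]u8{ 1, 2, 3 };
    const s: []const u8 = &arr; // *[N]T coerces to a slice (the src_array_ptr prong)
    try std.testing.expectEqual(@as(usize, 3), s.len);
}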
- const src_elem_ty = inst_ty.childType(); + const src_elem_ty = inst_ty.childType(mod); const dest_is_mut = dest_info.mutable; const dst_elem_type = dest_info.pointee_type; switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) { @@ -25656,18 +26255,18 @@ fn coerceExtra( // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer - if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag() == .Pointer) to_anyopaque: { + if (dest_info.pointee_type.toIntern() == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; - const elem_ty = inst_ty.elemType2(); - if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) { + const elem_ty = inst_ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, } }; break :pointer; } - if (dest_ty.isSlice()) break :to_anyopaque; - if (inst_ty.isSlice()) { + if (dest_ty.isSlice(mod)) break :to_anyopaque; + if (inst_ty.isSlice(mod)) { in_memory_result = .{ .slice_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25679,9 +26278,9 @@ fn coerceExtra( switch (dest_info.size) { // coercion to C pointer - .C => switch (inst_ty.zigTypeTag()) { + .C => switch (inst_ty.zigTypeTag(mod)) { .Null => { - return sema.addConstant(dest_ty, Value.null); + return sema.addConstant(dest_ty, try mod.getCoerced(Value.null, dest_ty)); }, .ComptimeInt => { const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { @@ -25691,7 +26290,7 @@ fn coerceExtra( return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, .Int => { - const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) { + const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) { .signed => Type.isize, .unsigned => Type.usize, }; @@ -25707,7 +26306,7 @@ fn coerceExtra( }, .Pointer => p: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p; - const inst_info = inst_ty.ptrInfo().data; + const inst_info = inst_ty.ptrInfo(mod); switch (try sema.coerceInMemoryAllowed( block, dest_info.pointee_type, @@ -25723,7 +26322,7 @@ fn coerceExtra( if (inst_info.size == .Slice) { assert(dest_info.sentinel == null); if (inst_info.sentinel == null or - !inst_info.sentinel.?.eql(Value.zero, dest_info.pointee_type, sema.mod)) + !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -25733,11 +26332,11 @@ fn coerceExtra( }, else => {}, }, - .One => switch (dest_info.pointee_type.zigTypeTag()) { + .One => switch (dest_info.pointee_type.zigTypeTag(mod)) { .Union => { // pointer to anonymous struct to pointer to union - if (inst_ty.isSinglePointer() and - inst_ty.childType().isAnonStruct() and + if (inst_ty.isSinglePointer(mod) and + inst_ty.childType(mod).isAnonStruct(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src); @@ -25745,8 +26344,8 @@ fn coerceExtra( }, .Struct => { // pointer to anonymous struct to pointer to struct - if (inst_ty.isSinglePointer() and - inst_ty.childType().isAnonStruct() and + if (inst_ty.isSinglePointer(mod) and + 
inst_ty.childType(mod).isAnonStruct(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) { @@ -25757,8 +26356,8 @@ fn coerceExtra( }, .Array => { // pointer to tuple to pointer to array - if (inst_ty.isSinglePointer() and - inst_ty.childType().isTuple() and + if (inst_ty.isSinglePointer(mod) and + inst_ty.childType(mod).isTuple(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src); @@ -25767,38 +26366,38 @@ fn coerceExtra( else => {}, }, .Slice => to_slice: { - if (inst_ty.zigTypeTag() == .Array) { + if (inst_ty.zigTypeTag(mod) == .Array) { return sema.fail( block, inst_src, "array literal requires address-of operator (&) to coerce to slice type '{}'", - .{dest_ty.fmt(sema.mod)}, + .{dest_ty.fmt(mod)}, ); } - if (!inst_ty.isSinglePointer()) break :to_slice; - const inst_child_ty = inst_ty.childType(); - if (!inst_child_ty.isTuple()) break :to_slice; + if (!inst_ty.isSinglePointer(mod)) break :to_slice; + const inst_child_ty = inst_ty.childType(mod); + if (!inst_child_ty.isTuple(mod)) break :to_slice; // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. - if (inst_child_ty.structFieldCount() == 0) { + if (inst_child_ty.structFieldCount(mod) == 0) { // Optional slice is represented with a null pointer so // we use a dummy pointer value with the required alignment. - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = if (dest_info.@"align" != 0) - try Value.Tag.int_u64.create(sema.arena, dest_info.@"align") + return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{ + .ty = dest_ty.toIntern(), + .addr = .{ .int = (if (dest_info.@"align" != 0) + try mod.intValue(Type.usize, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(target, sema.arena), - .len = Value.zero, - }); - return sema.addConstant(dest_ty, slice_val); + try mod.getCoerced(try dest_info.pointee_type.lazyAbiAlignment(mod), Type.usize)).toIntern() }, + .len = (try mod.intValue(Type.usize, 0)).toIntern(), + } })).toValue()); } // pointer to tuple to slice if (dest_info.mutable) { const err_msg = err_msg: { - const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(sema.mod)}); + const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(mod)}); errdefer err_msg.deinit(sema.gpa); try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{}); break :err_msg err_msg; @@ -25808,9 +26407,9 @@ fn coerceExtra( return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src); }, .Many => p: { - if (!inst_ty.isSlice()) break :p; + if (!inst_ty.isSlice(mod)) break :p; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p; - const inst_info = inst_ty.ptrInfo().data; + const inst_info = inst_ty.ptrInfo(mod); switch (try sema.coerceInMemoryAllowed( block, @@ -25826,7 +26425,11 @@ fn coerceExtra( } if (dest_info.sentinel == null or inst_info.sentinel == null or - !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod)) + !dest_info.sentinel.?.eql( + try mod.getCoerced(inst_info.sentinel.?, dest_info.pointee_type), + dest_info.pointee_type, + mod, + )) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ 
-25834,25 +26437,25 @@ fn coerceExtra( }, } }, - .Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) { + .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) { .Float, .ComptimeFloat => float: { if (is_undef) { return sema.addConstUndef(dest_ty); } const val = (try sema.resolveMaybeUndefVal(inst)) orelse { - if (dest_ty.zigTypeTag() == .ComptimeInt) { + if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known"); } break :float; }; - if (val.floatHasFraction()) { + if (val.floatHasFraction(mod)) { return sema.fail( block, inst_src, "fractional component prevents float value '{}' from coercion to type '{}'", - .{ val.fmtValue(inst_ty, sema.mod), dest_ty.fmt(sema.mod) }, + .{ val.fmtValue(inst_ty, mod), dest_ty.fmt(mod) }, ); } const result_val = try sema.floatToInt(block, inst_src, val, inst_ty, dest_ty); @@ -25866,19 +26469,19 @@ fn coerceExtra( // comptime-known integer to other number if (!(try sema.intFitsInType(val, dest_ty, null))) { if (!opts.report_err) return error.NotCoercible; - return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }); + return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) }); } - return try sema.addConstant(dest_ty, val); + return try sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty)); } - if (dest_ty.zigTypeTag() == .ComptimeInt) { + if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; if (opts.no_cast_to_comptime_int) return inst; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known"); } // integer widening - const dst_info = dest_ty.intInfo(target); - const src_info = inst_ty.intInfo(target); + const dst_info = dest_ty.intInfo(mod); + const src_info = inst_ty.intInfo(mod); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (dst_info.signedness == .signed and dst_info.bits > src_info.bits)) @@ -25892,10 +26495,10 @@ fn coerceExtra( }, else => {}, }, - .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag()) { + .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) { .ComptimeFloat => { const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const result_val = try val.floatCast(sema.arena, dest_ty, target); + const result_val = try val.floatCast(dest_ty, mod); return try sema.addConstant(dest_ty, result_val); }, .Float => { @@ -25903,17 +26506,17 @@ fn coerceExtra( return sema.addConstUndef(dest_ty); } if (try sema.resolveMaybeUndefVal(inst)) |val| { - const result_val = try val.floatCast(sema.arena, dest_ty, target); - if (!val.eql(result_val, inst_ty, sema.mod)) { + const result_val = try val.floatCast(dest_ty, mod); + if (!val.eql(try result_val.floatCast(inst_ty, mod), inst_ty, mod)) { return sema.fail( block, inst_src, "type '{}' cannot represent float value '{}'", - .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }, + .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) }, ); } return try sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeFloat) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return 
sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -25931,13 +26534,13 @@ fn coerceExtra( return sema.addConstUndef(dest_ty); } const val = (try sema.resolveMaybeUndefVal(inst)) orelse { - if (dest_ty.zigTypeTag() == .ComptimeFloat) { + if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known"); } break :int; }; - const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, sema.mod, sema); + const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, mod, sema); // TODO implement this compile error //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, mod)) { @@ -25945,7 +26548,7 @@ fn coerceExtra( // block, // inst_src, // "type '{}' cannot represent integer value '{}'", - // .{ dest_ty.fmt(sema.mod), val }, + // .{ dest_ty.fmt(mod), val }, // ); //} return try sema.addConstant(dest_ty, result_val); @@ -25955,18 +26558,18 @@ fn coerceExtra( }, else => {}, }, - .Enum => switch (inst_ty.zigTypeTag()) { + .Enum => switch (inst_ty.zigTypeTag(mod)) { .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const bytes = val.castTag(.enum_literal).?.data; - const field_index = dest_ty.enumFieldIndex(bytes) orelse { + const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; + const field_index = dest_ty.enumFieldIndex(string, mod) orelse { const msg = msg: { const msg = try sema.errMsg( block, inst_src, - "no field named '{s}' in enum '{}'", - .{ bytes, dest_ty.fmt(sema.mod) }, + "no field named '{}' in enum '{}'", + .{ string.fmt(&mod.intern_pool), dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -25976,13 +26579,13 @@ fn coerceExtra( }; return sema.addConstant( dest_ty, - try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), + try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)), ); }, .Union => blk: { // union to its own tag type - const union_tag_ty = inst_ty.unionTagType() orelse break :blk; - if (union_tag_ty.eql(dest_ty, sema.mod)) { + const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk; + if (union_tag_ty.eql(dest_ty, mod)) { return sema.unionToTag(block, dest_ty, inst, inst_src); } }, @@ -25991,27 +26594,33 @@ fn coerceExtra( }, else => {}, }, - .ErrorUnion => switch (inst_ty.zigTypeTag()) { + .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) { .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { - switch (inst_val.tag()) { + switch (inst_val.toIntern()) { .undef => return sema.addConstUndef(dest_ty), - .eu_payload => { - const payload = try sema.addConstant( - inst_ty.errorUnionPayload(), - inst_val.castTag(.eu_payload).?.data, - ); - return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) { - error.NotCoercible => break :eu, - else => |e| return e, - }; - }, - else => { - const error_set = try sema.addConstant( - inst_ty.errorUnionSet(), - inst_val, - ); - return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src); + else => switch (mod.intern_pool.indexToKey(inst_val.toIntern())) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| { + const error_set_ty = inst_ty.errorUnionSet(mod); + const error_set_val = try 
sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{ + .ty = error_set_ty.toIntern(), + .name = err_name, + } })).toValue()); + return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src); + }, + .payload => |payload| { + const payload_val = try sema.addConstant( + inst_ty.errorUnionPayload(mod), + payload.toValue(), + ); + return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) { + error.NotCoercible => break :eu, + else => |e| return e, + }; + }, + }, + else => unreachable, }, } } @@ -26031,10 +26640,10 @@ fn coerceExtra( }; }, }, - .Union => switch (inst_ty.zigTypeTag()) { + .Union => switch (inst_ty.zigTypeTag(mod)) { .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { - if (inst_ty.isAnonStruct()) { + if (inst_ty.isAnonStruct(mod)) { return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -26043,13 +26652,13 @@ fn coerceExtra( }, else => {}, }, - .Array => switch (inst_ty.zigTypeTag()) { + .Array => switch (inst_ty.zigTypeTag(mod)) { .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst == .empty_struct) { return sema.arrayInitEmpty(block, inst_src, dest_ty); } - if (inst_ty.isTuple()) { + if (inst_ty.isTuple(mod)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -26058,10 +26667,10 @@ fn coerceExtra( }, else => {}, }, - .Vector => switch (inst_ty.zigTypeTag()) { + .Vector => switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { - if (inst_ty.isTuple()) { + if (inst_ty.isTuple(mod)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -26074,7 +26683,7 @@ fn coerceExtra( if (inst == .empty_struct) { return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src); } - if (inst_ty.isTupleOrAnonStruct()) { + if (inst_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) { error.NotCoercible => break :blk, else => |e| return e, @@ -26093,35 +26702,34 @@ fn coerceExtra( if (!opts.report_err) return error.NotCoercible; - if (opts.is_ret and dest_ty.zigTypeTag() == .NoReturn) { + if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{}); errdefer msg.destroy(sema.gpa); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "'noreturn' declared here", .{}); + const src_decl = mod.declPtr(sema.func.?.owner_decl); + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const msg = msg: { - const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), inst_ty.fmt(mod) }); errdefer msg.destroy(sema.gpa); // E!T to T - if (inst_ty.zigTypeTag() == .ErrorUnion and - (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) + if (inst_ty.zigTypeTag(mod) == .ErrorUnion and + (try 
sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{}); try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{}); } // ?T to T - var buf: Type.Payload.ElemType = undefined; - if (inst_ty.zigTypeTag() == .Optional and - (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) + if (inst_ty.zigTypeTag(mod) == .Optional and + (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{}); try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{}); @@ -26130,18 +26738,18 @@ fn coerceExtra( try in_memory_result.report(sema, block, inst_src, msg); // Add notes about function return type - if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) { + if (opts.is_ret and mod.test_functions.get(sema.func.?.owner_decl) == null) { const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - if (inst_ty.isError() and !dest_ty.isError()) { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function cannot return an error", .{}); + const src_decl = mod.declPtr(sema.func.?.owner_decl); + if (inst_ty.isError(mod) and !dest_ty.isError(mod)) { + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{}); } else { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function return type declared here", .{}); + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{}); } } if (try opts.param_src.get(sema)) |param_src| { - try sema.mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{}); + try mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{}); } // TODO maybe add "cannot store an error in type '{}'" note @@ -26151,6 +26759,84 @@ fn coerceExtra( return sema.failWithOwnedErrorMsg(msg); } +fn coerceValueInMemory( + sema: *Sema, + block: *Block, + val: Value, + src_ty: Type, + dst_ty: Type, + dst_ty_src: LazySrcLoc, +) CompileError!Value { + const mod = sema.mod; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .aggregate => |aggregate| { + const dst_ty_key = mod.intern_pool.indexToKey(dst_ty.toIntern()); + const dest_len = try sema.usizeCast( + block, + dst_ty_src, + mod.intern_pool.aggregateTypeLen(dst_ty.toIntern()), + ); + direct: { + const src_ty_child = switch (mod.intern_pool.indexToKey(src_ty.toIntern())) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type, .struct_type => break :direct, + else => unreachable, + }; + const dst_ty_child = switch (dst_ty_key) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type, .struct_type => break :direct, + else => unreachable, + }; + if (src_ty_child != dst_ty_child) break :direct; + // TODO: write something like getCoercedInts to avoid needing to dupe + return (try mod.intern(.{ .aggregate = .{ + .ty = dst_ty.toIntern(), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = try sema.arena.dupe(u8, bytes[0..dest_len]) }, + .elems => |elems| .{ .elems = try sema.arena.dupe(InternPool.Index, 
elems[0..dest_len]) }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, + } })).toValue(); + } + const dest_elems = try sema.arena.alloc(InternPool.Index, dest_len); + for (dest_elems, 0..) |*dest_elem, i| { + const elem_ty = switch (dst_ty_key) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type => |anon_struct_type| anon_struct_type.types[i], + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).? + .fields.values()[i].ty.toIntern(), + else => unreachable, + }; + dest_elem.* = try mod.intern_pool.getCoerced(mod.gpa, switch (aggregate.storage) { + .bytes => |bytes| (try mod.intValue(Type.u8, bytes[i])).toIntern(), + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }, elem_ty); + } + return (try mod.intern(.{ .aggregate = .{ + .ty = dst_ty.toIntern(), + .storage = .{ .elems = dest_elems }, + } })).toValue(); + }, + .float => |float| (try mod.intern(.{ .float = .{ + .ty = dst_ty.toIntern(), + .storage = float.storage, + } })).toValue(), + else => try mod.getCoerced(val, dst_ty), + }; +} + +fn coerceInMemory( + sema: *Sema, + block: *Block, + val: Value, + src_ty: Type, + dst_ty: Type, + dst_ty_src: LazySrcLoc, +) CompileError!Air.Inst.Ref { + return sema.addConstant(dst_ty, try sema.coerceValueInMemory(block, val, src_ty, dst_ty, dst_ty_src)); +} + const InMemoryCoercionResult = union(enum) { ok, no_match: Pair, @@ -26164,7 +26850,7 @@ const InMemoryCoercionResult = union(enum) { optional_shape: Pair, optional_child: PairAndChild, from_anyerror, - missing_error: []const []const u8, + missing_error: []const InternPool.NullTerminatedString, /// true if wanted is var args fn_var_args: bool, /// true if wanted is generic @@ -26264,6 +26950,7 @@ const InMemoryCoercionResult = union(enum) { } fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void { + const mod = sema.mod; var cur = res; while (true) switch (cur.*) { .ok => unreachable, @@ -26280,7 +26967,7 @@ const InMemoryCoercionResult = union(enum) { }, .error_union_payload => |pair| { try sema.errNote(block, src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26291,20 +26978,20 @@ const InMemoryCoercionResult = union(enum) { break; }, .array_sentinel => |sentinel| { - if (sentinel.actual.tag() != .unreachable_value) { + if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{ - sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod), }); } else { try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{ - sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.wanted.fmtValue(sentinel.ty, mod), }); } break; }, .array_elem => |pair| { try sema.errNote(block, src, msg, "array element type '{}' cannot cast into array element type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26316,21 +27003,19 @@ const InMemoryCoercionResult = union(enum) { }, .vector_elem => |pair| { try sema.errNote(block, src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{ - pair.actual.fmt(sema.mod), 
pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, .optional_shape => |pair| { - var buf_actual: Type.Payload.ElemType = undefined; - var buf_wanted: Type.Payload.ElemType = undefined; try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - pair.actual.optionalChild(&buf_actual).fmt(sema.mod), pair.wanted.optionalChild(&buf_wanted).fmt(sema.mod), + pair.actual.optionalChild(mod).fmt(mod), pair.wanted.optionalChild(mod).fmt(mod), }); break; }, .optional_child => |pair| { try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26340,7 +27025,7 @@ const InMemoryCoercionResult = union(enum) { }, .missing_error => |missing_errors| { for (missing_errors) |err| { - try sema.errNote(block, src, msg, "'error.{s}' not a member of destination error set", .{err}); + try sema.errNote(block, src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&mod.intern_pool)}); } break; }, @@ -26394,7 +27079,7 @@ const InMemoryCoercionResult = union(enum) { }, .fn_param => |param| { try sema.errNote(block, src, msg, "parameter {d} '{}' cannot cast into '{}'", .{ - param.index, param.actual.fmt(sema.mod), param.wanted.fmt(sema.mod), + param.index, param.actual.fmt(mod), param.wanted.fmt(mod), }); cur = param.child; }, @@ -26404,13 +27089,13 @@ const InMemoryCoercionResult = union(enum) { }, .fn_return_type => |pair| { try sema.errNote(block, src, msg, "return type '{}' cannot cast into return type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, .ptr_child => |pair| { try sema.errNote(block, src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26419,13 +27104,13 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_sentinel => |sentinel| { - if (sentinel.actual.tag() != .unreachable_value) { + if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{ - sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod), }); } else { try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{ - sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.wanted.fmtValue(sentinel.ty, mod), }); } break; @@ -26445,15 +27130,15 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_allowzero => |pair| { - const wanted_allow_zero = pair.wanted.ptrAllowsZero(); - const actual_allow_zero = pair.actual.ptrAllowsZero(); + const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod); + const actual_allow_zero = pair.actual.ptrAllowsZero(mod); if (actual_allow_zero and !wanted_allow_zero) { try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); } else { try sema.errNote(block, src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{ - pair.actual.fmt(sema.mod), 
pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); } break; @@ -26479,13 +27164,13 @@ const InMemoryCoercionResult = union(enum) { }, .double_ptr_to_anyopaque => |pair| { try sema.errNote(block, src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); break; }, .slice_to_anyopaque => |pair| { try sema.errNote(block, src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); try sema.errNote(block, src, msg, "consider using '.ptr'", .{}); break; @@ -26522,13 +27207,18 @@ fn coerceInMemoryAllowed( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) CompileError!InMemoryCoercionResult { - if (dest_ty.eql(src_ty, sema.mod)) + const mod = sema.mod; + + if (dest_ty.eql(src_ty, mod)) return .ok; + const dest_tag = dest_ty.zigTypeTag(mod); + const src_tag = src_ty.zigTypeTag(mod); + // Differently-named integers with the same number of bits. - if (dest_ty.zigTypeTag() == .Int and src_ty.zigTypeTag() == .Int) { - const dest_info = dest_ty.intInfo(target); - const src_info = src_ty.intInfo(target); + if (dest_tag == .Int and src_tag == .Int) { + const dest_info = dest_ty.intInfo(mod); + const src_info = src_ty.intInfo(mod); if (dest_info.signedness == src_info.signedness and dest_info.bits == src_info.bits) @@ -26551,7 +27241,7 @@ fn coerceInMemoryAllowed( } // Differently-named floats with the same number of bits. - if (dest_ty.zigTypeTag() == .Float and src_ty.zigTypeTag() == .Float) { + if (dest_tag == .Float and src_tag == .Float) { const dest_bits = dest_ty.floatBits(target); const src_bits = src_ty.floatBits(target); if (dest_bits == src_bits) { @@ -26560,10 +27250,8 @@ fn coerceInMemoryAllowed( } // Pointers / Pointer-like Optionals - var dest_buf: Type.Payload.ElemType = undefined; - var src_buf: Type.Payload.ElemType = undefined; - const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty, &dest_buf); - const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty, &src_buf); + const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty); + const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty); if (maybe_dest_ptr_ty) |dest_ptr_ty| { if (maybe_src_ptr_ty) |src_ptr_ty| { return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src); @@ -26571,13 +27259,10 @@ fn coerceInMemoryAllowed( } // Slices - if (dest_ty.isSlice() and src_ty.isSlice()) { + if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) { return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } - const dest_tag = dest_ty.zigTypeTag(); - const src_tag = src_ty.zigTypeTag(); - // Functions if (dest_tag == .Fn and src_tag == .Fn) { return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src); @@ -26585,8 +27270,8 @@ fn coerceInMemoryAllowed( // Error Unions if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) { - const dest_payload = dest_ty.errorUnionPayload(); - const src_payload = src_ty.errorUnionPayload(); + const dest_payload = dest_ty.errorUnionPayload(mod); + const src_payload = src_ty.errorUnionPayload(mod); const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src); if (child != .ok) { return 
InMemoryCoercionResult{ .error_union_payload = .{ @@ -26595,7 +27280,7 @@ fn coerceInMemoryAllowed( .wanted = dest_payload, } }; } - return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target, dest_src, src_src); + return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src); } // Error Sets @@ -26605,8 +27290,8 @@ fn coerceInMemoryAllowed( // Arrays if (dest_tag == .Array and src_tag == .Array) { - const dest_info = dest_ty.arrayInfo(); - const src_info = src_ty.arrayInfo(); + const dest_info = dest_ty.arrayInfo(mod); + const src_info = src_ty.arrayInfo(mod); if (dest_info.len != src_info.len) { return InMemoryCoercionResult{ .array_len = .{ .actual = src_info.len, @@ -26624,11 +27309,15 @@ fn coerceInMemoryAllowed( } const ok_sent = dest_info.sentinel == null or (src_info.sentinel != null and - dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, sema.mod)); + dest_info.sentinel.?.eql( + try mod.getCoerced(src_info.sentinel.?, dest_info.elem_type), + dest_info.elem_type, + mod, + )); if (!ok_sent) { return InMemoryCoercionResult{ .array_sentinel = .{ - .actual = src_info.sentinel orelse Value.initTag(.unreachable_value), - .wanted = dest_info.sentinel orelse Value.initTag(.unreachable_value), + .actual = src_info.sentinel orelse Value.@"unreachable", + .wanted = dest_info.sentinel orelse Value.@"unreachable", .ty = dest_info.elem_type, } }; } @@ -26637,8 +27326,8 @@ fn coerceInMemoryAllowed( // Vectors if (dest_tag == .Vector and src_tag == .Vector) { - const dest_len = dest_ty.vectorLen(); - const src_len = src_ty.vectorLen(); + const dest_len = dest_ty.vectorLen(mod); + const src_len = src_ty.vectorLen(mod); if (dest_len != src_len) { return InMemoryCoercionResult{ .vector_len = .{ .actual = src_len, @@ -26646,8 +27335,8 @@ fn coerceInMemoryAllowed( } }; } - const dest_elem_ty = dest_ty.scalarType(); - const src_elem_ty = src_ty.scalarType(); + const dest_elem_ty = dest_ty.scalarType(mod); + const src_elem_ty = src_ty.scalarType(mod); const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src); if (child != .ok) { return InMemoryCoercionResult{ .vector_elem = .{ @@ -26668,15 +27357,15 @@ fn coerceInMemoryAllowed( .wanted = dest_ty, } }; } - const dest_child_type = dest_ty.optionalChild(&dest_buf); - const src_child_type = src_ty.optionalChild(&src_buf); + const dest_child_type = dest_ty.optionalChild(mod); + const src_child_type = src_ty.optionalChild(mod); const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src); if (child != .ok) { return InMemoryCoercionResult{ .optional_child = .{ .child = try child.dupe(sema.arena), - .actual = try src_child_type.copy(sema.arena), - .wanted = try dest_child_type.copy(sema.arena), + .actual = src_child_type, + .wanted = dest_child_type, } }; } @@ -26697,138 +27386,108 @@ fn coerceInMemoryAllowedErrorSets( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; + // Coercion to `anyerror`. Note that this check can return false negatives // in case the error sets did not get resolved. 
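coerceInMemoryAllowedErrorSets, beginning above, validates the subset rule for error sets. At the language level that rule surfaces as follows (illustrative sketch, not part of this diff):

const std = @import("std");

test "error set subset coercion" {
    const Small = error{Oops};
    const Big = error{ Oops, Other };

    const small_err: Small = error.Oops;
    const big_err: Big = small_err; // a subset coerces to a superset
    const any: anyerror = big_err; // every error set coerces to anyerror
    try std.testing.expectEqual(@as(anyerror, error.Oops), any);
}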
- if (dest_ty.isAnyError()) { + if (dest_ty.isAnyError(mod)) { return .ok; } - if (dest_ty.castTag(.error_set_inferred)) |dst_payload| { - const dst_ies = dst_payload.data; + if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| { + const dst_ies = mod.inferredErrorSetPtr(dst_ies_index); // We will make an effort to return `ok` without resolving either error set, to // avoid unnecessary "unable to resolve error set" dependency loop errors. - switch (src_ty.tag()) { - .error_set_inferred => { - // If both are inferred error sets of functions, and - // the dest includes the source function, the coercion is OK. - // This check is important because it works without forcing a full resolution - // of inferred error sets. - const src_ies = src_ty.castTag(.error_set_inferred).?.data; - - if (dst_ies.inferred_error_sets.contains(src_ies)) { - return .ok; - } + switch (src_ty.toIntern()) { + .anyerror_type => {}, + else => switch (ip.indexToKey(src_ty.toIntern())) { + .inferred_error_set_type => |src_index| { + // If both are inferred error sets of functions, and + // the dest includes the source function, the coercion is OK. + // This check is important because it works without forcing a full resolution + // of inferred error sets. + if (dst_ies.inferred_error_sets.contains(src_index)) { + return .ok; + } + }, + .error_set_type => |error_set_type| { + for (error_set_type.names) |name| { + if (!dst_ies.errors.contains(name)) break; + } else return .ok; + }, + else => unreachable, }, - .error_set_single => { - const name = src_ty.castTag(.error_set_single).?.data; - if (dst_ies.errors.contains(name)) return .ok; - }, - .error_set_merged => { - const names = src_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - if (!dst_ies.errors.contains(name)) break; - } else return .ok; - }, - .error_set => { - const names = src_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - if (!dst_ies.errors.contains(name)) break; - } else return .ok; - }, - .anyerror => {}, - else => unreachable, } - if (dst_ies.func == sema.owner_func) { + if (dst_ies.func == sema.owner_func_index.unwrap()) { // We are trying to coerce an error set to the current function's // inferred error set. - try dst_ies.addErrorSet(sema.gpa, src_ty); + try dst_ies.addErrorSet(src_ty, ip, gpa); return .ok; } - try sema.resolveInferredErrorSet(block, dest_src, dst_payload.data); + try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index); // isAnyError might have changed from a false negative to a true positive after resolution. - if (dest_ty.isAnyError()) { + if (dest_ty.isAnyError(mod)) { return .ok; } } - var missing_error_buf = std.ArrayList([]const u8).init(sema.gpa); + var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa); defer missing_error_buf.deinit(); - switch (src_ty.tag()) { - .error_set_inferred => { - const src_data = src_ty.castTag(.error_set_inferred).?.data; - - try sema.resolveInferredErrorSet(block, src_src, src_data); - // src anyerror status might have changed after the resolution. - if (src_ty.isAnyError()) { - // dest_ty.isAnyError() == true is already checked for at this point. 
- return .from_anyerror; - } - - for (src_data.errors.keys()) |key| { - if (!dest_ty.errorSetHasField(key)) { - try missing_error_buf.append(key); - } - } - - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } - - return .ok; - }, - .error_set_single => { - const name = src_ty.castTag(.error_set_single).?.data; - if (dest_ty.errorSetHasField(name)) { - return .ok; - } - const list = try sema.arena.alloc([]const u8, 1); - list[0] = name; - return InMemoryCoercionResult{ .missing_error = list }; - }, - .error_set_merged => { - const names = src_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - if (!dest_ty.errorSetHasField(name)) { - try missing_error_buf.append(name); - } - } - - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } - - return .ok; - }, - .error_set => { - const names = src_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - if (!dest_ty.errorSetHasField(name)) { - try missing_error_buf.append(name); - } - } - - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } - - return .ok; - }, - .anyerror => switch (dest_ty.tag()) { - .error_set_inferred => unreachable, // Caught by dest_ty.isAnyError() above. - .error_set_single, .error_set_merged, .error_set => return .from_anyerror, - .anyerror => unreachable, // Filtered out above. + switch (src_ty.toIntern()) { + .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) { + .inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above. + .simple_type => unreachable, // filtered out above + .error_set_type => return .from_anyerror, + else => unreachable, + }, + + else => switch (ip.indexToKey(src_ty.toIntern())) { + .inferred_error_set_type => |src_index| { + const src_data = mod.inferredErrorSetPtr(src_index); + + try sema.resolveInferredErrorSet(block, src_src, src_index); + // src anyerror status might have changed after the resolution. + if (src_ty.isAnyError(mod)) { + // dest_ty.isAnyError(mod) == true is already checked for at this point. 
+ return .from_anyerror; + } + + for (src_data.errors.keys()) |key| { + if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) { + try missing_error_buf.append(key); + } + } + + if (missing_error_buf.items.len != 0) { + return InMemoryCoercionResult{ + .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items), + }; + } + + return .ok; + }, + .error_set_type => |error_set_type| { + for (error_set_type.names) |name| { + if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) { + try missing_error_buf.append(name); + } + } + + if (missing_error_buf.items.len != 0) { + return InMemoryCoercionResult{ + .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items), + }; + } + + return .ok; + }, else => unreachable, }, - else => unreachable, } unreachable; @@ -26843,68 +27502,94 @@ fn coerceInMemoryAllowedFns( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { - const dest_info = dest_ty.fnInfo(); - const src_info = src_ty.fnInfo(); + const mod = sema.mod; - if (dest_info.is_var_args != src_info.is_var_args) { - return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args }; - } + { + const dest_info = mod.typeToFunc(dest_ty).?; + const src_info = mod.typeToFunc(src_ty).?; - if (dest_info.is_generic != src_info.is_generic) { - return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic }; - } + if (dest_info.is_var_args != src_info.is_var_args) { + return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args }; + } - if (dest_info.cc != src_info.cc) { - return InMemoryCoercionResult{ .fn_cc = .{ - .actual = src_info.cc, - .wanted = dest_info.cc, - } }; - } + if (dest_info.is_generic != src_info.is_generic) { + return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic }; + } - if (!src_info.return_type.isNoReturn()) { - const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type, src_info.return_type, false, target, dest_src, src_src); - if (rt != .ok) { - return InMemoryCoercionResult{ .fn_return_type = .{ - .child = try rt.dupe(sema.arena), - .actual = src_info.return_type, - .wanted = dest_info.return_type, + if (dest_info.cc != src_info.cc) { + return InMemoryCoercionResult{ .fn_cc = .{ + .actual = src_info.cc, + .wanted = dest_info.cc, } }; } + + switch (src_info.return_type) { + .noreturn_type, .generic_poison_type => {}, + else => { + const dest_return_type = dest_info.return_type.toType(); + const src_return_type = src_info.return_type.toType(); + const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src); + if (rt != .ok) { + return InMemoryCoercionResult{ .fn_return_type = .{ + .child = try rt.dupe(sema.arena), + .actual = src_return_type, + .wanted = dest_return_type, + } }; + } + }, + } } - if (dest_info.param_types.len != src_info.param_types.len) { - return InMemoryCoercionResult{ .fn_param_count = .{ - .actual = src_info.param_types.len, - .wanted = dest_info.param_types.len, - } }; - } + const params_len = params_len: { + const dest_info = mod.typeToFunc(dest_ty).?; + const src_info = mod.typeToFunc(src_ty).?; - if (dest_info.noalias_bits != src_info.noalias_bits) { - return InMemoryCoercionResult{ .fn_param_noalias = .{ - .actual = src_info.noalias_bits, - .wanted = dest_info.noalias_bits, - } }; - } + if (dest_info.param_types.len != src_info.param_types.len) { + return InMemoryCoercionResult{ .fn_param_count = .{ + .actual = src_info.param_types.len, + .wanted = 
dest_info.param_types.len, + } }; + } - for (dest_info.param_types, 0..) |dest_param_ty, i| { - const src_param_ty = src_info.param_types[i]; + if (dest_info.noalias_bits != src_info.noalias_bits) { + return InMemoryCoercionResult{ .fn_param_noalias = .{ + .actual = src_info.noalias_bits, + .wanted = dest_info.noalias_bits, + } }; + } - if (dest_info.comptime_params[i] != src_info.comptime_params[i]) { + break :params_len dest_info.param_types.len; + }; + + for (0..params_len) |param_i| { + const dest_info = mod.typeToFunc(dest_ty).?; + const src_info = mod.typeToFunc(src_ty).?; + + const dest_param_ty = dest_info.param_types[param_i].toType(); + const src_param_ty = src_info.param_types[param_i].toType(); + + const param_i_small = @intCast(u5, param_i); + if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) { return InMemoryCoercionResult{ .fn_param_comptime = .{ - .index = i, - .wanted = dest_info.comptime_params[i], + .index = param_i, + .wanted = dest_info.paramIsComptime(param_i_small), } }; } - // Note: Cast direction is reversed here. - const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src); - if (param != .ok) { - return InMemoryCoercionResult{ .fn_param = .{ - .child = try param.dupe(sema.arena), - .actual = src_param_ty, - .wanted = dest_param_ty, - .index = i, - } }; + switch (src_param_ty.toIntern()) { + .generic_poison_type => {}, + else => { + // Note: Cast direction is reversed here. + const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src); + if (param != .ok) { + return InMemoryCoercionResult{ .fn_param = .{ + .child = try param.dupe(sema.arena), + .actual = src_param_ty, + .wanted = dest_param_ty, + .index = param_i, + } }; + } + }, } } @@ -26923,8 +27608,9 @@ fn coerceInMemoryAllowedPtrs( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { - const dest_info = dest_ptr_ty.ptrInfo().data; - const src_info = src_ptr_ty.ptrInfo().data; + const mod = sema.mod; + const dest_info = dest_ptr_ty.ptrInfo(mod); + const src_info = src_ptr_ty.ptrInfo(mod); const ok_ptr_size = src_info.size == dest_info.size or src_info.size == .C or dest_info.size == .C; @@ -26964,8 +27650,8 @@ fn coerceInMemoryAllowedPtrs( } }; } - const dest_allow_zero = dest_ty.ptrAllowsZero(); - const src_allow_zero = src_ty.ptrAllowsZero(); + const dest_allow_zero = dest_ty.ptrAllowsZero(mod); + const src_allow_zero = src_ty.ptrAllowsZero(mod); const ok_allows_zero = (dest_allow_zero and (src_allow_zero or !dest_is_mut)) or @@ -26989,12 +27675,15 @@ fn coerceInMemoryAllowedPtrs( } const ok_sent = dest_info.sentinel == null or src_info.size == .C or - (src_info.sentinel != null and - dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type, sema.mod)); + (src_info.sentinel != null and dest_info.sentinel.?.eql( + try mod.getCoerced(src_info.sentinel.?, dest_info.pointee_type), + dest_info.pointee_type, + sema.mod, + )); if (!ok_sent) { return InMemoryCoercionResult{ .ptr_sentinel = .{ - .actual = src_info.sentinel orelse Value.initTag(.unreachable_value), - .wanted = dest_info.sentinel orelse Value.initTag(.unreachable_value), + .actual = src_info.sentinel orelse Value.@"unreachable", + .wanted = dest_info.sentinel orelse Value.@"unreachable", .ty = dest_info.pointee_type, } }; } @@ -27013,12 +27702,12 @@ fn coerceInMemoryAllowedPtrs( const src_align = if (src_info.@"align" != 0) src_info.@"align" else - 
src_info.pointee_type.abiAlignment(target); + src_info.pointee_type.abiAlignment(mod); const dest_align = if (dest_info.@"align" != 0) dest_info.@"align" else - dest_info.pointee_type.abiAlignment(target); + dest_info.pointee_type.abiAlignment(mod); if (dest_align > src_align) { return InMemoryCoercionResult{ .ptr_alignment = .{ @@ -27041,8 +27730,9 @@ fn coerceVarArgParam( ) !Air.Inst.Ref { if (block.is_typeof) return inst; + const mod = sema.mod; const uncasted_ty = sema.typeOf(inst); - const coerced = switch (uncasted_ty.zigTypeTag()) { + const coerced = switch (uncasted_ty.zigTypeTag(mod)) { // TODO consider casting to c_int/f64 if they fit .ComptimeInt, .ComptimeFloat => return sema.fail( block, @@ -27052,7 +27742,7 @@ ), .Fn => blk: { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const fn_decl = fn_val.pointerDecl().?; + const fn_decl = fn_val.pointerDecl(mod).?; break :blk try sema.analyzeDeclRef(fn_decl); }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), @@ -27077,7 +27767,7 @@ fn coerceVarArgParam( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl), coerced_ty, .param_ty); + try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl, mod), coerced_ty, .param_ty); try sema.addDeclaredHereNote(msg, coerced_ty); break :msg msg; @@ -27109,11 +27799,12 @@ fn storePtr2( operand_src: LazySrcLoc, air_tag: Air.Inst.Tag, ) CompileError!void { + const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - if (ptr_ty.isConstPtr()) + if (ptr_ty.isConstPtr(mod)) return sema.fail(block, ptr_src, "cannot assign to constant", .{}); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); // To generate better code for tuples, we detect a tuple operand here, and // analyze field loads and stores directly. This avoids an extra allocation + memcpy. // This code does not handle tuple-to-struct coercion, which requires dealing with missing // fields.
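The tuple fast path that begins below corresponds to this user-visible behavior; a minimal sketch (illustrative only):

const std = @import("std");

test "tuple stored through an array pointer" {
    var buf: [2]u8 = .{ 0, 0 };
    buf = .{ 1, 2 }; // tuple operand stored field-by-field into the array
    try std.testing.expectEqual(@as(u8, 2), buf[1]);
}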
const operand_ty = sema.typeOf(uncasted_operand); - if (operand_ty.isTuple() and elem_ty.zigTypeTag() == .Array) { - const field_count = operand_ty.structFieldCount(); + if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) { + const field_count = operand_ty.structFieldCount(mod); var i: u32 = 0; while (i < field_count) : (i += 1) { const elem_src = operand_src; // TODO better source location @@ -27149,7 +27840,7 @@ fn storePtr2( // as well as working around an LLVM bug: // https://github.com/ziglang/zig/issues/11154 if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| { - const vector_ty = sema.typeOf(vector_ptr).childType(); + const vector_ty = sema.typeOf(vector_ptr).childType(mod); const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, @@ -27169,7 +27860,7 @@ fn storePtr2( try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(mod)) { try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); return; } else break :rs ptr_src; @@ -27190,7 +27881,7 @@ fn storePtr2( try sema.requireRuntimeBlock(block, src, runtime_src); try sema.queueFullTypeResolution(elem_ty); - if (ptr_ty.ptrInfo().data.vector_index == .runtime) { + if (ptr_ty.ptrInfo(mod).vector_index == .runtime) { const ptr_inst = Air.refToIndex(ptr).?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[ptr_inst] == .ptr_elem_ptr) { @@ -27224,30 +27915,27 @@ fn storePtr2( /// pointer. Only if the final element type matches the vector element type, and the /// lengths match. fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { - const array_ty = sema.typeOf(ptr).childType(); - if (array_ty.zigTypeTag() != .Array) return null; - var ptr_inst = Air.refToIndex(ptr) orelse return null; + const mod = sema.mod; + const array_ty = sema.typeOf(ptr).childType(mod); + if (array_ty.zigTypeTag(mod) != .Array) return null; + var ptr_ref = ptr; + var ptr_inst = Air.refToIndex(ptr_ref) orelse return null; const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); - const prev_ptr = while (air_tags[ptr_inst] == .bitcast) { - const prev_ptr = air_datas[ptr_inst].ty_op.operand; - const prev_ptr_ty = sema.typeOf(prev_ptr); - const prev_ptr_child_ty = switch (prev_ptr_ty.tag()) { - .single_mut_pointer => prev_ptr_ty.castTag(.single_mut_pointer).?.data, - .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type, - else => return null, - }; - if (prev_ptr_child_ty.zigTypeTag() == .Vector) break prev_ptr; - ptr_inst = Air.refToIndex(prev_ptr) orelse return null; + const vector_ty = while (air_tags[ptr_inst] == .bitcast) { + ptr_ref = air_datas[ptr_inst].ty_op.operand; + if (!sema.isKnownZigType(ptr_ref, .Pointer)) return null; + const child_ty = sema.typeOf(ptr_ref).childType(mod); + if (child_ty.zigTypeTag(mod) == .Vector) break child_ty; + ptr_inst = Air.refToIndex(ptr_ref) orelse return null; } else return null; // We have a pointer-to-array and a pointer-to-vector. If the elements and // lengths match, return the result. 
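obtainBitCastedVectorPtr, shown below, serves stores of array values through pointers that were bitcast from vector pointers. The language-level rule it relies on is that arrays and vectors with matching length and element type coerce to each other; a sketch (illustrative, not part of this diff):

const std = @import("std");

test "array and vector coerce when length and element type match" {
    const arr: [4]u8 = .{ 1, 2, 3, 4 };
    const v: @Vector(4, u8) = arr; // array to vector
    const back: [4]u8 = v; // vector to array
    try std.testing.expectEqual(@as(u8, 4), back[3]);
}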
- const vector_ty = sema.typeOf(prev_ptr).childType(); - if (array_ty.childType().eql(vector_ty.childType(), sema.mod) and - array_ty.arrayLen() == vector_ty.vectorLen()) + if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and + array_ty.arrayLen(mod) == vector_ty.vectorLen(mod)) { - return prev_ptr; + return ptr_ref; } else { return null; } @@ -27263,54 +27951,55 @@ fn storePtrVal( operand_val: Value, operand_ty: Type, ) !void { + const mod = sema.mod; var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty); - try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut); + try sema.checkComptimeVarStore(block, src, mut_kit.mut_decl); switch (mut_kit.pointee) { .direct => |val_ptr| { - if (mut_kit.decl_ref_mut.runtime_index == .comptime_field_ptr) { - if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) { + if (mut_kit.mut_decl.runtime_index == .comptime_field_ptr) { + if (!operand_val.eql(val_ptr.*, operand_ty, mod)) { // TODO use failWithInvalidComptimeFieldStore return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{}); } return; } - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); - - val_ptr.* = try operand_val.copy(arena); + val_ptr.* = (try operand_val.intern(operand_ty, mod)).toValue(); }, .reinterpret => |reinterpret| { - const target = sema.mod.getTarget(); - const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { + reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + operand_val.writeToMemory(operand_ty, mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); - - reinterpret.val_ptr.* = try Value.readFromMemory(mut_kit.ty, sema.mod, buffer, arena); + reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, sema.arena)).intern(mut_kit.ty, mod)).toValue(); }, .bad_decl_ty, .bad_ptr_ty => { // TODO show the decl declaration site in a note and explain whether the decl // or the pointer is the problematic type - return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory 
layout", .{mut_kit.ty.fmt(sema.mod)}); + return sema.fail( + block, + src, + "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", + .{mut_kit.ty.fmt(mod)}, + ); }, } } const ComptimePtrMutationKit = struct { - decl_ref_mut: Value.Payload.DeclRefMut.Data, + mut_decl: InternPool.Key.Ptr.Addr.MutDecl, pointee: union(enum) { /// The pointer type matches the actual comptime Value so a direct /// modification is possible. @@ -27333,18 +28022,6 @@ const ComptimePtrMutationKit = struct { bad_ptr_ty, }, ty: Type, - decl_arena: std.heap.ArenaAllocator = undefined, - - fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); - return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); - } - - fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); - decl.value_arena.?.release(&self.decl_arena); - self.decl_arena = undefined; - } }; fn beginComptimePtrMutation( @@ -27354,201 +28031,251 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; - const decl = sema.mod.declPtr(decl_ref_mut.decl_index); - return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); + const mod = sema.mod; + const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; + switch (ptr.addr) { + .decl, .int => unreachable, // isComptimeMutablePtr has been checked already + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl); }, - .comptime_field_ptr => { - const payload = ptr_val.castTag(.comptime_field_ptr).?.data; + .comptime_field => |comptime_field| { const duped = try sema.arena.create(Value); - duped.* = payload.field_val; - return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ - .decl_index = @intToEnum(Module.Decl.Index, 0), + duped.* = comptime_field.toValue(); + return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(comptime_field).toType(), duped, ptr_elem_ty, .{ + .decl = undefined, .runtime_index = .comptime_field_ptr, }); }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); + .eu_payload => |eu_ptr| { + const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.toValue(), eu_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.errorUnionPayload(mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the error union from `undef` to `opt_payload`. 
+ + const payload = try sema.arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .eu_payload }, + .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), + }; + + val_ptr.* = Value.initPayload(&payload.base); + + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = eu_ty, + }, + } + }, + .opt_payload => |opt_ptr| { + const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.toValue(), opt_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.optionalChild(mod); + switch (val_ptr.ip_index) { + .none => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, + else => { + const payload_val = switch (mod.intern_pool.indexToKey(val_ptr.ip_index)) { + .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .opt => |opt| switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + else => |payload| payload, + }, + else => unreachable, + }; + + // An optional has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the optional from `undef` to `opt_payload`. + + const payload = try sema.arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .opt_payload }, + .data = payload_val.toValue(), + }; + + val_ptr.* = Value.initPayload(&payload.base); + + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + }, + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = opt_ty, + }, + } + }, + .elem => |elem_ptr| { + const base_elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.base.toValue(), base_elem_ty); switch (parent.pointee) { - .direct => |val_ptr| switch (parent.ty.zigTypeTag()) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(); + const check_len = parent.ty.arrayLenIncludingSentinel(mod); if (elem_ptr.index >= check_len) { // TODO have the parent include the decl so we can say "declared here" return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ elem_ptr.index, check_len, }); } - const elem_ty = parent.ty.childType(); + const elem_ty = parent.ty.childType(mod); // We might have a pointer to multiple elements of the array (e.g. a pointer // to a sub-array). In this case, we just have to reinterpret the relevant // bytes of the whole array rather than any single element. 
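A hedged sketch of a comptime store that lands in this multi-element reinterpret case rather than the single-element path:

    comptime {
        var arr: [4]u8 = .{ 0, 0, 0, 0 };
        const sub: *[2]u8 = arr[1..3]; // pointee spans two elements of the parent array
        sub.* = .{ 0xaa, 0xbb };       // stored by reinterpreting the parent's bytes
    }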
- const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); return .{ - .decl_ref_mut = parent.decl_ref_mut, + .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, + .byte_offset = elem_abi_size * elem_idx, } }, .ty = parent.ty, }; } - switch (val_ptr.tag()) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + switch (val_ptr.ip_index) { + .none => switch (val_ptr.tag()) { + .bytes => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `bytes` tag, and handle it without making a call to this function. + const arena = sema.arena; - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.undef); + const bytes = val_ptr.castTag(.bytes).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + // bytes.len may be one greater than dest_len because of the case when + // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. + assert(bytes.len >= dest_len); + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (elems, 0..) |*elem, i| { + elem.* = try mod.intValue(elem_ty, bytes[i]); + } - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - return beginComptimePtrMutationInner( + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[@intCast(usize, elem_ptr.index)], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .repeated => { + // An array is memory-optimized to store only a single element value, and + // that value is understood to be the same for the entire length of the array. + // However, now we want to modify an individual field and so the + // representation has to change. If we wanted to avoid this, there would + // need to be special detection elsewhere to identify when writing a value to an + // array element that is stored using the `repeated` tag, and handle it + // without making a call to this function. 
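Roughly, the kind of code that forces this expansion (a sketch; that `**` repetition is stored with the `repeated` tag is an assumption):

    comptime {
        var arr = [_]u32{7} ** 8; // plausibly represented with the `repeated` tag
        arr[3] = 9;               // first per-element write expands it to `aggregate`
    }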
+ const arena = sema.arena; + + const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + if (elems.len > 0) elems[0] = repeated_val; + for (elems[1..]) |*elem| { + elem.* = try repeated_val.copy(arena); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[@intCast(usize, elem_ptr.index)], + ptr_elem_ty, + parent.mut_decl, + ); + }, + + .aggregate => return beginComptimePtrMutationInner( sema, block, src, elem_ty, - &elems[elem_ptr.index], + &val_ptr.castTag(.aggregate).?.data[@intCast(usize, elem_ptr.index)], ptr_elem_ty, - parent.decl_ref_mut, - ); + parent.mut_decl, + ), + + else => unreachable, }, - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) { + .undef => { + // An array has been initialized to undefined at comptime and now we + // are for the first time setting an element. We must change the representation + // of the array from `undef` to `array`. + const arena = sema.arena; - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try Value.Tag.int_u64.create(arena, bytes[i]); - } + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + @memset(elems, (try mod.intern(.{ .undef = elem_ty.toIntern() })).toValue()); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[@intCast(usize, elem_ptr.index)], + ptr_elem_ty, + parent.mut_decl, + ); + }, + else => unreachable, }, - .str_lit => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `str_lit` tag, and handle it without making a call to this function. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(); - const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (bytes, 0..) |byte, i| { - elems[i] = try Value.Tag.int_u64.create(arena, byte); - } - if (parent.ty.sentinel()) |sent_val| { - assert(elems.len == bytes.len + 1); - elems[bytes.len] = sent_val; - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); - const elems = try arena.alloc(Value, array_len_including_sentinel); - if (elems.len > 0) elems[0] = repeated_val; - for (elems[1..]) |*elem| { - elem.* = try repeated_val.copy(arena); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - - .the_only_possible_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - - else => unreachable, } }, else => { @@ -27565,28 +28292,29 @@ fn beginComptimePtrMutation( parent.ty, val_ptr, ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, }, .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout()) { + if (!base_elem_ty.hasWellDefinedLayout(mod)) { // Even though the parent value type has well-defined memory layout, our // pointer type does not. 
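Both types having a well-defined layout is what makes the byte-offset bookkeeping valid; a hedged example of a comptime store relying on it (std.mem.asBytes is used merely as a convenient reinterpreting cast):

    comptime {
        const std = @import("std");
        var x: u32 = 0x01020304;
        const bytes = std.mem.asBytes(&x); // *[4]u8 view; u32 and [4]u8 both have defined layouts
        bytes[0] = 0xff;                   // written via the reinterpret machinery above
    }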
return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, + .mut_decl = parent.mut_decl, .pointee = .bad_ptr_ty, - .ty = elem_ptr.elem_ty, + .ty = base_elem_ty, }; } - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, + .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_idx, } }, .ty = parent.ty, }; @@ -27594,162 +28322,184 @@ fn beginComptimePtrMutation( .bad_decl_ty, .bad_ptr_ty => return parent, } }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); + .field => |field_ptr| { + const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + const field_index = @intCast(u32, field_ptr.index); - var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); + var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty); switch (parent.pointee) { - .direct => |val_ptr| switch (val_ptr.tag()) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - switch (parent.ty.zigTypeTag()) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount()); - @memset(fields, Value.undef); - - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &fields[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - payload.* = .{ .data = .{ - .tag = try Value.Tag.enum_field_index.create(arena, field_index), - .val = Value.undef, - } }; - - val_ptr.* = Value.initPayload(&payload.base); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &payload.data.val, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .Pointer => { - assert(parent.ty.isSlice()); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.undef, - .len = Value.undef, - }); - - switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - else => unreachable, - } - }, - else => unreachable, - } - }, - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - - .@"union" => { - // 
We need to set the active field of the union. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try Value.Tag.enum_field_index.create(arena, field_index); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &payload.val, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .slice => switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - else => unreachable, - }, - - .empty_struct_value => { + .direct => |val_ptr| switch (val_ptr.ip_index) { + .empty_struct => { const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); + duped.* = val_ptr.*; return beginComptimePtrMutationInner( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), duped, ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, + .none => switch (val_ptr.tag()) { + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &val_ptr.castTag(.aggregate).?.data[field_index], + ptr_elem_ty, + parent.mut_decl, + ), + .repeated => { + const arena = sema.arena; - else => unreachable, + const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(elems, val_ptr.castTag(.repeated).?.data); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &elems[field_index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .@"union" => { + // We need to set the active field of the union. + const union_tag_ty = base_child_ty.unionTagTypeHypothetical(mod); + + const payload = &val_ptr.castTag(.@"union").?.data; + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); + + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &payload.val, + ptr_elem_ty, + parent.mut_decl, + ); + }, + .slice => switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), + + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), + + else => unreachable, + }, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) { + .undef => { + // A struct or union has been initialized to undefined at comptime and now we + // are for the first time setting a field. We must change the representation + // of the struct/union from `undef` to `struct`/`union`. + const arena = sema.arena; + + switch (parent.ty.zigTypeTag(mod)) { + .Struct => { + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + for (fields, 0..) 
|*field, i| field.* = (try mod.intern(.{ + .undef = parent.ty.structFieldType(i, mod).toIntern(), + })).toValue(); + + val_ptr.* = try Value.Tag.aggregate.create(arena, fields); + + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &fields[field_index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .Union => { + const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); + const payload_ty = parent.ty.structFieldType(field_index, mod); + payload.* = .{ .data = .{ + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), + .val = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), + } }; + + val_ptr.* = Value.initPayload(&payload.base); + + return beginComptimePtrMutationInner( + sema, + block, + src, + payload_ty, + &payload.data.val, + ptr_elem_ty, + parent.mut_decl, + ); + }, + .Pointer => { + assert(parent.ty.isSlice(mod)); + const ptr_ty = parent.ty.slicePtrFieldType(mod); + val_ptr.* = try Value.Tag.slice.create(arena, .{ + .ptr = (try mod.intern(.{ .undef = ptr_ty.toIntern() })).toValue(), + .len = (try mod.intern(.{ .undef = .usize_type })).toValue(), + }); + + switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + ptr_ty, + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), + + else => unreachable, + } + }, + else => unreachable, + } + }, + else => unreachable, + }, }, .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, target); + const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod); const field_offset = try sema.usizeCast(block, src, field_offset_u64); return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, + .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = reinterpret.val_ptr, .byte_offset = reinterpret.byte_offset + field_offset, @@ -27760,106 +28510,6 @@ fn beginComptimePtrMutation( .bad_decl_ty, .bad_ptr_ty => return parent, } }, - .eu_payload_ptr => { - const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(); - switch (val_ptr.tag()) { - else => { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.undef, - }; - - val_ptr.* = Value.initPayload(&payload.base); - - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - .eu_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = eu_ptr.container_ty, - }, - } - }, - .opt_payload_ptr => { - const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, try ptr_elem_ty.optionalChildAlloc(sema.arena)); - }; - var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = try parent.ty.optionalChildAlloc(sema.arena); - switch (val_ptr.tag()) { - .undef, .null_value => { - // An optional has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the optional from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .opt_payload }, - .data = Value.undef, - }; - - val_ptr.* = Value.initPayload(&payload.base); - - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - .opt_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, - - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. 
- .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = opt_ptr.container_ty, - }, - } - }, - .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already - else => unreachable, } } @@ -27870,46 +28520,50 @@ fn beginComptimePtrMutationInner( decl_ty: Type, decl_val: *Value, ptr_elem_ty: Type, - decl_ref_mut: Value.Payload.DeclRefMut.Data, + mut_decl: InternPool.Key.Ptr.Addr.MutDecl, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; + + decl_val.* = try decl_val.unintern(sema.arena, mod); + if (coerce_ok) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .direct = decl_val }, .ty = decl_ty, }; } // Handle the case that the decl is an array and we're actually trying to point to an element. - if (decl_ty.isArrayOrVector()) { - const decl_elem_ty = decl_ty.childType(); + if (decl_ty.isArrayOrVector(mod)) { + const decl_elem_ty = decl_ty.childType(mod); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .direct = decl_val }, .ty = decl_ty, }; } } - if (!decl_ty.hasWellDefinedLayout()) { + if (!decl_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, - .pointee = .{ .bad_decl_ty = {} }, + .mut_decl = mut_decl, + .pointee = .bad_decl_ty, .ty = decl_ty, }; } - if (!ptr_elem_ty.hasWellDefinedLayout()) { + if (!ptr_elem_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, - .pointee = .{ .bad_ptr_ty = {} }, + .mut_decl = mut_decl, + .pointee = .bad_ptr_ty, .ty = ptr_elem_ty, }; } return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = decl_val, .byte_offset = 0, @@ -27951,237 +28605,227 @@ fn beginComptimePtrLoad( ptr_val: Value, maybe_array_ty: ?Type, ) ComptimePtrLoadError!ComptimePtrLoadKit { - const target = sema.mod.getTarget(); - var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - => blk: { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }; - const is_mutable = ptr_val.tag() == .decl_ref_mut; - const decl = sema.mod.declPtr(decl_index); - const decl_tv = try decl.typedValue(); - if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; + const mod = sema.mod; + const target = mod.getTarget(); - const layout_defined = decl.ty.hasWellDefinedLayout(); - break :blk ComptimePtrLoadKit{ - .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, - .pointee = decl_tv, - .is_mutable = is_mutable, - .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, - }; - }, + var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl => blk: { + const decl_index = switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }; + const is_mutable = ptr.addr == .mut_decl; + const decl = mod.declPtr(decl_index); + const decl_tv = 
try decl.typedValue(); + if (decl.val.getVariable(mod) != null) return error.RuntimeLoad; - .elem_ptr => blk: { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ty = elem_ptr.elem_ty; - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null); + const layout_defined = decl.ty.hasWellDefinedLayout(mod); + break :blk ComptimePtrLoadKit{ + .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, + .pointee = decl_tv, + .is_mutable = is_mutable, + .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, + }; + }, + .int => return error.RuntimeLoad, + .eu_payload, .opt_payload => |container_ptr| blk: { + const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); + const payload_ty = switch (ptr.addr) { + .eu_payload => container_ty.errorUnionPayload(mod), + .opt_payload => container_ty.optionalChild(mod), + else => unreachable, + }; + var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); - // This code assumes that elem_ptrs have been "flattened" in order for direct dereference - // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that - // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { - assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod))); - } + // eu_payload and opt_payload never have a well-defined layout + if (deref.parent != null) { + deref.parent = null; + deref.ty_without_well_defined_layout = container_ty; + } - if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout()) { - if (deref.parent) |*parent| { + if (deref.pointee) |*tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + const payload_val = switch (tv.val.ip_index) { + .none => tv.val.cast(Value.Payload.SubValue).?.data, + .null_value => return sema.fail(block, src, "attempt to use null value", .{}), + else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return sema.fail( + block, + src, + "attempt to unwrap error: {}", + .{err_name.fmt(&mod.intern_pool)}, + ), + .payload => |payload| payload, + }, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => |payload| payload, + }, + else => unreachable, + }.toValue(), + }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; + break :blk deref; + } + } + deref.pointee = null; + break :blk deref; + }, + .comptime_field => |comptime_field| blk: { + const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); + break :blk ComptimePtrLoadKit{ + .parent = null, + .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, + .is_mutable = false, + .ty_without_well_defined_layout = field_ty, + }; + }, + .elem => |elem_ptr| blk: { + const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); + + // This code assumes that elem_ptrs have been "flattened" in order for direct dereference + // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that + // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" + switch (mod.intern_pool.indexToKey(elem_ptr.base)) { + .ptr => |base_ptr| switch (base_ptr.addr) { + .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), + else => {}, + }, + else => {}, + } + + if (elem_ptr.index != 0) { + if (elem_ty.hasWellDefinedLayout(mod)) { + if (deref.parent) |*parent| { + // Update the byte offset (in-place) + const elem_size = try sema.typeAbiSize(elem_ty); + const offset = parent.byte_offset + elem_size * elem_ptr.index; + parent.byte_offset = try sema.usizeCast(block, src, offset); + } + } else { + deref.parent = null; + deref.ty_without_well_defined_layout = elem_ty; + } + } + + // If we're loading an elem that was derived from a different type + // than the true type of the underlying decl, we cannot deref directly + const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { + const deref_elem_ty = deref.pointee.?.ty.childType(mod); + break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; + } else false; + if (!ty_matches) { + deref.pointee = null; + break :blk deref; + } + + var array_tv = deref.pointee.?; + const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); + if (maybe_array_ty) |load_ty| { + // It's possible that we're loading a [N]T, in which case we'd like to slice + // the pointee array directly from our parent array. + if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { + const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); + deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ + .ty = try Type.array(sema.arena, N, null, elem_ty, mod), + .val = try array_tv.val.sliceArray(mod, sema.arena, elem_idx, elem_idx + N), + } else null; + break :blk deref; + } + } + + if (elem_ptr.index >= check_len) { + deref.pointee = null; + break :blk deref; + } + if (elem_ptr.index == check_len - 1) { + if (array_tv.ty.sentinel(mod)) |sent| { + deref.pointee = TypedValue{ + .ty = elem_ty, + .val = sent, + }; + break :blk deref; + } + } + deref.pointee = TypedValue{ + .ty = elem_ty, + .val = try array_tv.val.elemValue(mod, @intCast(usize, elem_ptr.index)), + }; + break :blk deref; + }, + .field => |field_ptr| blk: { + const field_index = @intCast(u32, field_ptr.index); + const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); + + if (container_ty.hasWellDefinedLayout(mod)) { + const struct_obj = mod.typeToStruct(container_ty); + if (struct_obj != null and struct_obj.?.layout == .Packed) { + // packed structs are not byte addressable + deref.parent = null; + } else if (deref.parent) |*parent| { // Update the byte offset (in-place) - const elem_size = try sema.typeAbiSize(elem_ty); - const offset = parent.byte_offset + elem_size * elem_ptr.index; - parent.byte_offset = try sema.usizeCast(block, src, offset); + try sema.resolveTypeLayout(container_ty); + const field_offset = container_ty.structFieldOffset(field_index, mod); + parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { deref.parent = null; - 
deref.ty_without_well_defined_layout = elem_ty; + deref.ty_without_well_defined_layout = container_ty; } - } - // If we're loading an elem_ptr that was derived from a different type - // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector()) x: { - const deref_elem_ty = deref.pointee.?.ty.childType(); - break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; - } else false; - if (!ty_matches) { - deref.pointee = null; - break :blk deref; - } - - var array_tv = deref.pointee.?; - const check_len = array_tv.ty.arrayLenIncludingSentinel(); - if (maybe_array_ty) |load_ty| { - // It's possible that we're loading a [N]T, in which case we'd like to slice - // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector() and load_ty.childType().eql(elem_ty, sema.mod)) { - const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); - deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ - .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), - .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N), - } else null; + const tv = deref.pointee orelse { + deref.pointee = null; break :blk deref; - } - } - - if (elem_ptr.index >= check_len) { - deref.pointee = null; - break :blk deref; - } - if (elem_ptr.index == check_len - 1) { - if (array_tv.ty.sentinel()) |sent| { - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = sent, - }; - break :blk deref; - } - } - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index), - }; - break :blk deref; - }, - - .slice => blk: { - const slice = ptr_val.castTag(.slice).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null); - }, - - .field_ptr => blk: { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); - - if (field_ptr.container_ty.hasWellDefinedLayout()) { - const struct_ty = field_ptr.container_ty.castTag(.@"struct"); - if (struct_ty != null and struct_ty.?.data.layout == .Packed) { - // packed structs are not byte addressable - deref.parent = null; - } else if (deref.parent) |*parent| { - // Update the byte offset (in-place) - try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = field_ptr.container_ty.structFieldOffset(field_index, target); - parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); - } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = field_ptr.container_ty; - } - - const tv = deref.pointee orelse { - deref.pointee = null; - break :blk deref; - }; - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; - if (!coerce_in_mem_ok) { - deref.pointee = null; - break :blk deref; - } - - if (field_ptr.container_ty.isSlice()) { - const slice_val = tv.val.castTag(.slice).?.data; - deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = 
field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), - .val = slice_val.ptr, - }, - Value.Payload.Slice.len_index => TypedValue{ - .ty = Type.usize, - .val = slice_val.len, - }, - else => unreachable, }; - } else { - const field_ty = field_ptr.container_ty.structFieldType(field_index); - deref.pointee = TypedValue{ - .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, field_index), - }; - } - break :blk deref; - }, - - .comptime_field_ptr => blk: { - const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data; - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val }, - .is_mutable = false, - .ty_without_well_defined_layout = comptime_field_ptr.field_ty, - }; - }, - - .opt_payload_ptr, - .eu_payload_ptr, - => blk: { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const payload_ty = switch (ptr_val.tag()) { - .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(), - .opt_payload_ptr => try payload_ptr.container_ty.optionalChildAlloc(sema.arena), - else => unreachable, - }; - var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); - - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = payload_ptr.container_ty; - } - - if (deref.pointee) |*tv| { const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (!coerce_in_mem_ok) { + deref.pointee = null; + break :blk deref; + } + + if (container_ty.isSlice(mod)) { + deref.pointee = switch (field_index) { + Value.slice_ptr_index => TypedValue{ + .ty = container_ty.slicePtrFieldType(mod), + .val = tv.val.slicePtr(mod), }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull()) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; + Value.slice_len_index => TypedValue{ + .ty = Type.usize, + .val = mod.intern_pool.indexToKey(tv.val.toIntern()).ptr.len.toValue(), }, else => unreachable, }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; + } else { + const field_ty = container_ty.structFieldType(field_index, mod); + deref.pointee = TypedValue{ + .ty = field_ty, + .val = try tv.val.fieldValue(mod, field_index), + }; } - } - deref.pointee = null; - break :blk deref; + break :blk deref; + }, }, - .null_value => { - return sema.fail(block, src, "attempt to use null value", .{}); + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => |payload| try sema.beginComptimePtrLoad(block, src, payload.toValue(), null), }, - .opt_payload => blk: { - const opt_payload = ptr_val.castTag(.opt_payload).?.data; - break :blk try 
sema.beginComptimePtrLoad(block, src, opt_payload, null); - }, - - .zero, - .one, - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, - .variable, - .extern_fn, - .function, - => return error.RuntimeLoad, - else => unreachable, }; if (deref.pointee) |tv| { - if (deref.parent == null and tv.ty.hasWellDefinedLayout()) { + if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) { deref.parent = .{ .tv = tv, .byte_offset = 0 }; } } @@ -28196,21 +28840,21 @@ fn bitCast( inst_src: LazySrcLoc, operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); try sema.resolveTypeLayout(dest_ty); const old_ty = try sema.resolveTypeFields(sema.typeOf(inst)); try sema.resolveTypeLayout(old_ty); - const target = sema.mod.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const old_bits = old_ty.bitSize(target); + const dest_bits = dest_ty.bitSize(mod); + const old_bits = old_ty.bitSize(mod); if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ - dest_ty.fmt(sema.mod), + dest_ty.fmt(mod), dest_bits, - old_ty.fmt(sema.mod), + old_ty.fmt(mod), old_bits, }); } @@ -28233,20 +28877,21 @@ fn bitCastVal( new_ty: Type, buffer_offset: usize, ) !?Value { - const target = sema.mod.getTarget(); - if (old_ty.eql(new_ty, sema.mod)) return val; + const mod = sema.mod; + if (old_ty.eql(new_ty, mod)) return val; // For types with well-defined memory layouts, we serialize them to a byte buffer, // then deserialize to the new type. - const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) { + val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => return null, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}), }; - return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena); + return try Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena); } fn coerceArrayPtrToSlice( @@ -28256,25 +28901,32 @@ fn coerceArrayPtrToSlice( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(inst)) |val| { const ptr_array_ty = sema.typeOf(inst); - const array_ty = ptr_array_ty.childType(); - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = val, - .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()), - }); - return sema.addConstant(dest_ty, slice_val); + const array_ty = ptr_array_ty.childType(mod); + const slice_val = try mod.intern(.{ .ptr = .{ + .ty = dest_ty.toIntern(), + .addr = switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => .{ .int = try mod.intern(.{ .undef = .usize_type }) }, + .ptr => |ptr| ptr.addr, + else => unreachable, + }, + .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(), + } }); + return
sema.addConstant(dest_ty, slice_val.toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.array_to_slice, dest_ty, inst); } fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool { - const dest_info = dest_ty.ptrInfo().data; - const inst_info = inst_ty.ptrInfo().data; - const len0 = (inst_info.pointee_type.zigTypeTag() == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or - (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or - (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0); + const mod = sema.mod; + const dest_info = dest_ty.ptrInfo(mod); + const inst_info = inst_ty.ptrInfo(mod); + const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or + (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or + (inst_info.pointee_type.isTuple(mod) and inst_info.pointee_type.structFieldCount(mod) == 0); const ok_cv_qualifiers = ((inst_info.mutable or !dest_info.mutable) or len0) and @@ -28298,17 +28950,16 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul } if (inst_info.@"align" == 0 and dest_info.@"align" == 0) return true; if (len0) return true; - const target = sema.mod.getTarget(); const inst_align = if (inst_info.@"align" != 0) inst_info.@"align" else - inst_info.pointee_type.abiAlignment(target); + inst_info.pointee_type.abiAlignment(mod); const dest_align = if (dest_info.@"align" != 0) dest_info.@"align" else - dest_info.pointee_type.abiAlignment(target); + dest_info.pointee_type.abiAlignment(mod); if (dest_align > inst_align) { in_memory_result.* = .{ .ptr_alignment = .{ @@ -28327,26 +28978,30 @@ fn coerceCompatiblePtrs( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); if (try sema.resolveMaybeUndefVal(inst)) |val| { - if (!val.isUndef() and val.isNull() and !dest_ty.isAllowzeroPtr()) { + if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } // The comptime Value representation is compatible with both types. 
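Taken together with checkPtrAttributes above, this implements pointer coercions of the following shape (an illustrative sketch, not from the diff):

    export fn demo(c_ptr: [*c]u8) *u8 {
        var x: u32 align(8) = 0;
        const loosened: *align(4) u32 = &x; // ok: alignment may only decrease
        _ = loosened;
        // [*c] pointers allow address zero while *u8 does not, so with runtime
        // safety enabled this coercion carries the ptrtoint != 0 check added above.
        return c_ptr;
    }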
- return sema.addConstant(dest_ty, val); + return sema.addConstant( + dest_ty, + try mod.getCoerced((try val.intern(inst_ty, mod)).toValue(), dest_ty), + ); } try sema.requireRuntimeBlock(block, inst_src, null); - const inst_allows_zero = inst_ty.zigTypeTag() != .Pointer or inst_ty.ptrAllowsZero(); - if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero() and - (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn)) + const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod); + if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and + (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { - const actual_ptr = if (inst_ty.isSlice()) + const actual_ptr = if (inst_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) else inst; const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); - const ok = if (inst_ty.isSlice()) ok: { + const ok = if (inst_ty.isSlice(mod)) ok: { const len = try sema.analyzeSliceLen(block, inst_src, inst); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero); @@ -28364,9 +29019,11 @@ fn coerceEnumToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); - const tag_ty = union_ty.unionTagType() orelse { + const tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), @@ -28393,16 +29050,18 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(msg); }; - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field = union_obj.fields.values()[field_index]; const field_ty = try sema.resolveTypeFields(field.ty); - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); const field_name = union_obj.fields.keys()[field_index]; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -28411,27 +29070,27 @@ fn coerceEnumToUnion( const opv = (try sema.typeHasOnePossibleValue(field_ty)) orelse { const msg = msg: { const field_name = union_obj.fields.keys()[field_index]; - const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{s}'", .{ - inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), field_ty.fmt(sema.mod), field_name, + const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{ + inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), + field_ty.fmt(sema.mod), field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), + }); try 
sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; - return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = val, - .val = opv, - })); + return sema.addConstant(union_ty, try mod.unionValue(union_ty, val, opv)); } try sema.requireRuntimeBlock(block, inst_src, null); - if (tag_ty.isNonexhaustiveEnum()) { + if (tag_ty.isNonexhaustiveEnum(mod)) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{ union_ty.fmt(sema.mod), @@ -28443,13 +29102,13 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(msg); } - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; { var msg: ?*Module.ErrorMsg = null; errdefer if (msg) |some| some.destroy(sema.gpa); for (union_obj.fields.values(), 0..) |field, i| { - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { const err_msg = msg orelse try sema.errMsg( block, inst_src, @@ -28469,7 +29128,7 @@ fn coerceEnumToUnion( } // If the union has all fields 0 bits, the union value is just the enum value. - if (union_ty.unionHasAllZeroBitFieldTypes()) { + if (union_ty.unionHasAllZeroBitFieldTypes(mod)) { return block.addBitCast(union_ty, enum_tag); } @@ -28487,8 +29146,11 @@ fn coerceEnumToUnion( while (it.next()) |field| : (field_index += 1) { const field_name = field.key_ptr.*; const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) }); + if (!(try sema.typeHasRuntimeBits(field_ty))) continue; + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{ + field_name.fmt(ip), + field_ty.fmt(sema.mod), + }); } try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -28504,36 +29166,55 @@ fn coerceAnonStructToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const field_count = inst_ty.structFieldCount(); - if (field_count != 1) { - const msg = msg: { - const msg = if (field_count > 1) try sema.errMsg( - block, - inst_src, - "cannot initialize multiple union fields at once; unions can only have one active field", - .{}, - ) else try sema.errMsg( - block, - inst_src, - "union initializer must initialize one field", - .{}, - ); - errdefer msg.destroy(sema.gpa); + const field_info: union(enum) { + name: InternPool.NullTerminatedString, + count: usize, + } = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1) + .{ .name = anon_struct_type.names[0] } + else + .{ .count = anon_struct_type.names.len }, + .struct_type => |struct_type| name: { + const field_names = mod.structPtrUnwrap(struct_type.index).?.fields.keys(); + break :name if (field_names.len == 1) + .{ .name = field_names[0] } + else + .{ .count = field_names.len }; + }, + else => unreachable, + }; + switch (field_info) { + .name => |field_name| { + const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); + return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); + }, + .count => |field_count| { + assert(field_count != 1); + const msg = msg: { + const msg = if (field_count > 1) try sema.errMsg( + block, + inst_src, + "cannot initialize multiple union fields at 
once; unions can only have one active field", + .{}, + ) else try sema.errMsg( + block, + inst_src, + "union initializer must initialize one field", + .{}, + ); + errdefer msg.destroy(sema.gpa); - // TODO add notes for where the anon struct was created to point out - // the extra fields. + // TODO add notes for where the anon struct was created to point out + // the extra fields. - try sema.addDeclaredHereNote(msg, union_ty); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); + try sema.addDeclaredHereNote(msg, union_ty); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, } - - const anon_struct = inst_ty.castTag(.anon_struct).?.data; - const field_name = anon_struct.names[0]; - const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); - return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); } fn coerceAnonStructToUnionPtrs( @@ -28544,7 +29225,8 @@ fn coerceAnonStructToUnionPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const union_ty = ptr_union_ty.childType(); + const mod = sema.mod; + const union_ty = ptr_union_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src); return sema.analyzeRef(block, union_ty_src, union_inst); @@ -28558,7 +29240,8 @@ fn coerceAnonStructToStructPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const struct_ty = ptr_struct_ty.childType(); + const mod = sema.mod; + const struct_ty = ptr_struct_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src); return sema.analyzeRef(block, struct_ty_src, struct_inst); @@ -28573,15 +29256,16 @@ fn coerceArrayLike( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const inst_len = inst_ty.arrayLen(); - const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen()); - const target = sema.mod.getTarget(); + const inst_len = inst_ty.arrayLen(mod); + const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod)); + const target = mod.getTarget(); if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ - dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), + dest_ty.fmt(mod), inst_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len}); @@ -28591,35 +29275,32 @@ fn coerceArrayLike( return sema.failWithOwnedErrorMsg(msg); } - const dest_elem_ty = dest_ty.childType(); - const inst_elem_ty = inst_ty.childType(); + const dest_elem_ty = dest_ty.childType(mod); + const inst_elem_ty = inst_ty.childType(mod); const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(inst)) |inst_val| { // These types share the same comptime value representation. 
- return sema.addConstant(dest_ty, inst_val); + return sema.coerceInMemory(block, inst_val, inst_ty, dest_ty, dest_ty_src); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); } - const element_vals = try sema.arena.alloc(Value, dest_len); + const element_vals = try sema.arena.alloc(InternPool.Index, dest_len); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len); var runtime_src: ?LazySrcLoc = null; - for (element_vals, 0..) |*elem, i| { - const index_ref = try sema.addConstant( - Type.usize, - try Value.Tag.int_u64.create(sema.arena, i), - ); + for (element_vals, element_refs, 0..) |*val, *ref, i| { + const index_ref = try sema.addConstant(Type.usize, try mod.intValue(Type.usize, i)); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); - element_refs[i] = coerced; + ref.* = coerced; if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| { - elem.* = elem_val; + val.* = try elem_val.intern(dest_elem_ty, mod); } else { runtime_src = elem_src; } @@ -28631,10 +29312,10 @@ fn coerceArrayLike( return block.addAggregateInit(dest_ty, element_refs); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, element_vals), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue()); } /// If the lengths match, coerces element-wise. @@ -28646,9 +29327,10 @@ fn coerceTupleToArray( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const inst_len = inst_ty.arrayLen(); - const dest_len = dest_ty.arrayLen(); + const inst_len = inst_ty.arrayLen(mod); + const dest_len = dest_ty.arrayLen(mod); if (dest_len != inst_len) { const msg = msg: { @@ -28663,26 +29345,27 @@ fn coerceTupleToArray( return sema.failWithOwnedErrorMsg(msg); } - const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel()); - const element_vals = try sema.arena.alloc(Value, dest_elems); + const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_len); + const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems); - const dest_elem_ty = dest_ty.childType(); + const dest_elem_ty = dest_ty.childType(mod); var runtime_src: ?LazySrcLoc = null; - for (element_vals, 0..) |*elem, i_usize| { + for (element_vals, element_refs, 0..) 
|*val, *ref, i_usize| { const i = @intCast(u32, i_usize); if (i_usize == inst_len) { - elem.* = dest_ty.sentinel().?; - element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*); + const sentinel_val = dest_ty.sentinel(mod).?; + val.* = sentinel_val.toIntern(); + ref.* = try sema.addConstant(dest_elem_ty, sentinel_val); break; } const elem_src = inst_src; // TODO better source location const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); - element_refs[i] = coerced; + ref.* = coerced; if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| { - elem.* = elem_val; + val.* = try elem_val.intern(dest_elem_ty, mod); } else { runtime_src = elem_src; } @@ -28694,10 +29377,10 @@ fn coerceTupleToArray( return block.addAggregateInit(dest_ty, element_refs); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, element_vals), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue()); } /// If the lengths match, coerces element-wise. @@ -28709,10 +29392,11 @@ fn coerceTupleToSlicePtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { - const tuple_ty = sema.typeOf(ptr_tuple).childType(); + const mod = sema.mod; + const tuple_ty = sema.typeOf(ptr_tuple).childType(mod); const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); - const slice_info = slice_ty.ptrInfo().data; - const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod); + const slice_info = slice_ty.ptrInfo(mod); + const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(mod), slice_info.sentinel, slice_info.pointee_type, sema.mod); const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src); if (slice_info.@"align" != 0) { return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{}); @@ -28730,8 +29414,9 @@ fn coerceTupleToArrayPtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); - const ptr_info = ptr_array_ty.ptrInfo().data; + const ptr_info = ptr_array_ty.ptrInfo(mod); const array_ty = ptr_info.pointee_type; const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src); if (ptr_info.@"align" != 0) { @@ -28750,27 +29435,41 @@ fn coerceTupleToStruct( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const struct_ty = try sema.resolveTypeFields(dest_ty); - if (struct_ty.isTupleOrAnonStruct()) { + if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); } - const fields = struct_ty.structFields(); - const field_vals = try sema.arena.alloc(Value, fields.count()); + const fields = struct_ty.structFields(mod); + const field_vals = try sema.arena.alloc(InternPool.Index, fields.count()); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); var runtime_src: ?LazySrcLoc = null; - const field_count = inst_ty.structFieldCount(); - var field_i: u32 = 0; - while (field_i < field_count) : (field_i += 1) { - const field_src = inst_src; // TODO better source 
location - const field_name = if (inst_ty.castTag(.anon_struct)) |payload| - payload.data.names[field_i] + const field_count = switch (ip.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); + 0, + else => unreachable, + }; + for (0..field_count) |field_index_usize| { + const field_i = @intCast(u32, field_index_usize); + const field_src = inst_src; // TODO better source location + // https://github.com/ziglang/zig/issues/15709 + const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) + anon_struct_type.names[field_i] + else + try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + else => unreachable, + }; const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src); const field = fields.values()[field_index]; const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); @@ -28781,13 +29480,13 @@ fn coerceTupleToStruct( return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(field.default_val, field.ty, sema.mod)) { + if (!init_val.eql(field.default_val.toValue(), field.ty, sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - field_vals[field_index] = field_val; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -28804,9 +29503,9 @@ fn coerceTupleToStruct( const field_name = fields.keys()[i]; const field = fields.values()[i]; const field_src = inst_src; // TODO better source location - if (field.default_val.tag() == .unreachable_value) { - const template = "missing struct field: {s}"; - const args = .{field_name}; + if (field.default_val == .none) { + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -28817,7 +29516,7 @@ fn coerceTupleToStruct( if (runtime_src == null) { field_vals[i] = field.default_val; } else { - field_ref.* = try sema.addConstant(field.ty, field.default_val); + field_ref.* = try sema.addConstant(field.ty, field.default_val.toValue()); } } @@ -28832,10 +29531,14 @@ fn coerceTupleToStruct( return block.addAggregateInit(struct_ty, field_refs); } - return sema.addConstant( - struct_ty, - try Value.Tag.aggregate.create(sema.arena, field_vals), - ); + const struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.toIntern(), + .storage = .{ .elems = field_vals }, + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer ip.remove(struct_val); + + return sema.addConstant(struct_ty, struct_val.toValue()); } fn coerceTupleToTuple( @@ -28845,47 +29548,76 @@ fn coerceTupleToTuple( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const dest_field_count = tuple_ty.structFieldCount(); - const field_vals = try sema.arena.alloc(Value, dest_field_count); + const mod = sema.mod; + const ip = &mod.intern_pool; + const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) { + 
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const inst_field_count = inst_ty.structFieldCount(); - if (inst_field_count > dest_field_count) return error.NotCoercible; + const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + if (src_field_count > dest_field_count) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; - var field_i: u32 = 0; - while (field_i < inst_field_count) : (field_i += 1) { + for (0..dest_field_count) |field_index_usize| { + const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (inst_ty.castTag(.anon_struct)) |payload| - payload.data.names[field_i] - else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); + // https://github.com/ziglang/zig/issues/15709 + const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) + anon_struct_type.names[field_i] + else + try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + else => unreachable, + }; - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); - } + + const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types[field_index_usize].toType(), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty, + else => unreachable, + }; + const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.values[field_index_usize], + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val, + else => unreachable, + }; const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); - const field_ty = tuple_ty.structFieldType(field_i); - const default_val = tuple_ty.structFieldDefaultValue(field_i); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); field_refs[field_index] = coerced; - if (default_val.tag() != .unreachable_value) { + if (default_val != .none) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_val, field_ty, sema.mod)) { + if (!init_val.eql(default_val.toValue(), field_ty, sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - 
field_vals[field_index] = field_val; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -28899,12 +29631,15 @@ fn coerceTupleToTuple( for (field_refs, 0..) |*field_ref, i| { if (field_ref.* != .none) continue; - const default_val = tuple_ty.structFieldDefaultValue(i); - const field_ty = tuple_ty.structFieldType(i); + const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.values[i], + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val, + else => unreachable, + }; const field_src = inst_src; // TODO better source location - if (default_val.tag() == .unreachable_value) { - if (tuple_ty.isTuple()) { + if (default_val == .none) { + if (tuple_ty.isTuple(mod)) { const template = "missing tuple field: {d}"; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, .{i}); @@ -28913,8 +29648,8 @@ fn coerceTupleToTuple( } continue; } - const template = "missing struct field: {s}"; - const args = .{tuple_ty.structFieldName(i)}; + const template = "missing struct field: {}"; + const args = .{tuple_ty.structFieldName(i, mod).fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -28925,7 +29660,12 @@ fn coerceTupleToTuple( if (runtime_src == null) { field_vals[i] = default_val; } else { - field_ref.* = try sema.addConstant(field_ty, default_val); + const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types[i].toType(), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].ty, + else => unreachable, + }; + field_ref.* = try sema.addConstant(field_ty, default_val.toValue()); } } @@ -28942,7 +29682,10 @@ fn coerceTupleToTuple( return sema.addConstant( tuple_ty, - try Value.Tag.aggregate.create(sema.arena, field_vals), + (try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(), ); } @@ -28959,7 +29702,7 @@ fn analyzeDeclVal( const decl_ref = try sema.analyzeDeclRefInner(decl_index, false); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (Air.refToIndex(result)) |index| { - if (sema.air_instructions.items(.tag)[index] == .constant and !block.is_typeof) { + if (sema.air_instructions.items(.tag)[index] == .interned and !block.is_typeof) { try sema.decl_val_table.put(sema.gpa, decl_index, result); } } @@ -28980,13 +29723,14 @@ fn addReferencedBy( } fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); if (decl.analysis == .in_progress) { - const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(), "dependency loop detected", .{}); + const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{}); return sema.failWithOwnedErrorMsg(msg); } - sema.mod.ensureDeclAnalyzed(decl_index) catch |err| { + mod.ensureDeclAnalyzed(decl_index) catch |err| { if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; } else { @@ -28996,7 +29740,7 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { }; } -fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { +fn ensureFuncBodyAnalyzed(sema: *Sema, func: Module.Fn.Index) CompileError!void { sema.mod.ensureFuncBodyAnalyzed(func) catch |err| 
{ if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; @@ -29008,23 +29752,33 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { } fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { + const mod = sema.mod; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( - try ty.copy(anon_decl.arena()), - try val.copy(anon_decl.arena()), + ty, + val, 0, // default alignment ); try sema.maybeQueueFuncBodyAnalysis(decl); - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl); - return try Value.Tag.decl_ref.create(sema.arena, decl); + try mod.declareDeclDependency(sema.owner_decl_index, decl); + const result = try mod.intern(.{ .ptr = .{ + .ty = (try mod.singleConstPtrType(ty)).toIntern(), + .addr = .{ .decl = decl }, + } }); + return result.toValue(); } fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { - const val = opt_val orelse return Value.null; - const ptr_val = try sema.refValue(block, ty, val); - const result = try Value.Tag.opt_payload.create(sema.arena, ptr_val); - return result; + const mod = sema.mod; + const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque); + return (try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(), + .val = if (opt_val) |val| (try mod.getCoerced( + try sema.refValue(block, ty, val), + ptr_anyopaque_ty, + )).toIntern() else .none, + } })).toValue(); } fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref { @@ -29036,42 +29790,37 @@ fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref /// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps /// this function with `analyze_fn_body` set to true. 
fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: bool) CompileError!Air.Inst.Ref { - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); + const mod = sema.mod; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); - if (decl_tv.val.castTag(.variable)) |payload| { - const variable = payload.data; - const ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = decl_tv.ty, - .mutable = variable.is_mutable, - .@"addrspace" = decl.@"addrspace", - .@"align" = decl.@"align", - }); - return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl_index)); - } + const ptr_ty = try mod.ptrType(.{ + .child = decl_tv.ty.toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), + .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true, + .address_space = decl.@"addrspace", + }, + }); if (analyze_fn_body) { try sema.maybeQueueFuncBodyAnalysis(decl_index); } - return sema.addConstant( - try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = decl_tv.ty, - .mutable = false, - .@"addrspace" = decl.@"addrspace", - .@"align" = decl.@"align", - }), - try Value.Tag.decl_ref.create(sema.arena, decl_index), - ); + return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_ty.toIntern(), + .addr = .{ .decl = decl_index }, + } })).toValue()); } fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); const tv = try decl.typedValue(); - if (tv.ty.zigTypeTag() != .Fn) return; + if (tv.ty.zigTypeTag(mod) != .Fn) return; if (!try sema.fnHasRuntimeBits(tv.ty)) return; - const func = tv.val.castTag(.function) orelse return; // undef or extern_fn - try sema.mod.ensureFuncBodyAnalysisQueued(func.data); + const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap() orelse return; // undef or extern_fn + try mod.ensureFuncBodyAnalysisQueued(func_index); } fn analyzeRef( @@ -29083,18 +29832,16 @@ fn analyzeRef( const operand_ty = sema.typeOf(operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - switch (val.tag()) { - .extern_fn, .function => { - const decl_index = val.pointerDecl().?; - return sema.analyzeDeclRef(decl_index); - }, + switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), + .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), else => {}, } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try operand_ty.copy(anon_decl.arena()), - try val.copy(anon_decl.arena()), + operand_ty, + val, 0, // default alignment )); } @@ -29124,9 +29871,10 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - const elem_ty = switch (ptr_ty.zigTypeTag()) { - .Pointer => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { + .Pointer => ptr_ty.childType(mod), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; @@ -29136,11 +29884,11 @@ fn analyzeLoad( if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try 
sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| { - return sema.addConstant(elem_ty, elem_val); + return sema.addConstant(elem_ty, try mod.getCoerced(elem_val, elem_ty)); } } - if (ptr_ty.ptrInfo().data.vector_index == .runtime) { + if (ptr_ty.ptrInfo(mod).vector_index == .runtime) { const ptr_inst = Air.refToIndex(ptr).?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[ptr_inst] == .ptr_elem_ptr) { @@ -29163,11 +29911,11 @@ fn analyzeSlicePtr( slice: Air.Inst.Ref, slice_ty: Type, ) CompileError!Air.Inst.Ref { - const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const result_ty = slice_ty.slicePtrFieldType(buf); + const mod = sema.mod; + const result_ty = slice_ty.slicePtrFieldType(mod); if (try sema.resolveMaybeUndefVal(slice)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_ty); - return sema.addConstant(result_ty, val.slicePtr()); + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); + return sema.addConstant(result_ty, val.slicePtr(mod)); } try sema.requireRuntimeBlock(block, slice_src, null); return block.addTyOp(.slice_ptr, result_ty, slice); @@ -29179,8 +29927,9 @@ fn analyzeSliceLen( src: LazySrcLoc, slice_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(slice_inst)) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.addConstUndef(Type.usize); } return sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)); @@ -29196,12 +29945,13 @@ fn analyzeIsNull( operand: Air.Inst.Ref, invert_logic: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const result_ty = Type.bool; if (try sema.resolveMaybeUndefVal(operand)) |opt_val| { - if (opt_val.isUndef()) { + if (opt_val.isUndef(mod)) { return sema.addConstUndef(result_ty); } - const is_null = opt_val.isNull(); + const is_null = opt_val.isNull(mod); const bool_value = if (invert_logic) !is_null else is_null; if (bool_value) { return Air.Inst.Ref.bool_true; @@ -29212,11 +29962,10 @@ fn analyzeIsNull( const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; const operand_ty = sema.typeOf(operand); - var buf: Type.Payload.ElemType = undefined; - if (operand_ty.zigTypeTag() == .Optional and operand_ty.optionalChild(&buf).zigTypeTag() == .NoReturn) { + if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) { return inverted_non_null_res; } - if (operand_ty.zigTypeTag() != .Optional and !operand_ty.isPtrLikeOptional()) { + if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) { return inverted_non_null_res; } try sema.requireRuntimeBlock(block, src, null); @@ -29230,11 +29979,12 @@ fn analyzePtrIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(operand); - assert(ptr_ty.zigTypeTag() == .Pointer); - const child_ty = ptr_ty.childType(); + assert(ptr_ty.zigTypeTag(mod) == .Pointer); + const child_ty = ptr_ty.childType(mod); - const child_tag = child_ty.zigTypeTag(); + const child_tag = child_ty.zigTypeTag(mod); if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true; if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false; assert(child_tag == .ErrorUnion); @@ -29251,14 +30001,15 @@ fn analyzeIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = 
sema.typeOf(operand); - const ot = operand_ty.zigTypeTag(); + const ot = operand_ty.zigTypeTag(mod); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); - const payload_ty = operand_ty.errorUnionPayload(); - if (payload_ty.zigTypeTag() == .NoReturn) { + const payload_ty = operand_ty.errorUnionPayload(mod); + if (payload_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } @@ -29279,50 +30030,56 @@ fn analyzeIsNonErrComptimeOnly( // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. - const set_ty = operand_ty.errorUnionSet(); - switch (set_ty.tag()) { - .anyerror => {}, - .error_set_inferred => blk: { - // If the error set is empty, we must return a comptime true or false. - // However we want to avoid unnecessarily resolving an inferred error set - // in case it is already non-empty. - const ies = set_ty.castTag(.error_set_inferred).?.data; - if (ies.is_anyerror) break :blk; - if (ies.errors.count() != 0) break :blk; - if (maybe_operand_val == null) { - // Try to avoid resolving inferred error set if possible. + const set_ty = operand_ty.errorUnionSet(mod); + switch (set_ty.toIntern()) { + .anyerror_type => {}, + else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) { + .error_set_type => |error_set_type| { + if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true; + }, + .inferred_error_set_type => |ies_index| blk: { + // If the error set is empty, we must return a comptime true or false. + // However we want to avoid unnecessarily resolving an inferred error set + // in case it is already non-empty. + const ies = mod.inferredErrorSetPtr(ies_index); + if (ies.is_anyerror) break :blk; if (ies.errors.count() != 0) break :blk; - if (ies.is_anyerror) break :blk; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); - if (other_ies.is_anyerror) { - ies.is_anyerror = true; - ies.is_resolved = true; - break :blk; - } + if (maybe_operand_val == null) { + // Try to avoid resolving inferred error set if possible. + if (ies.errors.count() != 0) break :blk; + if (ies.is_anyerror) break :blk; + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (ies_index == other_ies_index) continue; + try sema.resolveInferredErrorSet(block, src, other_ies_index); + const other_ies = mod.inferredErrorSetPtr(other_ies_index); + if (other_ies.is_anyerror) { + ies.is_anyerror = true; + ies.is_resolved = true; + break :blk; + } - if (other_ies.errors.count() != 0) break :blk; + if (other_ies.errors.count() != 0) break :blk; + } + if (ies.func == sema.owner_func_index.unwrap()) { + // We're checking the inferred errorset of the current function and none of + // its child inferred error sets contained any errors meaning that any value + // so far with this type can't contain errors either. + return Air.Inst.Ref.bool_true; + } + try sema.resolveInferredErrorSet(block, src, ies_index); + if (ies.is_anyerror) break :blk; + if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; } - if (ies.func == sema.owner_func) { - // We're checking the inferred errorset of the current function and none of - // its child inferred error sets contained any errors meaning that any value - // so far with this type can't contain errors either. 
- return Air.Inst.Ref.bool_true; - } - try sema.resolveInferredErrorSet(block, src, ies); - if (ies.is_anyerror) break :blk; - if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; - } + }, + else => unreachable, }, - else => if (set_ty.errorSetNames().len == 0) return Air.Inst.Ref.bool_true, } if (maybe_operand_val) |err_union| { - if (err_union.isUndef()) { + if (err_union.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (err_union.getError() == null) { + if (err_union.getErrorName(mod) == .none) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -29375,72 +30132,78 @@ fn analyzeSlice( end_src: LazySrcLoc, by_length: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; // Slice expressions can operate on a variable whose type is an array. This requires // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. const ptr_ptr_ty = sema.typeOf(ptr_ptr); - const target = sema.mod.getTarget(); - const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) { - .Pointer => ptr_ptr_ty.elemType(), - else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}), + const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { + .Pointer => ptr_ptr_ty.childType(mod), + else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(mod)}), }; - const mod = sema.mod; var array_ty = ptr_ptr_child_ty; var slice_ty = ptr_ptr_ty; var ptr_or_slice = ptr_ptr; var elem_ty: Type = undefined; var ptr_sentinel: ?Value = null; - switch (ptr_ptr_child_ty.zigTypeTag()) { + switch (ptr_ptr_child_ty.zigTypeTag(mod)) { .Array => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); - elem_ty = ptr_ptr_child_ty.childType(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); + elem_ty = ptr_ptr_child_ty.childType(mod); }, - .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { + .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) { .One => { - const double_child_ty = ptr_ptr_child_ty.childType(); - if (double_child_ty.zigTypeTag() == .Array) { - ptr_sentinel = double_child_ty.sentinel(); + const double_child_ty = ptr_ptr_child_ty.childType(mod); + if (double_child_ty.zigTypeTag(mod) == .Array) { + ptr_sentinel = double_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = double_child_ty; - elem_ty = double_child_ty.childType(); + elem_ty = double_child_ty.childType(mod); } else { return sema.fail(block, src, "slice of single-item pointer", .{}); } }, .Many, .C => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; - elem_ty = ptr_ptr_child_ty.childType(); + elem_ty = ptr_ptr_child_ty.childType(mod); - if (ptr_ptr_child_ty.ptrSize() == .C) { + if (ptr_ptr_child_ty.ptrSize(mod) == .C) { if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| { - if (ptr_val.isNull()) { + if (ptr_val.isNull(mod)) { return sema.fail(block, src, "slice of null pointer", .{}); } } } }, .Slice => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; - elem_ty = ptr_ptr_child_ty.childType(); + elem_ty = ptr_ptr_child_ty.childType(mod); }, }, else => return sema.fail(block, src, 
"slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}), } - const ptr = if (slice_ty.isSlice()) + const ptr = if (slice_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty) - else - ptr_or_slice; + else if (array_ty.zigTypeTag(mod) == .Array) ptr: { + var manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type; + assert(manyptr_ty_key.child == array_ty.toIntern()); + assert(manyptr_ty_key.flags.size == .One); + manyptr_ty_key.child = elem_ty.toIntern(); + manyptr_ty_key.flags.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src); + } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src); + const new_ptr_ty = sema.typeOf(new_ptr); // true if and only if the end index of the slice, implicitly or explicitly, equals // the length of the underlying object being sliced. we might learn the length of the @@ -29448,8 +30211,8 @@ fn analyzeSlice( // we might learn of the length because it is a comptime-known slice value. var end_is_len = uncasted_end_opt == .none; const end = e: { - if (array_ty.zigTypeTag() == .Array) { - const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()); + if (array_ty.zigTypeTag(mod) == .Array) { + const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod)); if (!end_is_len) { const end = if (by_length) end: { @@ -29458,12 +30221,12 @@ fn analyzeSlice( break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveMaybeUndefVal(end)) |end_val| { - const len_s_val = try Value.Tag.int_u64.create( - sema.arena, - array_ty.arrayLenIncludingSentinel(), + const len_s_val = try mod.intValue( + Type.usize, + array_ty.arrayLenIncludingSentinel(mod), ); if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) { - const sentinel_label: []const u8 = if (array_ty.sentinel() != null) + const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null) " +1 (sentinel)" else ""; @@ -29491,7 +30254,7 @@ fn analyzeSlice( } break :e try sema.addConstant(Type.usize, len_val); - } else if (slice_ty.isSlice()) { + } else if (slice_ty.isSlice(mod)) { if (!end_is_len) { const end = if (by_length) end: { const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); @@ -29500,16 +30263,14 @@ fn analyzeSlice( } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveMaybeUndefVal(ptr_or_slice)) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.fail(block, src, "slice of undefined", .{}); } - const has_sentinel = slice_ty.sentinel() != null; - var int_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel), - }; - const slice_len_val = Value.initPayload(&int_payload.base); - if (!(try sema.compareAll(end_val, .lte, slice_len_val, Type.usize))) { + const has_sentinel = slice_ty.sentinel(mod) != null; + const slice_len = slice_val.sliceLen(mod); + const len_plus_sent = slice_len + @boolToInt(has_sentinel); + const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); + if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { const 
sentinel_label: []const u8 = if (has_sentinel) " +1 (sentinel)" else @@ -29527,13 +30288,10 @@ fn analyzeSlice( ); } - // If the slice has a sentinel, we subtract one so that - // end_is_len is only true if it equals the length WITHOUT - // the sentinel, so we don't add a sentinel type. - if (has_sentinel) { - int_payload.data -= 1; - } - + // If the slice has a sentinel, we consider end_is_len + // is only true if it equals the length WITHOUT the + // sentinel, so we don't add a sentinel type. + const slice_len_val = try mod.intValue(Type.usize, slice_len); if (end_val.eql(slice_len_val, Type.usize, mod)) { end_is_len = true; } @@ -29569,11 +30327,12 @@ fn analyzeSlice( }; const slice_sentinel = if (sentinel_opt != .none) sentinel else null; + var checked_start_lte_end = by_length; + var runtime_src: ?LazySrcLoc = null; + // requirement: start <= end - var need_start_gt_end_check = true; if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| { - need_start_gt_end_check = false; if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) { return sema.fail( block, @@ -29585,14 +30344,18 @@ fn analyzeSlice( }, ); } + checked_start_lte_end = true; if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: { const expected_sentinel = sentinel orelse break :sentinel_check; - const start_int = start_val.getUnsignedInt(sema.mod.getTarget()).?; - const end_int = end_val.getUnsignedInt(sema.mod.getTarget()).?; + const start_int = start_val.getUnsignedInt(mod).?; + const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod); - const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); + const many_ptr_ty = try mod.manyConstPtrType(elem_ty); + const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty); + const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); + const elem_ptr = try many_ptr_val.elemPtr(elem_ptr_ty, sentinel_index, mod); + const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, .val => |v| v, @@ -29600,36 +30363,49 @@ fn analyzeSlice( block, src, "comptime dereference requires '{}' to have a well-defined layout, but it does not.", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ), .out_of_bounds => |ty| return sema.fail( block, end_src, "slice end index {d} exceeds bounds of containing decl of type '{}'", - .{ end_int, ty.fmt(sema.mod) }, + .{ end_int, ty.fmt(mod) }, ), }; - if (!actual_sentinel.eql(expected_sentinel, elem_ty, sema.mod)) { + if (!actual_sentinel.eql(expected_sentinel, elem_ty, mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "value in memory does not match slice sentinel", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, src, msg, "expected '{}', found '{}'", .{ - expected_sentinel.fmtValue(elem_ty, sema.mod), - actual_sentinel.fmtValue(elem_ty, sema.mod), + expected_sentinel.fmtValue(elem_ty, mod), + actual_sentinel.fmtValue(elem_ty, mod), }); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + } else { + runtime_src = ptr_src; } + } else { + runtime_src = start_src; } + } else { + runtime_src = end_src; } - if (!by_length and block.wantSafety() and !block.is_comptime and need_start_gt_end_check) { + if (!checked_start_lte_end and 
block.wantSafety() and !block.is_comptime) { // requirement: start <= end - try sema.panicStartGreaterThanEnd(block, start, end); + assert(!block.is_comptime); + try sema.requireRuntimeBlock(block, src, runtime_src.?); + const ok = try block.addBinOp(.cmp_lte, start, end); + if (!sema.mod.comp.formatted_panics) { + try sema.addSafetyCheck(block, ok, .start_index_greater_than_end); + } else { + try sema.safetyCheckFormatted(block, ok, "panicStartGreaterThanEnd", &.{ start, end }); + } } const new_len = if (by_length) try sema.coerce(block, Type.usize, uncasted_end_opt, end_src) @@ -29637,11 +30413,11 @@ fn analyzeSlice( try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); - const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data; - const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; + const new_ptr_ty_info = new_ptr_ty.ptrInfo(mod); + const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = new_len_val.toUnsignedInt(target); + const new_len_int = new_len_val.toUnsignedInt(mod); const return_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, mod), @@ -29659,14 +30435,14 @@ fn analyzeSlice( const result = try block.addBitCast(return_ty, new_ptr); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } - if (slice_ty.isSlice()) { + if (slice_ty.isSlice(mod)) { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - const actual_len = if (slice_ty.sentinel() == null) + const actual_len = if (slice_ty.sentinel(mod) == null) slice_len_inst else try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -29685,8 +30461,11 @@ fn analyzeSlice( return result; }; - if (!new_ptr_val.isUndef()) { - return sema.addConstant(return_ty, new_ptr_val); + if (!new_ptr_val.isUndef(mod)) { + return sema.addConstant(return_ty, try mod.getCoerced( + (try new_ptr_val.intern(new_ptr_ty, mod)).toValue(), + return_ty, + )); } // Special case: @as([]i32, undefined)[x..x] @@ -29708,25 +30487,18 @@ fn analyzeSlice( .size = .Slice, }); - const runtime_src = if ((try sema.resolveMaybeUndefVal(ptr_or_slice)) == null) - ptr_src - else if ((try sema.resolveMaybeUndefVal(start)) == null) - start_src - else - end_src; - - try sema.requireRuntimeBlock(block, src, runtime_src); + try sema.requireRuntimeBlock(block, src, runtime_src.?); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } // requirement: end <= len - const opt_len_inst = if (array_ty.zigTypeTag() == .Array) - try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) - else if (slice_ty.isSlice()) blk: { + const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) + try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) + else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we 
don't need to add one for sentinels because the // underlying value data includes the sentinel @@ -29734,7 +30506,7 @@ fn analyzeSlice( } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - if (slice_ty.sentinel() == null) break :blk slice_len_inst; + if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst; // we have to add one because slice lengths don't include the sentinel break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -29778,15 +30550,16 @@ fn cmpNumeric( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); - assert(lhs_ty.isNumeric()); - assert(rhs_ty.isNumeric()); + assert(lhs_ty.isNumeric(mod)); + assert(rhs_ty.isNumeric(mod)); - const lhs_ty_tag = lhs_ty.zigTypeTag(); - const rhs_ty_tag = rhs_ty.zigTypeTag(); - const target = sema.mod.getTarget(); + const lhs_ty_tag = lhs_ty.zigTypeTag(mod); + const rhs_ty_tag = rhs_ty.zigTypeTag(mod); + const target = mod.getTarget(); // One exception to heterogeneous comparison: comptime_float needs to // coerce to fixed-width float. @@ -29805,49 +30578,45 @@ fn cmpNumeric( if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { // Compare ints: const vs. undefined (or vice versa) - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt() and rhs_val.isUndef()) { - try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } - } else if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt() and lhs_val.isUndef()) { - try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (lhs_val.isNan() or rhs_val.isNan()) { + if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { if (op == std.math.CompareOperator.neq) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } - if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, target, sema)) { + if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } else { - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt()) { + if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. 
var - try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } break :src rhs_src; } } else { - if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt()) { + if (try sema.resolveMaybeUndefLazyVal(rhs)) |rhs_val| { + if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. const - try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29901,32 +30670,31 @@ fn cmpNumeric( const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt()); + (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt()); + (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; var lhs_bits: usize = undefined; - if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { - try sema.resolveLazyValue(lhs_val); - if (lhs_val.isUndef()) + if (try sema.resolveMaybeUndefLazyVal(lhs)) |lhs_val| { + if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); - if (lhs_val.isNan()) switch (op) { + if (lhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, else => return Air.Inst.Ref.bool_false, }; - if (lhs_val.isInf()) switch (op) { + if (lhs_val.isInf(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, .eq => return Air.Inst.Ref.bool_false, - .gt, .gte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, - .lt, .lte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, + .gt, .gte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, + .lt, .lte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, }; if (!rhs_is_signed) { - switch (lhs_val.orderAgainstZero()) { + switch (lhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // LHS = 0, RHS is unsigned .lte => return Air.Inst.Ref.bool_true, @@ -29940,7 +30708,7 @@ fn cmpNumeric( } } if (lhs_is_float) { - if (lhs_val.floatHasFraction()) { + if (lhs_val.floatHasFraction(mod)) { switch (op) { .eq => return Air.Inst.Ref.bool_false, .neq => return Air.Inst.Ref.bool_true, @@ -29948,9 +30716,9 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128)); + var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod)); defer bigint.deinit(); - if (lhs_val.floatHasFraction()) { + if (lhs_val.floatHasFraction(mod)) { if (lhs_is_signed) { try bigint.addScalar(&bigint, -1); } else { @@ -29959,33 +30727,32 @@ fn cmpNumeric( } lhs_bits = bigint.toConst().bitCountTwosComp(); } else { - lhs_bits 
= lhs_val.intBitCountTwosComp(target); + lhs_bits = lhs_val.intBitCountTwosComp(mod); } lhs_bits += @boolToInt(!lhs_is_signed and dest_int_is_signed); } else if (lhs_is_float) { dest_float_type = lhs_ty; } else { - const int_info = lhs_ty.intInfo(target); + const int_info = lhs_ty.intInfo(mod); lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } var rhs_bits: usize = undefined; - if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - try sema.resolveLazyValue(rhs_val); - if (rhs_val.isUndef()) + if (try sema.resolveMaybeUndefLazyVal(rhs)) |rhs_val| { + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); - if (rhs_val.isNan()) switch (op) { + if (rhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, else => return Air.Inst.Ref.bool_false, }; - if (rhs_val.isInf()) switch (op) { + if (rhs_val.isInf(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, .eq => return Air.Inst.Ref.bool_false, - .gt, .gte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, - .lt, .lte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, + .gt, .gte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, + .lt, .lte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, }; if (!lhs_is_signed) { - switch (rhs_val.orderAgainstZero()) { + switch (rhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // RHS = 0, LHS is unsigned .gte => return Air.Inst.Ref.bool_true, @@ -29999,7 +30766,7 @@ fn cmpNumeric( } } if (rhs_is_float) { - if (rhs_val.floatHasFraction()) { + if (rhs_val.floatHasFraction(mod)) { switch (op) { .eq => return Air.Inst.Ref.bool_false, .neq => return Air.Inst.Ref.bool_true, @@ -30007,9 +30774,9 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128)); + var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod)); defer bigint.deinit(); - if (rhs_val.floatHasFraction()) { + if (rhs_val.floatHasFraction(mod)) { if (rhs_is_signed) { try bigint.addScalar(&bigint, -1); } else { @@ -30018,13 +30785,13 @@ fn cmpNumeric( } rhs_bits = bigint.toConst().bitCountTwosComp(); } else { - rhs_bits = rhs_val.intBitCountTwosComp(target); + rhs_bits = rhs_val.intBitCountTwosComp(mod); } rhs_bits += @boolToInt(!rhs_is_signed and dest_int_is_signed); } else if (rhs_is_float) { dest_float_type = rhs_ty; } else { - const int_info = rhs_ty.intInfo(target); + const int_info = rhs_ty.intInfo(mod); rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -30032,7 +30799,7 @@ fn cmpNumeric( const max_bits = std.math.max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; - break :blk try Module.makeIntType(sema.arena, signedness, casted_bits); + break :blk try mod.intType(signedness, casted_bits); }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); @@ -30040,13 +30807,20 @@ fn cmpNumeric( return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs); } -/// Asserts that LHS value is an int or comptime int and not undefined, and that RHS type is an int. 
-/// Given a const LHS and an unknown RHS, attempt to determine whether `op` has a guaranteed result. +/// Asserts that LHS value is an int or comptime int and not undefined, and +/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to +/// determine whether `op` has a guaranteed result. /// If it cannot be determined, returns null. /// Otherwise returns a bool for the guaranteed comparison operation. -fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type) ?bool { - const rhs_info = rhs_ty.intInfo(target); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(sema) catch unreachable; +fn compareIntsOnlyPossibleResult( + sema: *Sema, + lhs_val: Value, + op: std.math.CompareOperator, + rhs_ty: Type, +) Allocator.Error!?bool { + const mod = sema.mod; + const rhs_info = rhs_ty.intInfo(mod); + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -30078,7 +30852,7 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value }; const sign_adj = @boolToInt(!is_negative and rhs_info.signedness == .signed); - const req_bits = lhs_val.intBitCountTwosComp(target) + sign_adj; + const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj; // No sized type can have more than 65535 bits. // The RHS type operand is either a runtime value or sized (but undefined) constant. @@ -30111,12 +30885,11 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value .max = false, }; - var ty_buffer: Type.Payload.Bits = .{ - .base = .{ .tag = if (is_negative) .int_signed else .int_unsigned }, - .data = @intCast(u16, req_bits), - }; - const ty = Type.initPayload(&ty_buffer.base); - const pop_count = lhs_val.popCount(ty, target); + const ty = try mod.intType( + if (is_negative) .signed else .unsigned, + @intCast(u16, req_bits), + ); + const pop_count = lhs_val.popCount(ty, mod); if (is_negative) { break :edge .{ @@ -30152,22 +30925,26 @@ fn cmpVector( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - assert(lhs_ty.zigTypeTag() == .Vector); - assert(rhs_ty.zigTypeTag() == .Vector); + assert(lhs_ty.zigTypeTag(mod) == .Vector); + assert(rhs_ty.zigTypeTag(mod) == .Vector); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } }); const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src); - const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.bool); + const result_ty = try mod.vectorType(.{ + .len = lhs_ty.vectorLen(mod), + .child = .bool_type, + }); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return sema.addConstUndef(result_ty); } const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty); @@ -30192,7 +30969,10 @@ fn wrapOptional( inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(inst)) |val| { - return sema.addConstant(dest_ty, try 
Value.Tag.opt_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{ + .ty = dest_ty.toIntern(), + .val = val.toIntern(), + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30206,10 +30986,14 @@ fn wrapErrorUnionPayload( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const dest_payload_ty = dest_ty.errorUnionPayload(); + const mod = sema.mod; + const dest_payload_ty = dest_ty.errorUnionPayload(mod); const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { - return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ + .ty = dest_ty.toIntern(), + .val = .{ .payload = try val.intern(dest_payload_ty, mod) }, + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); try sema.queueFullTypeResolution(dest_payload_ty); @@ -30223,48 +31007,41 @@ fn wrapErrorUnionSet( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); - const dest_err_set_ty = dest_ty.errorUnionSet(); + const dest_err_set_ty = dest_ty.errorUnionSet(mod); if (try sema.resolveMaybeUndefVal(inst)) |val| { - switch (dest_err_set_ty.tag()) { - .anyerror => {}, - .error_set_single => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const n = dest_err_set_ty.castTag(.error_set_single).?.data; - if (mem.eql(u8, expected_name, n)) break :ok; - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set).?.data; - if (!error_set.names.contains(expected_name)) { + switch (dest_err_set_ty.toIntern()) { + .anyerror_type => {}, + else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) { + .error_set_type => |error_set_type| ok: { + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; + if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } - }, - .error_set_inferred => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const ies = dest_err_set_ty.castTag(.error_set_inferred).?.data; + }, + .inferred_error_set_type => |ies_index| ok: { + const ies = mod.inferredErrorSetPtr(ies_index); + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; - // We carefully do this in an order that avoids unnecessarily - // resolving the destination error set type. - if (ies.is_anyerror) break :ok; - if (ies.errors.contains(expected_name)) break :ok; - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { - break :ok; - } + // We carefully do this in an order that avoids unnecessarily + // resolving the destination error set type. 
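+            // Concretely (a restatement of the above, not new behavior): `is_anyerror`
+            // and the names already collected in `ies.errors` are checked first, and
+            // only if both miss do we call `coerceInMemoryAllowedErrorSets`, which is
+            // the step that may force the destination set to be resolved.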
+ if (ies.is_anyerror) break :ok; + + if (ies.errors.contains(expected_name)) break :ok; + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok; - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set_merged => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; - if (!error_set.contains(expected_name)) { return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } + }, + else => unreachable, }, - else => unreachable, } - return sema.addConstant(dest_ty, val); + return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ + .ty = dest_ty.toIntern(), + .val = .{ + .err_name = mod.intern_pool.indexToKey(try val.intern(dest_err_set_ty, mod)).err.name, + }, + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30279,11 +31056,12 @@ fn unionToTag( un: Air.Inst.Ref, un_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| { return sema.addConstant(enum_ty, opv); } if (try sema.resolveMaybeUndefVal(un)) |un_val| { - return sema.addConstant(enum_ty, un_val.unionTag()); + return sema.addConstant(enum_ty, un_val.unionTag(mod)); } try sema.requireRuntimeBlock(block, un_src, null); return block.addTyOp(.get_union_tag, enum_ty, un); @@ -30296,16 +31074,17 @@ fn resolvePeerTypes( instructions: []const Air.Inst.Ref, candidate_srcs: Module.PeerTypeCandidateSrc, ) !Type { + const mod = sema.mod; switch (instructions.len) { - 0 => return Type.initTag(.noreturn), + 0 => return Type.noreturn, 1 => return sema.typeOf(instructions[0]), else => {}, } - const target = sema.mod.getTarget(); + const target = mod.getTarget(); var chosen = instructions[0]; - // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(). + // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(mod). // * ErrorSet: this is an override // * ErrorUnion: this is an override of the error set only // * other: at the end we make an ErrorUnion with the other thing and this @@ -30318,8 +31097,8 @@ fn resolvePeerTypes( const candidate_ty = sema.typeOf(candidate); const chosen_ty = sema.typeOf(chosen); - const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(); - const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(); + const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(mod); + const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(mod); // If the candidate can coerce into our chosen type, we're done. // If the chosen type can coerce into the candidate, use that. 
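As a reference point for the peer-resolution cases that follow, here is a minimal sketch of the user-facing semantics this function implements (illustrative Zig, not part of this change; the test name and locals are hypothetical):

    const std = @import("std");

    test "peer type resolution sketch" {
        // Integer peers resolve to the larger bit count: u32 and u64 give u64.
        const a: u32 = 1;
        const b: u64 = 2;
        try std.testing.expect(@TypeOf(a, b) == u64);

        // Single pointers to arrays of different lengths resolve to a slice
        // (the `convert_to_slice` path below): *[1]u8 and *[2]u8 give []u8.
        var one = [_]u8{1};
        var two = [_]u8{ 2, 3 };
        try std.testing.expect(@TypeOf(&one, &two) == []u8);
    }

`@TypeOf` with multiple operands performs exactly this peer resolution, which is why it works as a compact probe for the behavior.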
@@ -30347,8 +31126,8 @@ fn resolvePeerTypes( continue; }, .Int => { - const chosen_info = chosen_ty.intInfo(target); - const candidate_info = candidate_ty.intInfo(target); + const chosen_info = chosen_ty.intInfo(mod); + const candidate_info = candidate_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; @@ -30356,12 +31135,12 @@ fn resolvePeerTypes( } continue; }, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .ComptimeInt => switch (chosen_ty_tag) { .Int, .Float, .ComptimeFloat => continue, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .Float => switch (chosen_ty_tag) { @@ -30426,11 +31205,11 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; }, .ErrorUnion => { - const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); + const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) { continue; @@ -30440,7 +31219,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; }, else => { @@ -30453,7 +31232,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; } else { err_set_ty = candidate_ty; @@ -30464,14 +31243,14 @@ fn resolvePeerTypes( .ErrorUnion => switch (chosen_ty_tag) { .ErrorSet => { const chosen_set_ty = err_set_ty orelse chosen_ty; - const candidate_set_ty = candidate_ty.errorUnionSet(); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty = chosen_set_ty; } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = null; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } chosen = candidate; chosen_i = candidate_i + 1; @@ -30479,8 +31258,8 @@ fn resolvePeerTypes( }, .ErrorUnion => { - const chosen_payload_ty = chosen_ty.errorUnionPayload(); - const candidate_payload_ty = candidate_ty.errorUnionPayload(); + const chosen_payload_ty = chosen_ty.errorUnionPayload(mod); + const candidate_payload_ty = candidate_ty.errorUnionPayload(mod); const coerce_chosen = (try sema.coerceInMemoryAllowed(block, chosen_payload_ty, candidate_payload_ty, false, target, src, src)) == .ok; const coerce_candidate = (try sema.coerceInMemoryAllowed(block, candidate_payload_ty, chosen_payload_ty, false, target, src, src)) == .ok; @@ -30494,15 +31273,15 @@ fn resolvePeerTypes( chosen_i = candidate_i + 1; } - const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - const candidate_set_ty = candidate_ty.errorUnionSet(); + const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty = chosen_set_ty; } else if (.ok == try 
sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = candidate_set_ty; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } continue; } @@ -30510,26 +31289,26 @@ fn resolvePeerTypes( else => { if (err_set_ty) |chosen_set_ty| { - const candidate_set_ty = candidate_ty.errorUnionSet(); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty = chosen_set_ty; } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = null; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); chosen = candidate; chosen_i = candidate_i + 1; continue; }, }, .Pointer => { - const cand_info = candidate_ty.ptrInfo().data; + const cand_info = candidate_ty.ptrInfo(mod); switch (chosen_ty_tag) { .Pointer => { - const chosen_info = chosen_ty.ptrInfo().data; + const chosen_info = chosen_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30537,7 +31316,7 @@ fn resolvePeerTypes( // *[N]T to []T if ((cand_info.size == .Many or cand_info.size == .Slice) and chosen_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` convert_to_slice = false; @@ -30546,7 +31325,7 @@ fn resolvePeerTypes( continue; } if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` @@ -30559,11 +31338,11 @@ fn resolvePeerTypes( // Keep the one whose element type can be coerced into. 
if (chosen_info.size == .One and cand_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array and - cand_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array) { - const chosen_elem_ty = chosen_info.pointee_type.childType(); - const cand_elem_ty = cand_info.pointee_type.childType(); + const chosen_elem_ty = chosen_info.pointee_type.childType(mod); + const cand_elem_ty = cand_info.pointee_type.childType(mod); const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, chosen_elem_ty, cand_elem_ty, chosen_info.mutable, target, src, src); if (chosen_ok) { @@ -30629,17 +31408,16 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_ptr_ty = chosen_ty.optionalChild(mod); + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; // *[N]T to ?![*]T // *[N]T to ?![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30647,16 +31425,16 @@ fn resolvePeerTypes( } }, .ErrorUnion => { - const chosen_ptr_ty = chosen_ty.errorUnionPayload(); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_ptr_ty = chosen_ty.errorUnionPayload(mod); + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; // *[N]T to E![*]T // *[N]T to E![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30664,7 +31442,7 @@ fn resolvePeerTypes( } }, .Fn => { - if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag() == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { + if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag(mod) == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { chosen = candidate; chosen_i = candidate_i + 1; continue; @@ -30674,15 +31452,14 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf); + const opt_child_ty = candidate_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { - seen_const = seen_const or opt_child_ty.isConstPtr(); + seen_const = seen_const or opt_child_ty.isConstPtr(mod); any_are_null = true; continue; } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); any_are_null = false; chosen = candidate; chosen_i = candidate_i + 1; @@ -30690,23 +31467,23 @@ fn resolvePeerTypes( }, .Vector => switch (chosen_ty_tag) { .Vector => { - const chosen_len = chosen_ty.vectorLen(); - const candidate_len = candidate_ty.vectorLen(); + const chosen_len = chosen_ty.vectorLen(mod); + const candidate_len = 
candidate_ty.vectorLen(mod); if (chosen_len != candidate_len) continue; - const chosen_child_ty = chosen_ty.childType(); - const candidate_child_ty = candidate_ty.childType(); - if (chosen_child_ty.zigTypeTag() == .Int and candidate_child_ty.zigTypeTag() == .Int) { - const chosen_info = chosen_child_ty.intInfo(target); - const candidate_info = candidate_child_ty.intInfo(target); + const chosen_child_ty = chosen_ty.childType(mod); + const candidate_child_ty = candidate_ty.childType(mod); + if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { + const chosen_info = chosen_child_ty.intInfo(mod); + const candidate_info = candidate_child_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; chosen_i = candidate_i + 1; } continue; } - if (chosen_child_ty.zigTypeTag() == .Float and candidate_child_ty.zigTypeTag() == .Float) { + if (chosen_child_ty.zigTypeTag(mod) == .Float and candidate_child_ty.zigTypeTag(mod) == .Float) { if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { chosen = candidate; chosen_i = candidate_i + 1; @@ -30725,8 +31502,8 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag() == .Fn) { - if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr(mod) and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { + if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { continue; } }, @@ -30746,8 +31523,7 @@ fn resolvePeerTypes( continue; }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf); + const opt_child_ty = chosen_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) { continue; } @@ -30759,7 +31535,7 @@ fn resolvePeerTypes( } }, .ErrorUnion => { - const payload_ty = chosen_ty.errorUnionPayload(); + const payload_ty = chosen_ty.errorUnionPayload(mod); if ((try sema.coerceInMemoryAllowed(block, payload_ty, candidate_ty, false, target, src, src)) == .ok) { continue; } @@ -30776,7 +31552,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, chosen_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, chosen_ty); continue; } else { err_set_ty = chosen_ty; @@ -30789,28 +31565,28 @@ fn resolvePeerTypes( // At this point, we hit a compile error. We need to recover // the source locations. 
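        // For hypothetical peers `u32` and `[]const u8`, the message assembled
        // below renders roughly as (a sketch; exact layout depends on the
        // error renderer):
        //
        //   error: incompatible types: 'u32' and '[]const u8'
        //   note: type 'u32' here
        //   note: type '[]const u8' here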
const chosen_src = candidate_srcs.resolve( - sema.gpa, - sema.mod.declPtr(block.src_decl), + mod, + mod.declPtr(block.src_decl), chosen_i, ); const candidate_src = candidate_srcs.resolve( - sema.gpa, - sema.mod.declPtr(block.src_decl), + mod, + mod.declPtr(block.src_decl), candidate_i + 1, ); const msg = msg: { const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ - chosen_ty.fmt(sema.mod), - candidate_ty.fmt(sema.mod), + chosen_ty.fmt(mod), + candidate_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (chosen_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(mod)}); if (candidate_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(mod)}); break :msg msg; }; @@ -30821,139 +31597,231 @@ fn resolvePeerTypes( if (convert_to_slice) { // turn *[N]T => []T - const chosen_child_ty = chosen_ty.childType(); - var info = chosen_ty.ptrInfo(); - info.data.sentinel = chosen_child_ty.sentinel(); - info.data.size = .Slice; - info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr()); - info.data.pointee_type = chosen_child_ty.elemType2(); + const chosen_child_ty = chosen_ty.childType(mod); + var info = chosen_ty.ptrInfo(mod); + info.sentinel = chosen_child_ty.sentinel(mod); + info.size = .Slice; + info.mutable = !(seen_const or chosen_child_ty.isConstPtr(mod)); + info.pointee_type = chosen_child_ty.elemType2(mod); - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); } if (seen_const) { // turn []T => []const T - switch (chosen_ty.zigTypeTag()) { + switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { - const ptr_ty = chosen_ty.errorUnionPayload(); - var info = ptr_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const ptr_ty = chosen_ty.errorUnionPayload(mod); + var info = ptr_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; - const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); }, .Pointer => { - var info = chosen_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + var info = chosen_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); }, else => return 
chosen_ty, } } if (any_are_null) { - const opt_ty = switch (chosen_ty.zigTypeTag()) { + const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { .Null, .Optional => chosen_ty, - else => try Type.optional(sema.arena, chosen_ty), + else => try Type.optional(sema.arena, chosen_ty, mod), }; const set_ty = err_set_ty orelse return opt_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ty, sema.mod); + return try mod.errorUnionType(set_ty, opt_ty); } - if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag()) { + if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) { .ErrorSet => return ty, .ErrorUnion => { - const payload_ty = chosen_ty.errorUnionPayload(); - return try Type.errorUnion(sema.arena, ty, payload_ty, sema.mod); + const payload_ty = chosen_ty.errorUnionPayload(mod); + return try mod.errorUnionType(ty, payload_ty); }, - else => return try Type.errorUnion(sema.arena, ty, chosen_ty, sema.mod), + else => return try mod.errorUnionType(ty, chosen_ty), }; return chosen_ty; } -pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void { - try sema.resolveTypeFully(fn_info.return_type); +pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { + const mod = sema.mod; + try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.return_type.toType()); - if (sema.mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError()) { + if (mod.comp.bin_file.options.error_return_tracing and mod.typeToFunc(fn_ty).?.return_type.toType().isError(mod)) { // Ensure the type exists so that backends can assume that. _ = try sema.getBuiltinType("StackTrace"); } - for (fn_info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + for (0..mod.typeToFunc(fn_ty).?.param_types.len) |i| { + try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.param_types[i].toType()); } } /// Make it so that calling hash() and eql() on `val` will not assert due /// to a type not having its layout resolved. -fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { - switch (val.tag()) { - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return sema.resolveTypeLayout(ty); +fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { + const mod = sema.mod; + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => return val, + .lazy_align, .lazy_size => return (try mod.intern(.{ .int = .{ + .ty = int.ty, + .storage = .{ .u64 = (try val.getUnsignedIntAdvanced(mod, sema)).? 
}, + } })).toValue(), }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return sema.resolveTypeLayout(ty); - }, - .comptime_field_ptr => { - const field_ptr = val.castTag(.comptime_field_ptr).?.data; - return sema.resolveLazyValue(field_ptr.field_val); - }, - .eu_payload, - .opt_payload, - => { - const sub_val = val.cast(Value.Payload.SubValue).?.data; - return sema.resolveLazyValue(sub_val); - }, - .@"union" => { - const union_val = val.castTag(.@"union").?.data; - return sema.resolveLazyValue(union_val.val); - }, - .aggregate => { - const aggregate = val.castTag(.aggregate).?.data; - for (aggregate) |elem_val| { - try sema.resolveLazyValue(elem_val); + .ptr => |ptr| { + const resolved_len = switch (ptr.len) { + .none => .none, + else => (try sema.resolveLazyValue(ptr.len.toValue())).toIntern(), + }; + switch (ptr.addr) { + .decl, .mut_decl => return if (resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .decl => |decl| .{ .decl = decl }, + .mut_decl => |mut_decl| .{ .mut_decl = mut_decl }, + else => unreachable, + }, + .len = resolved_len, + } })).toValue(), + .comptime_field => |field_val| { + const resolved_field_val = + (try sema.resolveLazyValue(field_val.toValue())).toIntern(); + return if (resolved_field_val == field_val and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = .{ .comptime_field = resolved_field_val }, + .len = resolved_len, + } })).toValue(); + }, + .int => |int| { + const resolved_int = (try sema.resolveLazyValue(int.toValue())).toIntern(); + return if (resolved_int == int and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = .{ .int = resolved_int }, + .len = resolved_len, + } })).toValue(); + }, + .eu_payload, .opt_payload => |base| { + const resolved_base = (try sema.resolveLazyValue(base.toValue())).toIntern(); + return if (resolved_base == base and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .eu_payload => .{ .eu_payload = resolved_base }, + .opt_payload => .{ .opt_payload = resolved_base }, + else => unreachable, + }, + .len = ptr.len, + } })).toValue(); + }, + .elem, .field => |base_index| { + const resolved_base = (try sema.resolveLazyValue(base_index.base.toValue())).toIntern(); + return if (resolved_base == base_index.base and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .elem => .{ .elem = .{ + .base = resolved_base, + .index = base_index.index, + } }, + .field => .{ .field = .{ + .base = resolved_base, + .index = base_index.index, + } }, + else => unreachable, + }, + .len = ptr.len, + } })).toValue(); + }, } }, - .slice => { - const slice = val.castTag(.slice).?.data; - try sema.resolveLazyValue(slice.ptr); - return sema.resolveLazyValue(slice.len); + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => return val, + .elems => |elems| { + var resolved_elems: []InternPool.Index = &.{}; + for (elems, 0..) 
|elem, i| { + const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern(); + if (resolved_elems.len == 0 and resolved_elem != elem) { + resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len); + @memcpy(resolved_elems[0..i], elems[0..i]); + } + if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; + } + return if (resolved_elems.len == 0) val else (try mod.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .elems = resolved_elems }, + } })).toValue(); + }, + .repeated_elem => |elem| { + const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern(); + return if (resolved_elem == elem) val else (try mod.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .repeated_elem = resolved_elem }, + } })).toValue(); + }, }, - else => return, + .un => |un| { + const resolved_tag = (try sema.resolveLazyValue(un.tag.toValue())).toIntern(); + const resolved_val = (try sema.resolveLazyValue(un.val.toValue())).toIntern(); + return if (resolved_tag == un.tag and resolved_val == un.val) + val + else + (try mod.intern(.{ .un = .{ + .ty = un.ty, + .tag = resolved_tag, + .val = resolved_val, + } })).toValue(); + }, + else => return val, } } pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Struct => return sema.resolveStructLayout(ty), .Union => return sema.resolveUnionLayout(ty), .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return; - const elem_ty = ty.childType(); + if (ty.arrayLenIncludingSentinel(mod) == 0) return; + const elem_ty = ty.childType(mod); return sema.resolveTypeLayout(elem_ty); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); // In case of querying the ABI alignment of this optional, we will ask // for hasRuntimeBits() of the payload type, so we need "requires comptime" // to be known already before this function returns. @@ -30961,37 +31829,37 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeLayout(payload_ty); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); return sema.resolveTypeLayout(payload_ty); }, .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. 
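                // (Illustrative, not part of this change: a signature such as
                // `fn max(comptime T: type, a: T, b: T) T` has a generic function
                // type; its parameter and return types only become concrete once a
                // call like `max(u32, 1, 2)` instantiates it, so there is nothing
                // to resolve here yet.)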
return; } for (info.param_types) |param_ty| { - try sema.resolveTypeLayout(param_ty); + try sema.resolveTypeLayout(param_ty.toType()); } - try sema.resolveTypeLayout(info.return_type); + try sema.resolveTypeLayout(info.return_type.toType()); }, else => {}, } } fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - if (resolved_ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(resolved_ty)) |struct_obj| { switch (struct_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { const msg = try Module.ErrorMsg.create( sema.gpa, - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); return sema.failWithOwnedErrorMsg(msg); }, @@ -31015,35 +31883,27 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { } if (struct_obj.layout == .Packed) { - try semaBackingIntType(sema.mod, struct_obj); + try semaBackingIntType(mod, struct_obj); } struct_obj.status = .have_layout; _ = try sema.resolveTypeRequiresComptime(resolved_ty); - if (struct_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) { + if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), "struct layout depends on it having runtime bits", .{}, ); return sema.failWithOwnedErrorMsg(msg); } - if (struct_obj.layout == .Auto and sema.mod.backendSupportsFeature(.field_reordering)) { - const optimized_order = if (struct_obj.owner_decl == sema.owner_decl_index) - try sema.perm_arena.alloc(u32, struct_obj.fields.count()) - else blk: { - const decl = sema.mod.declPtr(struct_obj.owner_decl); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(sema.mod.gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); - }; + if (struct_obj.layout == .Auto and mod.backendSupportsFeature(.field_reordering)) { + const optimized_order = try mod.tmp_hack_arena.allocator().alloc(u32, struct_obj.fields.count()); for (struct_obj.fields.values(), 0..) 
|field, i| { - optimized_order[i] = if (field.ty.hasRuntimeBits()) + optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) @intCast(u32, i) else Module.Struct.omitted_field; @@ -31054,11 +31914,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { sema: *Sema, fn lessThan(ctx: @This(), a: u32, b: u32) bool { + const m = ctx.sema.mod; if (a == Module.Struct.omitted_field) return false; if (b == Module.Struct.omitted_field) return true; - const target = ctx.sema.mod.getTarget(); - return ctx.struct_obj.fields.values()[a].ty.abiAlignment(target) > - ctx.struct_obj.fields.values()[b].ty.abiAlignment(target); + return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) > + ctx.struct_obj.fields.values()[b].ty.abiAlignment(m); } }; mem.sort(u32, optimized_order, AlignSortContext{ @@ -31073,20 +31933,16 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; - const target = mod.getTarget(); var fields_bit_sum: u64 = 0; for (struct_obj.fields.values()) |field| { - fields_bit_sum += field.ty.bitSize(target); + fields_bit_sum += field.ty.bitSize(mod); } const decl_index = struct_obj.owner_decl; const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - const zir = struct_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -31103,28 +31959,33 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -31148,21 +32009,27 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum); - struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator); + struct_obj.backing_int_ty = backing_int_ty; try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } } else { if (fields_bit_sum > std.math.maxInt(u16)) { var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = undefined, - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, 
.owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = undefined, }; defer sema.deinit(); @@ -31170,7 +32037,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = undefined, .instructions = .{}, .inlining = null, @@ -31178,32 +32045,29 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, fields_bit_sum), - }; - struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); } } fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void { - const target = sema.mod.getTarget(); + const mod = sema.mod; - if (!backing_int_ty.isInt()) { + if (!backing_int_ty.isInt(mod)) { return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)}); } - if (backing_int_ty.bitSize(target) != fields_bit_sum) { + if (backing_int_ty.bitSize(mod) != fields_bit_sum) { return sema.fail( block, src, "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}", - .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum }, + .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum }, ); } } fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (!ty.isIndexable()) { + const mod = sema.mod; + if (!ty.isIndexable(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -31215,12 +32079,13 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { } fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (ty.zigTypeTag() == .Pointer) { - switch (ty.ptrSize()) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Pointer) { + switch (ty.ptrSize(mod)) { .Slice, .Many, .C => return, .One => { - const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() == .Array) return; + const elem_ty = ty.childType(mod); + if (elem_ty.zigTypeTag(mod) == .Array) return; // TODO https://github.com/ziglang/zig/issues/15479 // if (elem_ty.isTuple()) return; }, @@ -31236,8 +32101,9 @@ fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void } fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(resolved_ty).?; switch (union_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { @@ -31270,7 +32136,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { union_obj.status = .have_layout; _ = try sema.resolveTypeRequiresComptime(resolved_ty); - if (union_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) { + if (union_obj.assumed_runtime_bits and 
!(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, union_obj.srcLoc(sema.mod), @@ -31285,188 +32151,154 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // for hasRuntimeBits() of each field, so we need "requires comptime" // to be known already before this function returns. pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .null, - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, - .empty_struct_literal, - .empty_struct, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .generic_poison, - .array_u8, - .array_u8_sentinel_0, - .int_signed, - .int_unsigned, - .enum_simple, - => false, + const mod = sema.mod; - .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .comptime_float, - .enum_literal, - .type_info, - // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => true, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .array, - .array_sentinel, - .vector, - => return sema.resolveTypeRequiresComptime(ty.childType()), - - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { - return child_ty.fnInfo().is_generic; - } else { - return sema.resolveTypeRequiresComptime(child_ty); - } - }, - - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return sema.resolveTypeRequiresComptime(ty.optionalChild(&buf)); - }, - - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) 
|field_ty, i| { - const have_comptime_val = tuple.values[i].tag() != .unreachable_value; - if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) { - return true; + return switch (ty.toIntern()) { + .empty_struct_type => false, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => false, + .ptr_type => |ptr_type| { + const child_ty = ptr_type.child.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { + return mod.typeToFunc(child_ty).?.is_generic; + } else { + return sema.resolveTypeRequiresComptime(child_ty); } - } - return false; - }, + }, + .anyframe_type => |child| { + if (child == .none) return false; + return sema.resolveTypeRequiresComptime(child.toType()); + }, + .array_type => |array_type| return sema.resolveTypeRequiresComptime(array_type.child.toType()), + .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), + .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), + .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), + .error_set_type, .inferred_error_set_type => false, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var requires_comptime = false; - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - struct_obj.requires_comptime = .yes; - } else { - struct_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } - }, + .func_type => true, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var requires_comptime = false; - union_obj.requires_comptime = .wip; - for (union_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - union_obj.requires_comptime = .yes; - } else { - union_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } - }, + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return sema.resolveTypeRequiresComptime(child_ty); - }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch 
(struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + struct_obj.requires_comptime = .yes; + } else { + struct_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, + + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, field_val| { + const have_comptime_val = field_val != .none; + if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty.toType())) { + return true; + } + } + return false; + }, + + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + union_obj.requires_comptime = .wip; + for (union_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + union_obj.requires_comptime = .yes; + } else { + union_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, + + .opaque_type => false, + + .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), + + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, }; } @@ -31474,40 +32306,38 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => { - const child_ty = try sema.resolveTypeFields(ty.childType()); + const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, - .Struct => switch (ty.tag()) { - .@"struct" => return sema.resolveStructFully(ty), - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - + .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => return sema.resolveStructFully(ty), + .anon_struct_type => |tuple| { for (tuple.types) |field_ty| { - try sema.resolveTypeFully(field_ty); + try sema.resolveTypeFully(field_ty.toType()); } }, else => {}, }, .Union => return sema.resolveUnionFully(ty), - .Array => return sema.resolveTypeFully(ty.childType()), + .Array => return sema.resolveTypeFully(ty.childType(mod)), .Optional => { - var buf: Type.Payload.ElemType = undefined; - return sema.resolveTypeFully(ty.optionalChild(&buf)); + return sema.resolveTypeFully(ty.optionalChild(mod)); }, - .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), + .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)), .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. 
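                // (Same deferral as in resolveTypeLayout above: a generic signature
                // still carries generic_poison types that only instantiation can
                // make concrete.)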
return; } for (info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + try sema.resolveTypeFully(param_ty.toType()); } - try sema.resolveTypeFully(info.return_type); + try sema.resolveTypeFully(info.return_type.toType()); }, else => {}, } @@ -31516,9 +32346,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveStructLayout(ty); + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const payload = resolved_ty.castTag(.@"struct").?; - const struct_obj = payload.data; + const struct_obj = mod.typeToStruct(resolved_ty).?; switch (struct_obj.status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, @@ -31546,8 +32376,9 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveUnionLayout(ty); + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(resolved_ty).?; switch (union_obj.status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, @@ -31572,30 +32403,111 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { } pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - try sema.resolveTypeFieldsStruct(ty, struct_obj); - return ty; - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - try sema.resolveTypeFieldsUnion(ty, union_obj); - return ty; - }, - .type_info => return sema.getBuiltinType("Type"), - .extern_options => return sema.getBuiltinType("ExternOptions"), - .export_options => return sema.getBuiltinType("ExportOptions"), - .atomic_order => return sema.getBuiltinType("AtomicOrder"), - .atomic_rmw_op => return sema.getBuiltinType("AtomicRmwOp"), - .calling_convention => return sema.getBuiltinType("CallingConvention"), - .address_space => return sema.getBuiltinType("AddressSpace"), - .float_mode => return sema.getBuiltinType("FloatMode"), - .reduce_op => return sema.getBuiltinType("ReduceOp"), - .modifier => return sema.getBuiltinType("CallModifier"), - .prefetch_options => return sema.getBuiltinType("PrefetchOptions"), + const mod = sema.mod; - else => return ty, + switch (ty.toIntern()) { + .var_args_param_type => unreachable, + + .none => unreachable, + + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + .anyerror_void_error_union_type, + .generic_poison_type, + 
.empty_struct_type, + => return ty, + + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .zero_u8 => unreachable, + .one => unreachable, + .one_usize => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, + .negative_one => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + .type_info_type => return sema.getBuiltinType("Type"), + .extern_options_type => return sema.getBuiltinType("ExternOptions"), + .export_options_type => return sema.getBuiltinType("ExportOptions"), + .atomic_order_type => return sema.getBuiltinType("AtomicOrder"), + .atomic_rmw_op_type => return sema.getBuiltinType("AtomicRmwOp"), + .calling_convention_type => return sema.getBuiltinType("CallingConvention"), + .address_space_type => return sema.getBuiltinType("AddressSpace"), + .float_mode_type => return sema.getBuiltinType("FloatMode"), + .reduce_op_type => return sema.getBuiltinType("ReduceOp"), + .call_modifier_type => return sema.getBuiltinType("CallModifier"), + .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"), + + _ => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty; + try sema.resolveTypeFieldsStruct(ty, struct_obj); + return ty; + }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + try sema.resolveTypeFieldsUnion(ty, union_obj); + return ty; + }, + + else => return ty, + }, } } @@ -31682,35 +32594,39 @@ fn resolveInferredErrorSet( sema: *Sema, block: *Block, src: LazySrcLoc, - ies: *Module.Fn.InferredErrorSet, + ies_index: Module.Fn.InferredErrorSet.Index, ) CompileError!void { + const mod = sema.mod; + const ies = mod.inferredErrorSetPtr(ies_index); + if (ies.is_resolved) return; - if (ies.func.state == .in_progress) { + const func = mod.funcPtr(ies.func); + if (func.state == .in_progress) { return sema.fail(block, src, "unable to resolve inferred error set", .{}); } // In order to ensure that all dependencies are properly added to the set, we // need to ensure the function body is analyzed of the inferred error set. // However, in the case of comptime/inline function calls with inferred error sets, - // each call gets a new InferredErrorSet object, which points to the same - // `*Module.Fn`. Not only is the function not relevant to the inferred error set + // each call gets a new InferredErrorSet object, which contains the same + // `Module.Fn.Index`. Not only is the function not relevant to the inferred error set // in this case, it may be a generic function which would cause an assertion failure // if we called `ensureFuncBodyAnalyzed` on it here. - const ies_func_owner_decl = sema.mod.declPtr(ies.func.owner_decl); - const ies_func_info = ies_func_owner_decl.ty.fnInfo(); + const ies_func_owner_decl = mod.declPtr(func.owner_decl); + const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?; // if ies declared by a inline function with generic return type, the return_type should be generic_poison, // because inline function does not create a new declaration, and the ies has been filled with analyzeCall, // so here we can simply skip this case. 
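// A hedged sketch of the situation described above (illustrative function;
// not part of the patch): an inline function with an inferred error set. Its
// set is populated during the inline call itself, by analyzeCall.
const testing_sketch = @import("std").testing;
inline fn parseByte(s: []const u8) !u8 {
    if (s.len == 0) return error.Empty; // contributes error.Empty to the set
    return s[0];
}
test "inferred error set of an inline function" {
    try testing_sketch.expectError(error.Empty, parseByte(""));
}
// The check below recognizes exactly this shape and skips re-resolution.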
- if (ies_func_info.return_type.tag() == .generic_poison) { + if (ies_func_info.return_type == .generic_poison_type) { assert(ies_func_info.cc == .Inline); - } else if (ies_func_info.return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) { + } else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? == ies) { if (ies_func_info.is_generic) { const msg = msg: { const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); errdefer msg.destroy(sema.gpa); - try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(), msg, "generic function declared here", .{}); + try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(mod), msg, "generic function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -31722,10 +32638,11 @@ fn resolveInferredErrorSet( ies.is_resolved = true; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (ies_index == other_ies_index) continue; + try sema.resolveInferredErrorSet(block, src, other_ies_index); + const other_ies = mod.inferredErrorSetPtr(other_ies_index); for (other_ies.errors.keys()) |key| { try ies.errors.put(sema.gpa, key, {}); } @@ -31740,15 +32657,17 @@ fn resolveInferredErrorSetTy( src: LazySrcLoc, ty: Type, ) CompileError!void { - if (ty.castTag(.error_set_inferred)) |inferred| { - try sema.resolveInferredErrorSet(block, src, inferred.data); + const mod = sema.mod; + if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); } } fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; + const ip = &mod.intern_pool; const decl_index = struct_obj.owner_decl; - const zir = struct_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -31794,35 +32713,37 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -31834,13 
+32755,13 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } struct_obj.fields = .{}; - try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); + try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); const Field = struct { type_body_len: u32 = 0, align_body_len: u32 = 0, init_body_len: u32 = 0, - type_ref: Air.Inst.Ref = .none, + type_ref: Zir.Inst.Ref = .none, }; const fields = try sema.arena.alloc(Field, fields_len); var any_inits = false; @@ -31885,30 +32806,30 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void extra_index += 1; // This string needs to outlive the ZIR code. - const field_name = if (field_name_zir) |some| - try decl_arena_allocator.dupe(u8, some) + const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s| + s else - try std.fmt.allocPrint(decl_arena_allocator, "{d}", .{field_i}); + try std.fmt.allocPrint(sema.arena, "{d}", .{field_i})); const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { - const field_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name}); + const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy; + const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)}); errdefer msg.destroy(gpa); const prev_field_index = struct_obj.fields.getIndex(field_name).?; - const prev_field_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }); - try sema.mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); + const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index }); + try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } gop.value_ptr.* = .{ - .ty = Type.initTag(.noreturn), + .ty = Type.noreturn, .abi_align = 0, - .default_val = Value.initTag(.unreachable_value), + .default_val = .none, .is_comptime = is_comptime, .offset = undefined, }; @@ -31934,7 +32855,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (zir_field.type_ref != .none) { break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -31950,7 +32871,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -31960,16 +32881,16 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void else => |e| return e, }; }; - if (field_ty.tag() == .generic_poison) { + if (field_ty.isGenericPoison()) { return error.GenericPoison; } const field = &struct_obj.fields.values()[field_i]; - field.ty = try field_ty.copy(decl_arena_allocator); + field.ty = field_ty; - if 
(field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -31981,9 +32902,9 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }; return sema.failWithOwnedErrorMsg(msg); } - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -31997,11 +32918,11 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field); @@ -32010,13 +32931,13 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty))) { + } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty); @@ -32033,7 +32954,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .alignment, }).lazy; @@ -32061,7 +32982,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const field = &struct_obj.fields.values()[field_i]; const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const init_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32071,17 +32992,21 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void else => |e| return e, }; const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { - const init_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = 
.value, }).lazy; return sema.failWithNeededComptime(&block_scope, init_src, "struct field default value must be comptime-known"); }; - field.default_val = try default_val.copy(decl_arena_allocator); + field.default_val = try default_val.intern(field.ty, mod); } } } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } struct_obj.have_field_inits = true; } @@ -32091,8 +33016,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { defer tracy.end(); const gpa = mod.gpa; + const ip = &mod.intern_pool; const decl_index = union_obj.owner_decl; - const zir = union_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; assert(extended.opcode == .union_decl); const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); @@ -32134,35 +33060,37 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { extra_index += body.len; const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &union_obj.namespace, + .namespace = union_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -32178,66 +33106,61 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } - try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); + try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); var int_tag_ty: Type = undefined; - var enum_field_names: ?*Module.EnumNumbered.NameMap = null; - var enum_value_map: ?*Module.EnumNumbered.ValueMap = null; - var tag_ty_field_names: ?Module.EnumFull.NameMap = null; + var enum_field_names: []InternPool.NullTerminatedString = &.{}; + var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; + var explicit_tags_seen: []bool = &.{}; if (tag_type_ref != .none) { const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref); if (small.auto_enum_tag) { // The provided type is an integer type and we must construct the enum tag type here. 
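// A hedged user-level example of this branch (illustrative type; not part of
// the patch): `union(enum(u8))` asks the compiler to build the tag enum
// itself, with an explicit integer tag type.
const Auto = union(enum(u8)) {
    a: u32,
    b: f32,
    c: void,
};
test "auto-constructed numbered tag" {
    const v: Auto = .{ .b = 1.5 };
    const tag = @import("std").meta.activeTag(v);
    try @import("std").testing.expect(@enumToInt(tag) == 1); // auto-numbered
}
// The checks below reject non-integer tag types and integer types too small
// to number every field (the `field_count_val` fit test).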
int_tag_ty = provided_ty; - if (int_tag_ty.zigTypeTag() != .Int and int_tag_ty.zigTypeTag() != .ComptimeInt) { - return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)}); + if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { + return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)}); } if (fields_len > 0) { - var field_count_val: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = fields_len - 1, - }; - if (!(try sema.intFitsInType(Value.initPayload(&field_count_val.base), int_tag_ty, null))) { + const field_count_val = try mod.intValue(Type.comptime_int, fields_len - 1); + if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{ - int_tag_ty.fmt(sema.mod), + int_tag_ty.fmt(mod), fields_len - 1, }); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); + try enum_field_vals.ensureTotalCapacity(sema.arena, fields_len); } - union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, fields_len, provided_ty, union_obj); - const enum_obj = union_obj.tag_ty.castTag(.enum_numbered).?.data; - enum_field_names = &enum_obj.fields; - enum_value_map = &enum_obj.values; } else { // The provided type is the enum tag type. - union_obj.tag_ty = try provided_ty.copy(decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag() != .Enum) { - return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}); - } + union_obj.tag_ty = provided_ty; + const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) { + .enum_type => |x| x, + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), + }; // The fields of the union must match the enum exactly. - // Store a copy of the enum field names so we can check for - // missing or extraneous fields later. - tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena); + // A flag per field is used to check for missing and extraneous fields. + explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); + @memset(explicit_tags_seen, false); } } else { // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis // purposes, we still auto-generate an enum tag type the same way. That the union is // untagged is represented by the Type tag (union vs union_tagged). 
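// Hedged illustrations of the three flavors this function distinguishes
// (illustrative types; not part of the patch). Even the untagged form gets an
// auto-generated tag enum internally, purely for semantic analysis.
const Untagged = union { int: i32, float: f32 };
const AutoTagged = union(enum) { int: i32, float: f32 };
const Tag = enum { int, float };
const Explicit = union(Tag) { int: i32, float: f32 };
// The branch below handles the untagged case by collecting field names for
// the implicit enum.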
- union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, fields_len, union_obj); - enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields; - } - - if (fields_len == 0) { - return; + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } const bits_per_field = 4; @@ -32281,17 +33204,17 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk align_ref; } else .none; - const tag_ref: Zir.Inst.Ref = if (has_tag) blk: { + const tag_ref: Air.Inst.Ref = if (has_tag) blk: { const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); extra_index += 1; break :blk try sema.resolveInst(tag_ref); } else .none; - if (enum_value_map) |map| { - const copied_val = if (tag_ref != .none) blk: { + if (enum_field_vals.capacity() > 0) { + const enum_tag_val = if (tag_ref != .none) blk: { const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const val_src = union_obj.fieldSrcLoc(sema.mod, .{ + const val_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32302,27 +33225,22 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; last_tag_val = val; - // This puts the memory into the union arena, not the enum arena, but - // it is OK since they share the same lifetime. - break :blk try val.copy(decl_arena_allocator); + break :blk val; } else blk: { const val = if (last_tag_val) |val| - try sema.intAdd(val, Value.one, int_tag_ty) + try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined) else - Value.zero; + try mod.intValue(int_tag_ty, 0); last_tag_val = val; - break :blk try val.copy(decl_arena_allocator); + break :blk val; }; - const gop = map.getOrPutAssumeCapacityContext(copied_val, .{ - .ty = int_tag_ty, - .mod = mod, - }); + const gop = enum_field_vals.getOrPutAssumeCapacity(enum_tag_val.toIntern()); if (gop.found_existing) { - const field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = gop.index }).lazy; + const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy; const msg = msg: { - const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, sema.mod)}); + const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(int_tag_ty, mod)}); errdefer msg.destroy(gpa); try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -32332,19 +33250,19 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } // This string needs to outlive the ZIR code. 
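// A hedged sketch of the lifetime point above (a stand-in for the real
// InternPool; all names here are illustrative, and `std` is assumed to be in
// scope as it is in Sema.zig): copy a borrowed name into pool-owned memory
// once, then hand out a stable index that outlives the original buffer.
const NamePool = struct {
    names: std.ArrayListUnmanaged([]u8) = .{},

    fn intern(self: *@This(), gpa: std.mem.Allocator, name: []const u8) !u32 {
        for (self.names.items, 0..) |existing, i| {
            if (std.mem.eql(u8, existing, name)) return @intCast(u32, i);
        }
        const copy = try gpa.dupe(u8, name); // the ZIR slice may be freed later
        try self.names.append(gpa, copy);
        return @intCast(u32, self.names.items.len - 1);
    }
};
// `ip.getOrPutString` below performs this copy-and-index step in the patch.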
- const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - if (enum_field_names) |set| { - set.putAssumeCapacity(field_name, {}); + const field_name = try ip.getOrPutString(gpa, field_name_zir); + if (enum_field_names.len != 0) { + enum_field_names[field_i] = field_name; } const field_ty: Type = if (!has_type) Type.void else if (field_type_ref == .none) - Type.initTag(.noreturn) + Type.noreturn else sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32354,46 +33272,54 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { else => |e| return e, }; - if (field_ty.tag() == .generic_poison) { + if (field_ty.isGenericPoison()) { return error.GenericPoison; } const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { - const field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name}); + const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; + const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{}'", .{ + field_name.fmt(ip), + }); errdefer msg.destroy(gpa); const prev_field_index = union_obj.fields.getIndex(field_name).?; - const prev_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }).lazy; - try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl), msg, "other field here", .{}); + const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy; + try mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - if (tag_ty_field_names) |*names| { - const enum_has_field = names.orderedRemove(field_name); - if (!enum_has_field) { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; + const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; - const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{}' in enum '{}'", .{ + field_name.fmt(ip), union_obj.tag_ty.fmt(mod), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } + }; + // No check for duplicate because the check already happened in order + // to create the enum type in the first place. 
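// A hedged user-level view of what these flags enforce (illustrative types;
// not part of the patch): with an explicit tag enum, every union field must
// name an enum field, and every enum field must appear in the union.
const Flag = enum { on, off };
const Ok = union(Flag) { on: u32, off: void };
// union(Flag) { on: u32 }           -> error: enum field 'off' missing
// union(Flag) { on: u32, dim: u8 }  -> error: no field named 'dim' in 'Flag'
// The assert below cannot observe a duplicate: duplicate names were already
// rejected when the enum type itself was created.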
+ assert(!explicit_tags_seen[enum_index]); + explicit_tags_seen[enum_index] = true; } - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32407,11 +33333,11 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field); @@ -32420,13 +33346,13 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty); @@ -32438,14 +33364,14 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } gop.value_ptr.* = .{ - .ty = try field_ty.copy(decl_arena_allocator), + .ty = field_ty, .abi_align = 0, }; if (align_ref != .none) { gop.value_ptr.abi_align = sema.resolveAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = union_obj.fieldSrcLoc(sema.mod, .{ + const align_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .alignment, }).lazy; @@ -32459,22 +33385,29 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (tag_ty_field_names) |names| { - if (names.count() > 0) { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; + if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{}); errdefer msg.destroy(sema.gpa); const enum_ty = union_obj.tag_ty; - for (names.keys()) |field_name| { - const field_index = enum_ty.enumFieldIndex(field_name).?; - try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{field_name}); + for (tag_info.names, 0..) 
|field_name, field_index| { + if (explicit_tags_seen[field_index]) continue; + try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{}' missing, declared here", .{ + field_name.fmt(ip), + }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + } else if (enum_field_vals.count() > 0) { + union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), union_obj); + } else { + union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_obj); } } @@ -32486,116 +33419,103 @@ fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Ty fn generateUnionTagTypeNumbered( sema: *Sema, block: *Block, - fields_len: u32, - int_ty: Type, + enum_field_names: []const InternPool.NullTerminatedString, + enum_field_vals: []const InternPool.Index, union_obj: *Module.Union, ) !Type { const mod = sema.mod; - - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered); - enum_ty_payload.* = .{ - .base = .{ .tag = .enum_numbered }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); + const gpa = sema.gpa; const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); - const name = name: { - const fqn = try union_obj.getFullyQualifiedName(mod); - defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); - }; + const fqn = try union_obj.getFullyQualifiedName(mod); + const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ - .ty = Type.type, - .val = enum_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name); - sema.mod.declPtr(new_decl_index).name_fully_qualified = true; - - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; - new_decl.name_fully_qualified = true; errdefer mod.abortAnonDecl(new_decl_index); - const copied_int_ty = try int_ty.copy(new_decl_arena_allocator); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .tag_ty = copied_int_ty, - .fields = .{}, - .values = .{}, - }; - // Here we pre-allocate the maps using the decl arena. 
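// A hedged check of the naming scheme used above (illustrative types; not
// part of the patch): the generated tag enum is the decl that user code
// reaches through @typeInfo.
test "generated tag enum is reachable via @typeInfo" {
    const U = union(enum) { a: u8, b: void };
    const T = @typeInfo(U).Union.tag_type.?;
    try @import("std").testing.expect(@typeInfo(T).Enum.fields.len == 2);
}
// The rewritten body below interns this enum type instead of allocating it in
// a per-decl arena.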
- try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ - .ty = copied_int_ty, - .mod = mod, - }); - try new_decl.finalizeNewArena(&new_decl_arena); - return enum_ty; + const new_decl = mod.declPtr(new_decl_index); + new_decl.name_fully_qualified = true; + new_decl.owns_tv = true; + + const enum_ty = try mod.intern(.{ .enum_type = .{ + .decl = new_decl_index, + .namespace = .none, + .tag_ty = if (enum_field_vals.len == 0) + (try mod.intType(.unsigned, 0)).toIntern() + else + mod.intern_pool.typeOf(enum_field_vals[0]), + .names = enum_field_names, + .values = enum_field_vals, + .tag_mode = .explicit, + } }); + + new_decl.ty = Type.type; + new_decl.val = enum_ty.toValue(); + + try mod.finalizeAnonDecl(new_decl_index); + return enum_ty.toType(); } -fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize, maybe_union_obj: ?*Module.Union) !Type { +fn generateUnionTagTypeSimple( + sema: *Sema, + block: *Block, + enum_field_names: []const InternPool.NullTerminatedString, + maybe_union_obj: ?*Module.Union, +) !Type { const mod = sema.mod; - - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple); - enum_ty_payload.* = .{ - .base = .{ .tag = .enum_simple }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); + const gpa = sema.gpa; const new_decl_index = new_decl_index: { const union_obj = maybe_union_obj orelse { break :new_decl_index try mod.createAnonymousDecl(block, .{ - .ty = Type.type, - .val = enum_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }); }; const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); - const name = name: { - const fqn = try union_obj.getFullyQualifiedName(mod); - defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); - }; + const fqn = try union_obj.getFullyQualifiedName(mod); + const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ - .ty = Type.type, - .val = enum_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name); - sema.mod.declPtr(new_decl_index).name_fully_qualified = true; + mod.declPtr(new_decl_index).name_fully_qualified = true; break :new_decl_index new_decl_index; }; + errdefer mod.abortAnonDecl(new_decl_index); + + const enum_ty = try mod.intern(.{ .enum_type = .{ + .decl = new_decl_index, + .namespace = .none, + .tag_ty = if (enum_field_names.len == 0) + (try mod.intType(.unsigned, 0)).toIntern() + else + (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), + .names = enum_field_names, + .values = &.{}, + .tag_mode = .auto, + } }); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + new_decl.ty = Type.type; + new_decl.val = enum_ty.toValue(); - enum_obj.* = .{ -
.owner_decl = new_decl_index, - .fields = .{}, - }; - // Here we pre-allocate the maps using the decl arena. - try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - try new_decl.finalizeNewArena(&new_decl_arena); - return enum_ty; + try mod.finalizeAnonDecl(new_decl_index); + return enum_ty.toType(); } fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - var wip_captures = try WipCaptureScope.init(sema.gpa, sema.perm_arena, sema.owner_decl.src_scope); + const gpa = sema.gpa; + + var wip_captures = try WipCaptureScope.init(gpa, sema.owner_decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ @@ -32609,19 +33529,20 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { .is_comptime = true, }; defer { - block.instructions.deinit(sema.gpa); - block.params.deinit(sema.gpa); + block.instructions.deinit(gpa); + block.params.deinit(gpa); } const src = LazySrcLoc.nodeOffset(0); const mod = sema.mod; + const ip = &mod.intern_pool; const std_pkg = mod.main_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; const opt_builtin_inst = (try sema.namespaceLookupRef( &block, src, mod.declPtr(std_file.root_decl.unwrap().?).src_namespace, - "builtin", + try ip.getOrPutString(gpa, "builtin"), )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); const builtin_inst = try sema.analyzeLoad(&block, src, opt_builtin_inst, src); const builtin_ty = sema.analyzeAsType(&block, src, builtin_inst) catch |err| switch (err) { @@ -32631,8 +33552,8 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { const opt_ty_decl = (try sema.namespaceLookup( &block, src, - builtin_ty.getNamespace().?, - name, + builtin_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, name), )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); return sema.analyzeDeclVal(&block, src, opt_ty_decl); } @@ -32640,7 +33561,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { const ty_inst = try sema.getBuiltin(name); - var wip_captures = try WipCaptureScope.init(sema.gpa, sema.perm_arena, sema.owner_decl.src_scope); + var wip_captures = try WipCaptureScope.init(sema.gpa, sema.owner_decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ @@ -32673,341 +33594,287 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// that the types are already resolved. 
/// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { - switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_int, - .comptime_float, - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .bool, - .type, - .anyerror, - .error_set_single, - .error_set, - .error_set_merged, - .error_union, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - .single_const_pointer_to_comptime_int, - .array_sentinel, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .anyopaque, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .enum_literal, - .anyerror_void_error_union, - .error_set_inferred, - .@"opaque", - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", - .anyframe_T, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer, - .single_mut_pointer, - .pointer, - => return null, - - .optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); - if (child_ty.isNoReturn()) { - return Value.null; - } else { - return null; - } - }, - - .@"struct" => { - const resolved_ty = try sema.resolveTypeFields(ty); - const s = resolved_ty.castTag(.@"struct").?.data; - for (s.fields.values(), 0..) |field, i| { - if (field.is_comptime) continue; - if (field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - s.srcLoc(sema.mod), - "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { + const mod = sema.mod; + return switch (ty.toIntern()) { + .empty_struct_type => Value.empty_struct, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return try mod.intValue(ty, 0); + } else { return null; } - } - return Value.initTag(.empty_struct_value); - }, + }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) |val, i| { - const is_comptime = val.tag() != .unreachable_value; - if (is_comptime) continue; - if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue; - return null; - } - return Value.initTag(.empty_struct_value); - }, + .ptr_type, + .error_union_type, + .func_type, + .anyframe_type, + .error_set_type, + .inferred_error_set_type, + => null, - .enum_numbered => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. 
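// Hedged user-level examples of the property computed here (not part of the
// patch): a type with exactly one possible value needs no runtime bits.
test "types with one possible value are zero-sized" {
    const expect = @import("std").testing.expect;
    try expect(@sizeOf(u0) == 0);
    try expect(@sizeOf(void) == 0);
    try expect(@sizeOf([4]u0) == 0);
    try expect(@sizeOf(struct { a: u0, b: void }) == 0);
    try expect(@sizeOf(enum { only }) == 0);
}
// Both the removed tag-based cases below and their InternPool-based
// replacements compute this same property.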
- if (enum_obj.tag_ty.hasRuntimeBits()) { - return null; - } - if (enum_obj.fields.count() == 1) { - if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered - } else { - return enum_obj.values.keys()[0]; + inline .array_type, .vector_type => |seq_type, seq_tag| { + const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; + if (seq_type.len + @boolToInt(has_sentinel) == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } })).toValue(); + + if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, + } })).toValue(); } - } else { return null; - } - }, - .enum_full => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_full).?.data; - if (enum_obj.tag_ty.hasRuntimeBits()) { - return null; - } - switch (enum_obj.fields.count()) { - 0 => return Value.initTag(.unreachable_value), - 1 => if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + }, + .opt_type => |child| { + if (child == .noreturn_type) { + return try mod.nullValue(ty); } else { - return enum_obj.values.keys()[0]; + return null; + } + }, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .enum_literal, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + => null, + + .void => Value.void, + .noreturn => Value.@"unreachable", + .null => Value.null, + .undefined => Value.undef, + + .generic_poison => return error.GenericPoison, + }, + .struct_type => |struct_type| { + const resolved_ty = try sema.resolveTypeFields(ty); + if (mod.structPtrUnwrap(struct_type.index)) |s| { + const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count()); + for (field_vals, s.fields.values(), 0..) |*field_val, field, i| { + if (field.is_comptime) { + field_val.* = field.default_val; + continue; + } + if (field.ty.eql(resolved_ty, sema.mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + s.srcLoc(sema.mod), + "struct '{}' depends on itself", + .{ty.fmt(sema.mod)}, + ); + try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(msg); + } + if (try sema.typeHasOnePossibleValue(field.ty)) |field_opv| { + field_val.* = try field_opv.intern(field.ty, mod); + } else return null; + } + + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); + } + + // In this case the struct has no fields at all and + // therefore has one possible value. + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } })).toValue(); + }, + + .anon_struct_type => |tuple| { + for (tuple.values) |val| { + if (val == .none) return null; + } + // In this case the struct has all comptime-known fields and + // therefore has one possible value. 
+ // TODO: write something like getCoercedInts to avoid needing to dupe + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values) }, + } })).toValue(); + }, + + .union_type => |union_type| { + const resolved_ty = try sema.resolveTypeFields(ty); + const union_obj = mod.unionPtr(union_type.index); + const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse + return null; + const fields = union_obj.fields.values(); + if (fields.len == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + } + const only_field = fields[0]; + if (only_field.ty.eql(resolved_ty, sema.mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + union_obj.srcLoc(sema.mod), + "union '{}' depends on itself", + .{ty.fmt(sema.mod)}, + ); + try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(msg); + } + const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse + return null; + const only = try mod.intern(.{ .un = .{ + .ty = resolved_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), + } }); + return only.toValue(); + }, + .opaque_type => null, + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = int_opv.toIntern(), + } }); + return only.toValue(); + } + + return null; }, - else => return null, - } - }, - .enum_simple => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_simple = resolved_ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.initTag(.unreachable_value), - 1 => return Value.zero, - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (tag_ty.zigTypeTag() != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return Value.zero; - } else { - return null; - } - }, - .@"union", .union_safety_tagged, .union_tagged => { - const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; - const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse - return null; - const fields = union_obj.fields.values(); - if (fields.len == 0) return Value.initTag(.unreachable_value); - const only_field = fields[0]; - if (only_field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - union_obj.srcLoc(sema.mod), - "union '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse - return null; - // TODO make this not allocate. The function in `Type.onePossibleValue` - // currently returns `empty_struct_value` and we should do that here too. 
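// A hedged user-level instance of the union case handled above (illustrative
// type; not part of the patch): when the tag type and the only field's type
// each have one possible value, the union does too.
test "union with one possible value" {
    const U = union(enum) { only: void };
    try @import("std").testing.expect(@sizeOf(U) == 0);
}
// The interned `.un` value above is that single value; the removed code below
// built it with an arena allocation instead.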
- return try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = val_val, - }); - }, + .auto, .explicit => { + if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; - .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .void => return Value.void, - .noreturn => return Value.initTag(.unreachable_value), - .null => return Value.null, - .undefined => return Value.initTag(.undef), + switch (enum_type.names.len) { + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + }, + 1 => return try mod.getCoerced((if (enum_type.values.len == 0) + try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }) + else + enum_type.values[0]).toValue(), ty), + else => return null, + } + }, + }, - .int_unsigned, .int_signed => { - if (ty.cast(Type.Payload.Bits).?.data == 0) { - return Value.zero; - } else { - return null; - } + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, - .vector, .array, .array_u8 => { - if (ty.arrayLen() == 0) - return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(ty.elemType())) != null) { - return Value.initTag(.the_only_possible_value); - } - return null; - }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - .generic_poison => return error.GenericPoison, - } + }; } /// Returns the type of the AIR instruction. fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst); + return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool); } pub fn getTmpAir(sema: Sema) Air { return .{ .instructions = sema.air_instructions.slice(), .extra = sema.air_extra.items, - .values = sema.air_values.items, }; } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { - switch (ty.tag()) { - .u1 => return .u1_type, - .u8 => return .u8_type, - .i8 => return .i8_type, - .u16 => return .u16_type, - .u29 => return .u29_type, - .i16 => return .i16_type, - .u32 => return .u32_type, - .i32 => return .i32_type, - .u64 => return .u64_type, - .i64 => return .i64_type, - .u128 => return .u128_type, - .i128 => return .i128_type, - .usize => return .usize_type, - .isize => return .isize_type, - .c_short => return .c_short_type, - .c_ushort => return .c_ushort_type, - .c_int => return .c_int_type, - .c_uint => return .c_uint_type, - .c_long => return .c_long_type, - .c_ulong => return .c_ulong_type, - .c_longlong => return .c_longlong_type, - .c_ulonglong => return .c_ulonglong_type, - .c_longdouble => return .c_longdouble_type, - .f16 => return .f16_type, - .f32 => return .f32_type, - .f64 => return .f64_type, - .f80 => return .f80_type, - .f128 => return .f128_type, - .anyopaque => return .anyopaque_type, - .bool => return .bool_type, - .void => return .void_type, - .type => return .type_type, - .anyerror => return .anyerror_type, - .comptime_int => return .comptime_int_type, - .comptime_float => return .comptime_float_type, - .noreturn => return .noreturn_type, - .@"anyframe" => return .anyframe_type, - .null => return .null_type, - .undefined => return .undefined_type, - .enum_literal => return .enum_literal_type, - .atomic_order => return .atomic_order_type, - .atomic_rmw_op => return .atomic_rmw_op_type, - .calling_convention => return 
.calling_convention_type, - .address_space => return .address_space_type, - .float_mode => return .float_mode_type, - .reduce_op => return .reduce_op_type, - .modifier => return .modifier_type, - .prefetch_options => return .prefetch_options_type, - .export_options => return .export_options_type, - .extern_options => return .extern_options_type, - .type_info => return .type_info_type, - .manyptr_u8 => return .manyptr_u8_type, - .manyptr_const_u8 => return .manyptr_const_u8_type, - .fn_noreturn_no_args => return .fn_noreturn_no_args_type, - .fn_void_no_args => return .fn_void_no_args_type, - .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, - .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, - .const_slice_u8 => return .const_slice_u8_type, - .anyerror_void_error_union => return .anyerror_void_error_union_type, - .generic_poison => return .generic_poison_type, - else => {}, - } + if (@enumToInt(ty.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(ty.toIntern())); try sema.air_instructions.append(sema.gpa, .{ - .tag = .const_ty, - .data = .{ .ty = ty }, + .tag = .interned, + .data = .{ .interned = ty.toIntern() }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { - return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); + const mod = sema.mod; + return sema.addConstant(ty, try mod.intValue(ty, int)); } fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { - return sema.addConstant(ty, Value.undef); + return sema.addConstant(ty, (try sema.mod.intern(.{ .undef = ty.toIntern() })).toValue()); } pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; - const ty_inst = try sema.addType(ty); - try sema.air_values.append(gpa, val); + + // This assertion can be removed when the `ty` parameter is removed from + // this function thanks to the InternPool transition being complete. + if (std.debug.runtime_safety) { + const val_ty = mod.intern_pool.typeOf(val.toIntern()); + if (ty.toIntern() != val_ty) { + std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ + ty.fmt(mod), val_ty.toType().fmt(mod), + }); + } + } + if (@enumToInt(val.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); try sema.air_instructions.append(gpa, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, + .tag = .interned, + .data = .{ .interned = val.toIntern() }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } @@ -33026,7 +33893,8 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { u32 => @field(extra, field.name), Air.Inst.Ref => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - else => @compileError("bad field type"), + InternPool.Index => @enumToInt(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } return result; @@ -33072,21 +33940,25 @@ fn analyzeComptimeAlloc( defer anon_decl.deinit(); const decl_index = try anon_decl.finish( - try var_type.copy(anon_decl.arena()), + var_type, // There will be stores before the first load, but they may be to sub-elements or // sub-fields. 
So we need to initialize with undef to allow the mechanism to expand // into fields/elements and have those overridden with stored values. - Value.undef, + (try sema.mod.intern(.{ .undef = var_type.toIntern() })).toValue(), alignment, ); const decl = sema.mod.declPtr(decl_index); decl.@"align" = alignment; + try sema.comptime_mutable_decls.append(decl_index); try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); - return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .runtime_index = block.runtime_index, - .decl_index = decl_index, - })); + return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_type.toIntern(), + .addr = .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); } /// The places where a user can specify an address space attribute @@ -33114,8 +33986,9 @@ pub fn analyzeAddressSpace( zir_ref: Zir.Inst.Ref, ctx: AddressSpaceContext, ) !std.builtin.AddressSpace { + const mod = sema.mod; const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref, "addresspace must be comptime-known"); - const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace); + const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); const target = sema.mod.getTarget(); const arch = target.cpu.arch; @@ -33158,8 +34031,9 @@ pub fn analyzeAddressSpace( /// Asserts the value is a pointer and dereferences it. /// Returns `null` if the pointer contents cannot be loaded at comptime. fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { - const load_ty = ptr_ty.childType(); - const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true); + const mod = sema.mod; + const load_ty = ptr_ty.childType(mod); + const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty); switch (res) { .runtime_load => return null, .val => |v| return v, @@ -33185,8 +34059,9 @@ const DerefResult = union(enum) { out_of_bounds: Type, }; -fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult { - const target = sema.mod.getTarget(); +fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type) CompileError!DerefResult { + const mod = sema.mod; + const target = mod.getTarget(); const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { error.RuntimeLoad => return DerefResult{ .runtime_load = {} }, else => |e| return e, @@ -33199,19 +34074,17 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value if (coerce_in_mem_ok) { // We have a Value that lines up in virtual memory exactly with what we want to load, // and it is in-memory coercible to load_ty. It may be returned without modifications. - if (deref.is_mutable and want_mutable) { - // The decl whose value we are obtaining here may be overwritten with - // a different value upon further semantic analysis, which would - // invalidate this memory. So we must copy here. - return DerefResult{ .val = try tv.val.copy(sema.arena) }; - } - return DerefResult{ .val = tv.val }; + // Move mutable decl values to the InternPool and assert other decls are already in + // the InternPool. 
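// A hedged user-level motivation for interning at load time (not part of the
// patch): a comptime load through a pointer must observe mutations made after
// the pointer was taken, so the value is snapshotted at the moment of the load.
test "comptime loads observe mutation" {
    comptime {
        var x: u32 = 1;
        const p = &x;
        x = 2;
        if (p.* != 2) @compileError("stale load");
    }
}
// The `tv.val.intern` call below performs that snapshot for mutable decls.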
+ const uncoerced_val = if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern(); + const coerced_val = try sema.coerceValueInMemory(block, uncoerced_val.toValue(), tv.ty, load_ty, src); + return .{ .val = coerced_val }; } } // The type is not in-memory coercible or the direct dereference failed, so it must // be bitcast according to the pointer type we are performing the load through. - if (!load_ty.hasWellDefinedLayout()) { + if (!load_ty.hasWellDefinedLayout(mod)) { return DerefResult{ .needed_well_defined = load_ty }; } @@ -33248,59 +34121,32 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError /// This can return `error.AnalysisFail` because it sometimes requires resolving whether /// a type has zero bits, which can cause a "foo depends on itself" compile error. /// This logic must be kept in sync with `Type.isPtrLikeOptional`. -fn typePtrOrOptionalPtrTy( - sema: *Sema, - ty: Type, - buf: *Type.Payload.ElemType, -) !?Type { - switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return ty.optionalChild(buf), - - .single_const_pointer_to_comptime_int, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return ty, - - .pointer => switch (ty.ptrSize()) { - .Slice => return null, - .C => return ty.optionalChild(buf), - else => return ty, +fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { + const mod = sema.mod; + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One, .Many, .C => ty, + .Slice => null, }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - .optional => { - const child_type = ty.optionalChild(buf); - if (child_type.zigTypeTag() != .Pointer) return null; - - const info = child_type.ptrInfo().data; - switch (info.size) { - .Slice, .C => return null, + .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice, .C => null, .Many, .One => { - if (info.@"allowzero") return null; + if (ptr_type.flags.is_allowzero) return null; // optionals of zero sized types behave like bools, not pointers - if ((try sema.typeHasOnePossibleValue(child_type)) != null) { + const payload_ty = opt_child.toType(); + if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) { return null; } - return child_type; + return payload_ty; }, - } + }, + else => null, }, - - else => return null, - } + else => null, + }; } /// `generic_poison` will return false. 
@@ -33310,201 +34156,170 @@ fn typePtrOrOptionalPtrTy(
 /// TODO merge these implementations together with the "advanced"/opt_sema pattern seen
 /// elsewhere in value.zig
 pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
-    return switch (ty.tag()) {
-        .u1,
-        .u8,
-        .i8,
-        .u16,
-        .i16,
-        .u29,
-        .u32,
-        .i32,
-        .u64,
-        .i64,
-        .u128,
-        .i128,
-        .usize,
-        .isize,
-        .c_char,
-        .c_short,
-        .c_ushort,
-        .c_int,
-        .c_uint,
-        .c_long,
-        .c_ulong,
-        .c_longlong,
-        .c_ulonglong,
-        .c_longdouble,
-        .f16,
-        .f32,
-        .f64,
-        .f80,
-        .f128,
-        .anyopaque,
-        .bool,
-        .void,
-        .anyerror,
-        .noreturn,
-        .@"anyframe",
-        .null,
-        .undefined,
-        .atomic_order,
-        .atomic_rmw_op,
-        .calling_convention,
-        .address_space,
-        .float_mode,
-        .reduce_op,
-        .modifier,
-        .prefetch_options,
-        .export_options,
-        .extern_options,
-        .manyptr_u8,
-        .manyptr_const_u8,
-        .manyptr_const_u8_sentinel_0,
-        .const_slice_u8,
-        .const_slice_u8_sentinel_0,
-        .anyerror_void_error_union,
-        .empty_struct_literal,
-        .empty_struct,
-        .error_set,
-        .error_set_single,
-        .error_set_inferred,
-        .error_set_merged,
-        .@"opaque",
-        .generic_poison,
-        .array_u8,
-        .array_u8_sentinel_0,
-        .int_signed,
-        .int_unsigned,
-        .enum_simple,
-        => false,
+    const mod = sema.mod;
+    return switch (ty.toIntern()) {
+        .empty_struct_type => false,
 
-        .single_const_pointer_to_comptime_int,
-        .type,
-        .comptime_int,
-        .comptime_float,
-        .enum_literal,
-        .type_info,
-        // These are function bodies, not function pointers.
-        .fn_noreturn_no_args,
-        .fn_void_no_args,
-        .fn_naked_noreturn_no_args,
-        .fn_ccc_void_no_args,
-        .function,
-        => true,
-
-        .inferred_alloc_mut => unreachable,
-        .inferred_alloc_const => unreachable,
-
-        .array,
-        .array_sentinel,
-        .vector,
-        => return sema.typeRequiresComptime(ty.childType()),
-
-        .pointer,
-        .single_const_pointer,
-        .single_mut_pointer,
-        .many_const_pointer,
-        .many_mut_pointer,
-        .c_const_pointer,
-        .c_mut_pointer,
-        .const_slice,
-        .mut_slice,
-        => {
-            const child_ty = ty.childType();
-            if (child_ty.zigTypeTag() == .Fn) {
-                return child_ty.fnInfo().is_generic;
-            } else {
-                return sema.typeRequiresComptime(child_ty);
-            }
-        },
-
-        .optional,
-        .optional_single_mut_pointer,
-        .optional_single_const_pointer,
-        => {
-            var buf: Type.Payload.ElemType = undefined;
-            return sema.typeRequiresComptime(ty.optionalChild(&buf));
-        },
-
-        .tuple, .anon_struct => {
-            const tuple = ty.tupleFields();
-            for (tuple.types, 0..) |field_ty, i| {
-                const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
-                if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) {
-                    return true;
+        else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .int_type => return false,
+            .ptr_type => |ptr_type| {
+                const child_ty = ptr_type.child.toType();
+                if (child_ty.zigTypeTag(mod) == .Fn) {
+                    return mod.typeToFunc(child_ty).?.is_generic;
+                } else {
+                    return sema.typeRequiresComptime(child_ty);
                 }
-            }
-            return false;
-        },
+            },
+            .anyframe_type => |child| {
+                if (child == .none) return false;
+                return sema.typeRequiresComptime(child.toType());
+            },
+            .array_type => |array_type| return sema.typeRequiresComptime(array_type.child.toType()),
+            .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()),
+            .opt_type => |child| return sema.typeRequiresComptime(child.toType()),
 
-        .@"struct" => {
-            const struct_obj = ty.castTag(.@"struct").?.data;
-            switch (struct_obj.requires_comptime) {
-                .no, .wip => return false,
-                .yes => return true,
-                .unknown => {
-                    if (struct_obj.status == .field_types_wip)
-                        return false;
+            .error_union_type => |error_union_type| {
+                return sema.typeRequiresComptime(error_union_type.payload_type.toType());
+            },
 
-                    try sema.resolveTypeFieldsStruct(ty, struct_obj);
+            .error_set_type, .inferred_error_set_type => false,
 
-                    struct_obj.requires_comptime = .wip;
-                    for (struct_obj.fields.values()) |field| {
-                        if (field.is_comptime) continue;
-                        if (try sema.typeRequiresComptime(field.ty)) {
-                            struct_obj.requires_comptime = .yes;
-                            return true;
-                        }
-                    }
-                    struct_obj.requires_comptime = .no;
-                    return false;
-                },
-            }
-        },
+            .func_type => true,
+
+            .simple_type => |t| return switch (t) {
+                .f16,
+                .f32,
+                .f64,
+                .f80,
+                .f128,
+                .usize,
+                .isize,
+                .c_char,
+                .c_short,
+                .c_ushort,
+                .c_int,
+                .c_uint,
+                .c_long,
+                .c_ulong,
+                .c_longlong,
+                .c_ulonglong,
+                .c_longdouble,
+                .anyopaque,
+                .bool,
+                .void,
+                .anyerror,
+                .noreturn,
+                .generic_poison,
+                .atomic_order,
+                .atomic_rmw_op,
+                .calling_convention,
+                .address_space,
+                .float_mode,
+                .reduce_op,
+                .call_modifier,
+                .prefetch_options,
+                .export_options,
+                .extern_options,
+                => false,
+
+                .type,
+                .comptime_int,
+                .comptime_float,
+                .null,
+                .undefined,
+                .enum_literal,
+                .type_info,
+                => true,
+            },
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                switch (struct_obj.requires_comptime) {
+                    .no, .wip => return false,
+                    .yes => return true,
+                    .unknown => {
+                        if (struct_obj.status == .field_types_wip)
+                            return false;
+
+                        try sema.resolveTypeFieldsStruct(ty, struct_obj);
+
+                        struct_obj.requires_comptime = .wip;
+                        for (struct_obj.fields.values()) |field| {
+                            if (field.is_comptime) continue;
+                            if (try sema.typeRequiresComptime(field.ty)) {
+                                struct_obj.requires_comptime = .yes;
+                                return true;
+                            }
+                        }
+                        struct_obj.requires_comptime = .no;
+                        return false;
+                    },
+                }
+            },
+            .anon_struct_type => |tuple| {
+                for (tuple.types, tuple.values) |field_ty, val| {
+                    const have_comptime_val = val != .none;
+                    if (!have_comptime_val and try sema.typeRequiresComptime(field_ty.toType())) {
+                        return true;
+                    }
+                }
+                return false;
+            },
 
-        .@"union", .union_safety_tagged, .union_tagged => {
-            const union_obj = ty.cast(Type.Payload.Union).?.data;
-            switch (union_obj.requires_comptime) {
-                .no, .wip => return false,
-                .yes => return true,
-                .unknown => {
-                    if (union_obj.status == .field_types_wip)
-                        return false;
-
-                    try sema.resolveTypeFieldsUnion(ty, union_obj);
-
-                    union_obj.requires_comptime = .wip;
-                    for (union_obj.fields.values()) |field| {
-                        if (try sema.typeRequiresComptime(field.ty)) {
-                            union_obj.requires_comptime = .yes;
-                            return true;
-                        }
-                    }
-                    union_obj.requires_comptime = .no;
-                    return false;
-                },
-            }
-        },
-
-        .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()),
-        .anyframe_T => {
-            const child_ty = ty.castTag(.anyframe_T).?.data;
-            return sema.typeRequiresComptime(child_ty);
-        },
-        .enum_numbered => {
-            const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty;
-            return sema.typeRequiresComptime(tag_ty);
-        },
-        .enum_full, .enum_nonexhaustive => {
-            const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty;
-            return sema.typeRequiresComptime(tag_ty);
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                switch (union_obj.requires_comptime) {
+                    .no, .wip => return false,
+                    .yes => return true,
+                    .unknown => {
+                        if (union_obj.status == .field_types_wip)
+                            return false;
+
+                        try sema.resolveTypeFieldsUnion(ty, union_obj);
+
+                        union_obj.requires_comptime = .wip;
+                        for (union_obj.fields.values()) |field| {
+                            if (try sema.typeRequiresComptime(field.ty)) {
+                                union_obj.requires_comptime = .yes;
+                                return true;
+                            }
+                        }
+                        union_obj.requires_comptime = .no;
+                        return false;
+                    },
+                }
+            },
+
+            .opaque_type => false,
+            .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()),
+
+            // values, not types
+            .undef,
+            .runtime_value,
+            .simple_value,
+            .variable,
+            .extern_func,
+            .func,
+            .int,
+            .err,
+            .error_union,
+            .enum_literal,
+            .enum_tag,
+            .empty_enum_value,
+            .float,
+            .ptr,
+            .opt,
+            .aggregate,
+            .un,
+            // memoization, not types
+            .memoized_call,
+            => unreachable,
         },
     };
 }
 
 pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
-    return ty.hasRuntimeBitsAdvanced(false, .{ .sema = sema }) catch |err| switch (err) {
+    const mod = sema.mod;
+    return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) {
         error.NeedLazy => unreachable,
         else => |e| return e,
     };
@@ -33512,19 +34327,18 @@ pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
 
 fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
     try sema.resolveTypeLayout(ty);
-    const target = sema.mod.getTarget();
-    return ty.abiSize(target);
+    return ty.abiSize(sema.mod);
 }
 
 fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 {
-    const target = sema.mod.getTarget();
-    return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar;
+    return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar;
 }
 
 /// Not valid to call for packed unions.
 /// Keep implementation in sync with `Module.Union.Field.normalAlignment`.
 fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 {
-    if (field.ty.zigTypeTag() == .NoReturn) {
+    const mod = sema.mod;
+    if (field.ty.zigTypeTag(mod) == .NoReturn) {
         return @as(u32, 0);
     } else if (field.abi_align == 0) {
         return sema.typeAbiAlignment(field.ty);
@@ -33535,7 +34349,8 @@ fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 {
 
 /// Synchronize logic with `Type.isFnOrHasRuntimeBits`.
 pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
-    const fn_info = ty.fnInfo();
+    const mod = sema.mod;
+    const fn_info = mod.typeToFunc(ty).?;
     if (fn_info.is_generic) return false;
     if (fn_info.is_var_args) return true;
     switch (fn_info.cc) {
@@ -33543,7 +34358,7 @@ pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool {
         .Inline => return false,
         else => {},
     }
-    if (try sema.typeRequiresComptime(fn_info.return_type)) {
+    if (try sema.typeRequiresComptime(fn_info.return_type.toType())) {
         return false;
     }
     return true;
@@ -33553,11 +34368,12 @@ fn unionFieldIndex(
     sema: *Sema,
     block: *Block,
     unresolved_union_ty: Type,
-    field_name: []const u8,
+    field_name: InternPool.NullTerminatedString,
     field_src: LazySrcLoc,
 ) !u32 {
+    const mod = sema.mod;
     const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
-    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+    const union_obj = mod.typeToUnion(union_ty).?;
     const field_index_usize = union_obj.fields.getIndex(field_name) orelse
         return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
     return @intCast(u32, field_index_usize);
@@ -33567,14 +34383,15 @@ fn structFieldIndex(
     sema: *Sema,
     block: *Block,
     unresolved_struct_ty: Type,
-    field_name: []const u8,
+    field_name: InternPool.NullTerminatedString,
     field_src: LazySrcLoc,
 ) !u32 {
+    const mod = sema.mod;
     const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty);
-    if (struct_ty.isAnonStruct()) {
+    if (struct_ty.isAnonStruct(mod)) {
         return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
     } else {
-        const struct_obj = struct_ty.castTag(.@"struct").?.data;
+        const struct_obj = mod.typeToStruct(struct_ty).?;
        const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
            return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
        return @intCast(u32, field_index_usize);
@@ -33585,55 +34402,98 @@ fn anonStructFieldIndex(
     sema: *Sema,
     block: *Block,
     struct_ty: Type,
-    field_name: []const u8,
+    field_name: InternPool.NullTerminatedString,
     field_src: LazySrcLoc,
 ) !u32 {
-    const anon_struct = struct_ty.castTag(.anon_struct).?.data;
-    for (anon_struct.names, 0..) |name, i| {
-        if (mem.eql(u8, name, field_name)) {
-            return @intCast(u32, i);
-        }
+    const mod = sema.mod;
+    switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
+        .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| {
+            if (name == field_name) return @intCast(u32, i);
+        },
+        .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
+            for (struct_obj.fields.keys(), 0..) |name, i| {
+                if (name == field_name) {
+                    return @intCast(u32, i);
+                }
+            }
+        },
+        else => unreachable,
     }
-    return sema.fail(block, field_src, "no field named '{s}' in anonymous struct '{}'", .{
-        field_name, struct_ty.fmt(sema.mod),
+    return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
+        field_name.fmt(&mod.intern_pool), struct_ty.fmt(sema.mod),
     });
 }
 
 fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
-    const inst_ref = try sema.addType(ty);
-    try sema.types_to_resolve.append(sema.gpa, inst_ref);
+    try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {});
 }
 
-fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
-    if (ty.zigTypeTag() == .Vector) {
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
+/// `overflow_idx` to the vector index at which the overflow occurred (or 0 for a scalar).
+fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
+    var overflow: usize = undefined;
+    return sema.intAddInner(lhs, rhs, ty, &overflow) catch |err| switch (err) {
+        error.Overflow => {
+            const is_vec = ty.isVector(sema.mod);
+            overflow_idx.* = if (is_vec) overflow else 0;
+            const safe_ty = if (is_vec) try sema.mod.vectorType(.{
+                .len = ty.vectorLen(sema.mod),
+                .child = .comptime_int_type,
+            }) else Type.comptime_int;
+            return sema.intAddInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) {
+                error.Overflow => unreachable,
+                else => |e| return e,
+            };
+        },
+        else => |e| return e,
+    };
+}
+
+fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) == .Vector) {
+        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
+        const scalar_ty = ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem);
+            const lhs_elem = try lhs.elemValue(mod, i);
+            const rhs_elem = try rhs.elemValue(mod, i);
+            const val = sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) {
+                error.Overflow => {
+                    overflow_idx.* = i;
+                    return error.Overflow;
+                },
+                else => |e| return e,
+            };
+            scalar.* = try val.intern(scalar_ty, mod);
         }
-        return Value.Tag.aggregate.create(sema.arena, result_data);
+        return (try mod.intern(.{ .aggregate = .{
+            .ty = ty.toIntern(),
+            .storage = .{ .elems = result_data },
+        } })).toValue();
     }
-    return sema.intAddScalar(lhs, rhs);
+    return sema.intAddScalar(lhs, rhs, ty);
 }
 
-fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value {
+fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
+    const mod = sema.mod;
+    if (scalar_ty.toIntern() != .comptime_int_type) {
+        const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty);
+        if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
+        return res.wrapped_result;
+    }
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const target = sema.mod.getTarget();
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     result_bigint.add(lhs_bigint, rhs_bigint);
-    return Value.fromBigInt(sema.arena, result_bigint.toConst());
+    return mod.intValue_big(scalar_ty, result_bigint.toConst());
 }
 
 /// Supports both floats and ints; handles undefined.
@@ -33643,55 +34503,87 @@ fn numberAddWrapScalar(
     rhs: Value,
     ty: Type,
 ) !Value {
-    if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+    const mod = sema.mod;
+    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
 
-    if (ty.zigTypeTag() == .ComptimeInt) {
-        return sema.intAdd(lhs, rhs, ty);
+    if (ty.zigTypeTag(mod) == .ComptimeInt) {
+        return sema.intAdd(lhs, rhs, ty, undefined);
     }
 
     if (ty.isAnyFloat()) {
-        return sema.floatAdd(lhs, rhs, ty);
+        return Value.floatAdd(lhs, rhs, ty, sema.arena, mod);
     }
 
     const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty);
     return overflow_result.wrapped_result;
 }
 
-fn intSub(
-    sema: *Sema,
-    lhs: Value,
-    rhs: Value,
-    ty: Type,
-) !Value {
-    if (ty.zigTypeTag() == .Vector) {
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
-        for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem);
-        }
-        return Value.Tag.aggregate.create(sema.arena, result_data);
-    }
-    return sema.intSubScalar(lhs, rhs);
+/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting
+/// `overflow_idx` to the vector index at which the overflow occurred (or 0 for a scalar).
+fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value {
+    var overflow: usize = undefined;
+    return sema.intSubInner(lhs, rhs, ty, &overflow) catch |err| switch (err) {
+        error.Overflow => {
+            const is_vec = ty.isVector(sema.mod);
+            overflow_idx.* = if (is_vec) overflow else 0;
+            const safe_ty = if (is_vec) try sema.mod.vectorType(.{
+                .len = ty.vectorLen(sema.mod),
+                .child = .comptime_int_type,
+            }) else Type.comptime_int;
+            return sema.intSubInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) {
+                error.Overflow => unreachable,
+                else => |e| return e,
+            };
+        },
+        else => |e| return e,
+    };
 }
 
-fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value {
+fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value {
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) == .Vector) {
+        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
+        const scalar_ty = ty.scalarType(mod);
+        for (result_data, 0..) |*scalar, i| {
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
+            const val = sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) {
+                error.Overflow => {
+                    overflow_idx.* = i;
+                    return error.Overflow;
+                },
+                else => |e| return e,
+            };
+            scalar.* = try val.intern(scalar_ty, mod);
+        }
+        return (try mod.intern(.{ .aggregate = .{
+            .ty = ty.toIntern(),
+            .storage = .{ .elems = result_data },
+        } })).toValue();
    }
+    return sema.intSubScalar(lhs, rhs, ty);
+}
+
+fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
+    const mod = sema.mod;
+    if (scalar_ty.toIntern() != .comptime_int_type) {
+        const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty);
+        if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow;
+        return res.wrapped_result;
+    }
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const target = sema.mod.getTarget();
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     result_bigint.sub(lhs_bigint, rhs_bigint);
-    return Value.fromBigInt(sema.arena, result_bigint.toConst());
+    return mod.intValue_big(scalar_ty, result_bigint.toConst());
 }
 
 /// Supports both floats and ints; handles undefined.
@@ -33701,155 +34593,49 @@ fn numberSubWrapScalar(
     rhs: Value,
     ty: Type,
 ) !Value {
-    if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+    const mod = sema.mod;
+    if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;
 
-    if (ty.zigTypeTag() == .ComptimeInt) {
-        return sema.intSub(lhs, rhs, ty);
+    if (ty.zigTypeTag(mod) == .ComptimeInt) {
+        return sema.intSub(lhs, rhs, ty, undefined);
     }
 
     if (ty.isAnyFloat()) {
-        return sema.floatSub(lhs, rhs, ty);
+        return Value.floatSub(lhs, rhs, ty, sema.arena, mod);
     }
 
     const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty);
     return overflow_result.wrapped_result;
 }
 
-fn floatAdd(
-    sema: *Sema,
-    lhs: Value,
-    rhs: Value,
-    float_type: Type,
-) !Value {
-    if (float_type.zigTypeTag() == .Vector) {
-        const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
-        for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType());
-        }
-        return Value.Tag.aggregate.create(sema.arena, result_data);
-    }
-    return sema.floatAddScalar(lhs, rhs, float_type);
-}
-
-fn floatAddScalar(
-    sema: *Sema,
-    lhs: Value,
-    rhs: Value,
-    float_type: Type,
-) !Value {
-    const target = sema.mod.getTarget();
-    switch (float_type.floatBits(target)) {
-        16 => {
-            const lhs_val = lhs.toFloat(f16);
-            const rhs_val = rhs.toFloat(f16);
-            return Value.Tag.float_16.create(sema.arena, lhs_val + rhs_val);
-        },
-        32 => {
-            const lhs_val = lhs.toFloat(f32);
-            const rhs_val = rhs.toFloat(f32);
-            return Value.Tag.float_32.create(sema.arena, lhs_val + rhs_val);
-        },
-        64 => {
-            const lhs_val = lhs.toFloat(f64);
-            const rhs_val = rhs.toFloat(f64);
-            return Value.Tag.float_64.create(sema.arena, lhs_val + rhs_val);
-        },
-        80 => {
-            const lhs_val = lhs.toFloat(f80);
-            const rhs_val = rhs.toFloat(f80);
-            return Value.Tag.float_80.create(sema.arena, lhs_val + rhs_val);
-        },
-        128 => {
-            const lhs_val = lhs.toFloat(f128);
-            const rhs_val = rhs.toFloat(f128);
-            return Value.Tag.float_128.create(sema.arena, lhs_val + rhs_val);
-        },
-        else => unreachable,
-    }
-}
-
-fn floatSub(
-    sema: *Sema,
-    lhs: Value,
-    rhs: Value,
-    float_type: Type,
-) !Value {
-    if (float_type.zigTypeTag() == .Vector) {
-        const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
-        for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType());
-        }
-        return Value.Tag.aggregate.create(sema.arena, result_data);
-    }
-    return sema.floatSubScalar(lhs, rhs, float_type);
-}
-
-fn floatSubScalar(
-    sema: *Sema,
-    lhs: Value,
-    rhs: Value,
-    float_type: Type,
-) !Value {
-    const target = sema.mod.getTarget();
-    switch (float_type.floatBits(target)) {
-        16 => {
-            const lhs_val = lhs.toFloat(f16);
-            const rhs_val = rhs.toFloat(f16);
-            return Value.Tag.float_16.create(sema.arena, lhs_val - rhs_val);
-        },
-        32 => {
-            const lhs_val = lhs.toFloat(f32);
-            const rhs_val = rhs.toFloat(f32);
-            return Value.Tag.float_32.create(sema.arena, lhs_val - rhs_val);
-        },
-        64 => {
-            const lhs_val = lhs.toFloat(f64);
-            const rhs_val = rhs.toFloat(f64);
-            return Value.Tag.float_64.create(sema.arena, lhs_val - rhs_val);
-        },
-        80 => {
-            const lhs_val = lhs.toFloat(f80);
-            const rhs_val = rhs.toFloat(f80);
-            return Value.Tag.float_80.create(sema.arena, lhs_val - rhs_val);
-        },
-        128 => {
-            const lhs_val = lhs.toFloat(f128);
-            const rhs_val = rhs.toFloat(f128);
-            return Value.Tag.float_128.create(sema.arena, lhs_val - rhs_val);
-        },
-        else => unreachable,
-    }
-}
-
 fn intSubWithOverflow(
     sema: *Sema,
     lhs: Value,
     rhs: Value,
     ty: Type,
 ) !Value.OverflowArithmeticResult {
-    if (ty.zigTypeTag() == .Vector) {
-        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
-        for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType());
-            overflowed_data[i] = of_math_result.overflow_bit;
-            scalar.* = of_math_result.wrapped_result;
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) == .Vector) {
+        const vec_len = ty.vectorLen(mod);
+        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const scalar_ty = ty.scalarType(mod);
+        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
+            const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
+            of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
+            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
         }
         return Value.OverflowArithmeticResult{
-            .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
-            .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
+            .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                .storage = .{ .elems = overflowed_data },
+            } })).toValue(),
+            .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue(),
         };
     }
     return sema.intSubWithOverflowScalar(lhs, rhs, ty);
@@ -33861,22 +34647,22 @@ fn intSubWithOverflowScalar(
     rhs: Value,
     ty: Type,
 ) !Value.OverflowArithmeticResult {
-    const target = sema.mod.getTarget();
-    const info = ty.intInfo(target);
+    const mod = sema.mod;
+    const info = ty.intInfo(mod);
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
-    const wrapped_result = try Value.fromBigInt(sema.arena, result_bigint.toConst());
+    const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst());
     return Value.OverflowArithmeticResult{
-        .overflow_bit = Value.boolToInt(overflowed),
+        .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
         .wrapped_result = wrapped_result,
     };
 }
@@ -33889,15 +34675,19 @@ fn floatToInt(
     float_ty: Type,
     int_ty: Type,
 ) CompileError!Value {
-    if (float_ty.zigTypeTag() == .Vector) {
-        const elem_ty = float_ty.childType();
-        const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
+    const mod = sema.mod;
+    if (float_ty.zigTypeTag(mod) == .Vector) {
+        const elem_ty = float_ty.scalarType(mod);
+        const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod));
+        const scalar_ty = int_ty.scalarType(mod);
        for (result_data, 0..) |*scalar, i| {
-            var buf: Value.ElemValueBuffer = undefined;
-            const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
-            scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType());
+            const elem_val = try val.elemValue(sema.mod, i);
+            scalar.* = try (try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod))).intern(scalar_ty, mod);
         }
-        return Value.Tag.aggregate.create(sema.arena, result_data);
+        return (try mod.intern(.{ .aggregate = .{
+            .ty = int_ty.toIntern(),
+            .storage = .{ .elems = result_data },
+        } })).toValue();
     }
     return sema.floatToIntScalar(block, src, val, float_ty, int_ty);
 }
 
@@ -33935,9 +34725,9 @@ fn floatToIntScalar(
     float_ty: Type,
     int_ty: Type,
 ) CompileError!Value {
-    const Limb = std.math.big.Limb;
+    const mod = sema.mod;
 
-    const float = val.toFloat(f128);
+    const float = val.toFloat(f128, mod);
     if (std.math.isNan(float)) {
         return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{
             int_ty.fmt(sema.mod),
@@ -33952,18 +34742,14 @@ fn floatToIntScalar(
     var big_int = try float128IntPartToBigInt(sema.arena, float);
     defer big_int.deinit();
 
-    const result_limbs = try sema.arena.dupe(Limb, big_int.toConst().limbs);
-    const result = if (!big_int.isPositive())
-        try Value.Tag.int_big_negative.create(sema.arena, result_limbs)
-    else
-        try Value.Tag.int_big_positive.create(sema.arena, result_limbs);
+    const cti_result = try mod.intValue_big(Type.comptime_int, big_int.toConst());
 
-    if (!(try sema.intFitsInType(result, int_ty, null))) {
+    if (!(try sema.intFitsInType(cti_result, int_ty, null))) {
         return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
             val.fmtValue(float_ty, sema.mod), int_ty.fmt(sema.mod),
         });
     }
-    return result;
+    return mod.getCoerced(cti_result, int_ty);
 }
 
 /// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
@@ -33976,208 +34762,91 @@ fn intFitsInType(
     ty: Type,
     vector_index: ?*usize,
 ) CompileError!bool {
-    const target = sema.mod.getTarget();
-    switch (val.tag()) {
-        .zero,
-        .undef,
-        .bool_false,
-        => return true,
-
-        .one,
-        .bool_true,
-        => switch (ty.zigTypeTag()) {
-            .Int => {
-                const info = ty.intInfo(target);
-                return switch (info.signedness) {
-                    .signed => info.bits >= 2,
-                    .unsigned => info.bits >= 1,
-                };
-            },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-
-        .lazy_align => switch (ty.zigTypeTag()) {
-            .Int => {
-                const info = ty.intInfo(target);
-                const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
-                // If it is u16 or bigger we know the alignment fits without resolving it.
-                if (info.bits >= max_needed_bits) return true;
-                const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data);
-                if (x == 0) return true;
-                const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                return info.bits >= actual_needed_bits;
-            },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .lazy_size => switch (ty.zigTypeTag()) {
-            .Int => {
-                const info = ty.intInfo(target);
-                const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
-                // If it is u64 or bigger we know the size fits without resolving it.
-                if (info.bits >= max_needed_bits) return true;
-                const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data);
-                if (x == 0) return true;
-                const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                return info.bits >= actual_needed_bits;
-            },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-
-        .int_u64 => switch (ty.zigTypeTag()) {
-            .Int => {
-                const x = val.castTag(.int_u64).?.data;
-                if (x == 0) return true;
-                const info = ty.intInfo(target);
-                const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                return info.bits >= needed_bits;
-            },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .int_i64 => switch (ty.zigTypeTag()) {
-            .Int => {
-                const x = val.castTag(.int_i64).?.data;
-                if (x == 0) return true;
-                const info = ty.intInfo(target);
-                if (info.signedness == .unsigned and x < 0)
-                    return false;
-                var buffer: Value.BigIntSpace = undefined;
-                return (try val.toBigIntAdvanced(&buffer, target, sema)).fitsInTwosComp(info.signedness, info.bits);
-            },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .int_big_positive => switch (ty.zigTypeTag()) {
-            .Int => {
-                const info = ty.intInfo(target);
-                return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
-            },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .int_big_negative => switch (ty.zigTypeTag()) {
-            .Int => {
-                const info = ty.intInfo(target);
-                return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
-            },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-
-        .the_only_possible_value => {
-            assert(ty.intInfo(target).bits == 0);
-            return true;
-        },
-
-        .decl_ref_mut,
-        .extern_fn,
-        .decl_ref,
-        .function,
-        .variable,
-        => switch (ty.zigTypeTag()) {
-            .Int => {
-                const info = ty.intInfo(target);
+    const mod = sema.mod;
+    if (ty.toIntern() == .comptime_int_type) return true;
+    const info = ty.intInfo(mod);
+    switch (val.toIntern()) {
+        .zero_usize, .zero_u8 => return true,
+        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .undef => return true,
+            .variable, .extern_func, .func, .ptr => {
+                const target = mod.getTarget();
                 const ptr_bits = target.ptrBitWidth();
                 return switch (info.signedness) {
                     .signed => info.bits > ptr_bits,
                     .unsigned => info.bits >= ptr_bits,
                 };
             },
-            .ComptimeInt => return true,
+            .int => |int| switch (int.storage) {
+                .u64, .i64, .big_int => {
+                    var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
+                    const big_int = int.storage.toBigInt(&buffer);
+                    return big_int.fitsInTwosComp(info.signedness, info.bits);
+                },
+                .lazy_align => |lazy_ty| {
+                    const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
+                    // If it is u16 or bigger we know the alignment fits without resolving it.
+                    if (info.bits >= max_needed_bits) return true;
+                    const x = try sema.typeAbiAlignment(lazy_ty.toType());
+                    if (x == 0) return true;
+                    const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+                    return info.bits >= actual_needed_bits;
+                },
+                .lazy_size => |lazy_ty| {
+                    const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
+                    // If it is u64 or bigger we know the size fits without resolving it.
+                    if (info.bits >= max_needed_bits) return true;
+                    const x = try sema.typeAbiSize(lazy_ty.toType());
+                    if (x == 0) return true;
+                    const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+                    return info.bits >= actual_needed_bits;
+                },
+            },
+            .aggregate => |aggregate| {
+                assert(ty.zigTypeTag(mod) == .Vector);
+                return switch (aggregate.storage) {
+                    .bytes => |bytes| for (bytes, 0..) |byte, i| {
+                        if (byte == 0) continue;
+                        const actual_needed_bits = std.math.log2(byte) + 1 + @boolToInt(info.signedness == .signed);
+                        if (info.bits >= actual_needed_bits) continue;
+                        if (vector_index) |vi| vi.* = i;
+                        break false;
+                    } else true,
+                    .elems, .repeated_elem => for (switch (aggregate.storage) {
+                        .bytes => unreachable,
+                        .elems => |elems| elems,
+                        .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem),
+                    }, 0..) |elem, i| {
+                        if (try sema.intFitsInType(elem.toValue(), ty.scalarType(mod), null)) continue;
+                        if (vector_index) |vi| vi.* = i;
+                        break false;
+                    } else true,
+                };
+            },
             else => unreachable,
         },
-
-        .aggregate => {
-            assert(ty.zigTypeTag() == .Vector);
-            for (val.castTag(.aggregate).?.data, 0..) |elem, i| {
-                if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) {
-                    if (vector_index) |some| some.* = i;
-                    return false;
-                }
-            }
-            return true;
-        },
-
-        else => unreachable,
     }
 }
 
-fn intInRange(
-    sema: *Sema,
-    tag_ty: Type,
-    int_val: Value,
-    end: usize,
-) !bool {
+fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
+    const mod = sema.mod;
     if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false;
-    var end_payload: Value.Payload.U64 = .{
-        .base = .{ .tag = .int_u64 },
-        .data = end,
-    };
-    const end_val = Value.initPayload(&end_payload.base);
+    const end_val = try mod.intValue(tag_ty, end);
     if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false;
     return true;
 }
 
 /// Asserts the type is an enum.
-fn enumHasInt(
-    sema: *Sema,
-    ty: Type,
-    int: Value,
-) CompileError!bool {
-    switch (ty.tag()) {
-        .enum_nonexhaustive => unreachable,
-        .enum_full => {
-            const enum_full = ty.castTag(.enum_full).?.data;
-            const tag_ty = enum_full.tag_ty;
-            if (enum_full.values.count() == 0) {
-                return sema.intInRange(tag_ty, int, enum_full.fields.count());
-            } else {
-                return enum_full.values.containsContext(int, .{
-                    .ty = tag_ty,
-                    .mod = sema.mod,
-                });
-            }
-        },
-        .enum_numbered => {
-            const enum_obj = ty.castTag(.enum_numbered).?.data;
-            const tag_ty = enum_obj.tag_ty;
-            if (enum_obj.values.count() == 0) {
-                return sema.intInRange(tag_ty, int, enum_obj.fields.count());
-            } else {
-                return enum_obj.values.containsContext(int, .{
-                    .ty = tag_ty,
-                    .mod = sema.mod,
-                });
-            }
-        },
-        .enum_simple => {
-            const enum_simple = ty.castTag(.enum_simple).?.data;
-            const fields_len = enum_simple.fields.count();
-            const bits = std.math.log2_int_ceil(usize, fields_len);
-            var buffer: Type.Payload.Bits = .{
-                .base = .{ .tag = .int_unsigned },
-                .data = bits,
-            };
-            const tag_ty = Type.initPayload(&buffer.base);
-            return sema.intInRange(tag_ty, int, fields_len);
-        },
-        .atomic_order,
-        .atomic_rmw_op,
-        .calling_convention,
-        .address_space,
-        .float_mode,
-        .reduce_op,
-        .modifier,
-        .prefetch_options,
-        .export_options,
-        .extern_options,
-        => unreachable,
+fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
+    const mod = sema.mod;
+    const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type;
+    assert(enum_type.tag_mode != .nonexhaustive);
+    // The `tagValueIndex` function call below relies on the type being the integer tag type.
+    // `getCoerced` assumes the value will fit the new type.
+    if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false;
+    const int_coerced = try mod.getCoerced(int, enum_type.tag_ty.toType());
 
-        else => unreachable,
-    }
+    return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null;
 }
 
 fn intAddWithOverflow(
@@ -34186,21 +34855,28 @@ fn intAddWithOverflow(
     rhs: Value,
     ty: Type,
 ) !Value.OverflowArithmeticResult {
-    if (ty.zigTypeTag() == .Vector) {
-        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
-        for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType());
-            overflowed_data[i] = of_math_result.overflow_bit;
-            scalar.* = of_math_result.wrapped_result;
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) == .Vector) {
+        const vec_len = ty.vectorLen(mod);
+        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const scalar_ty = ty.scalarType(mod);
+        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
+            const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
+            of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
+            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
         }
         return Value.OverflowArithmeticResult{
-            .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
-            .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
+            .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                .storage = .{ .elems = overflowed_data },
+            } })).toValue(),
+            .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue(),
         };
     }
     return sema.intAddWithOverflowScalar(lhs, rhs, ty);
@@ -34212,22 +34888,22 @@ fn intAddWithOverflowScalar(
     rhs: Value,
     ty: Type,
 ) !Value.OverflowArithmeticResult {
-    const target = sema.mod.getTarget();
-    const info = ty.intInfo(target);
+    const mod = sema.mod;
+    const info = ty.intInfo(mod);
 
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs = try sema.arena.alloc(
         std.math.big.Limb,
         std.math.big.int.calcTwosCompLimbCount(info.bits),
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
-    const result = try Value.fromBigInt(sema.arena, result_bigint.toConst());
+    const result = try mod.intValue_big(ty, result_bigint.toConst());
     return Value.OverflowArithmeticResult{
-        .overflow_bit = Value.boolToInt(overflowed),
+        .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
         .wrapped_result = result,
     };
 }
@@ -34243,14 +34919,13 @@ fn compareAll(
     rhs: Value,
     ty: Type,
 ) CompileError!bool {
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) == .Vector) {
         var i: usize = 0;
-        while (i < ty.vectorLen()) : (i += 1) {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) {
+        while (i < ty.vectorLen(mod)) : (i += 1) {
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
+            if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) {
                 return false;
             }
         }
@@ -34267,10 +34942,13 @@ fn compareScalar(
     rhs: Value,
     ty: Type,
 ) CompileError!bool {
+    const mod = sema.mod;
+    const coerced_lhs = try mod.getCoerced(lhs, ty);
+    const coerced_rhs = try mod.getCoerced(rhs, ty);
     switch (op) {
-        .eq => return sema.valuesEqual(lhs, rhs, ty),
-        .neq => return !(try sema.valuesEqual(lhs, rhs, ty)),
-        else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod.getTarget(), sema),
+        .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty),
+        .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)),
+        else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema),
     }
 }
 
@@ -34291,17 +34969,19 @@ fn compareVector(
     rhs: Value,
     ty: Type,
 ) !Value {
-    assert(ty.zigTypeTag() == .Vector);
-    const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+    const mod = sema.mod;
+    assert(ty.zigTypeTag(mod) == .Vector);
+    const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
     for (result_data, 0..) |*scalar, i| {
-        var lhs_buf: Value.ElemValueBuffer = undefined;
-        var rhs_buf: Value.ElemValueBuffer = undefined;
-        const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-        const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-        const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType());
-        scalar.* = Value.makeBool(res_bool);
+        const lhs_elem = try lhs.elemValue(sema.mod, i);
+        const rhs_elem = try rhs.elemValue(sema.mod, i);
+        const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
+        scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod);
     }
-    return Value.Tag.aggregate.create(sema.arena, result_data);
+    return (try mod.intern(.{ .aggregate = .{
+        .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
+        .storage = .{ .elems = result_data },
+    } })).toValue();
 }
 
 /// Returns the type of a pointer to an element.
@@ -34312,11 +34992,11 @@ fn compareVector(
 /// Handles const-ness and address spaces in particular.
 /// This code is duplicated in `analyzePtrArithmetic`.
 fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
-    const ptr_info = ptr_ty.ptrInfo().data;
-    const elem_ty = ptr_ty.elemType2();
+    const mod = sema.mod;
+    const ptr_info = ptr_ty.ptrInfo(mod);
+    const elem_ty = ptr_ty.elemType2(mod);
     const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0;
-    const target = sema.mod.getTarget();
-    const parent_ty = ptr_ty.childType();
+    const parent_ty = ptr_ty.childType(mod);
 
     const VI = Type.Payload.Pointer.Data.VectorIndex;
 
@@ -34324,15 +35004,15 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
         host_size: u16 = 0,
         alignment: u32 = 0,
         vector_index: VI = .none,
-    } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: {
-        const elem_bits = elem_ty.bitSize(target);
+    } = if (parent_ty.isVector(mod) and ptr_info.size == .One) blk: {
+        const elem_bits = elem_ty.bitSize(mod);
         if (elem_bits == 0) break :blk .{};
         const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
         if (!is_packed) break :blk .{};
 
         break :blk .{
-            .host_size = @intCast(u16, parent_ty.arrayLen()),
-            .alignment = @intCast(u16, parent_ty.abiAlignment(target)),
+            .host_size = @intCast(u16, parent_ty.arrayLen(mod)),
+            .alignment = @intCast(u16, parent_ty.abiAlignment(mod)),
             .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime,
         };
     } else .{};
@@ -34366,3 +35046,42 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
         .vector_index = vector_info.vector_index,
     });
 }
+
+/// Merge lhs with rhs.
+/// Asserts that lhs and rhs are both error sets and are resolved.
+fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
+    const mod = sema.mod;
+    const arena = sema.arena;
+    const lhs_names = lhs.errorSetNames(mod);
+    const rhs_names = rhs.errorSetNames(mod);
+    var names: Module.Fn.InferredErrorSet.NameMap = .{};
+    try names.ensureUnusedCapacity(arena, lhs_names.len);
+
+    for (lhs_names) |name| {
+        names.putAssumeCapacityNoClobber(name, {});
+    }
+    for (rhs_names) |name| {
+        try names.put(arena, name, {});
+    }
+
+    return mod.errorSetFromUnsortedNames(names.keys());
+}
+
+/// Avoids crashing the compiler when asking if inferred allocations are noreturn.
+fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool {
+    if (ref == .unreachable_value) return true;
+    if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) {
+        .inferred_alloc, .inferred_alloc_comptime => return false,
+        else => {},
+    };
+    return sema.typeOf(ref).isNoReturn(sema.mod);
+}
+
+/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type.
+fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool {
+    if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) {
+        .inferred_alloc, .inferred_alloc_comptime => return false,
+        else => {},
+    };
+    return sema.typeOf(ref).zigTypeTag(sema.mod) == tag;
+}
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index d74fbda93e..ec76b52d20 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -27,13 +27,13 @@ pub const Managed = struct {
 /// Assumes arena allocation. Does a recursive copy.
 pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
     return TypedValue{
-        .ty = try self.ty.copy(arena),
+        .ty = self.ty,
         .val = try self.val.copy(arena),
     };
 }
 
 pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool {
-    if (!a.ty.eql(b.ty, mod)) return false;
+    if (a.ty.toIntern() != b.ty.toIntern()) return false;
     return a.val.eql(b.val, a.ty, mod);
 }
 
@@ -41,8 +41,8 @@ pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void {
     return tv.val.hash(tv.ty, hasher, mod);
 }
 
-pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value {
-    return tv.val.enumToInt(tv.ty, buffer);
+pub fn enumToInt(tv: TypedValue, mod: *Module) Allocator.Error!Value {
+    return tv.val.enumToInt(tv.ty, mod);
 }
 
 const max_aggregate_items = 100;
@@ -61,7 +61,10 @@ pub fn format(
 ) !void {
     _ = options;
     comptime std.debug.assert(fmt.len == 0);
-    return ctx.tv.print(writer, 3, ctx.mod);
+    return ctx.tv.print(writer, 3, ctx.mod) catch |err| switch (err) {
+        error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
+        else => |e| return e,
+    };
 }
 
 /// Prints the Value according to the Type, not according to the Value Tag.
@@ -70,106 +73,61 @@ pub fn print(
     writer: anytype,
     level: u8,
     mod: *Module,
-) @TypeOf(writer).Error!void {
-    const target = mod.getTarget();
+) (@TypeOf(writer).Error || Allocator.Error)!void {
     var val = tv.val;
     var ty = tv.ty;
-    if (val.isVariable(mod))
-        return writer.writeAll("(variable)");
+    const ip = &mod.intern_pool;
+    while (true) switch (val.ip_index) {
+        .none => switch (val.tag()) {
+            .aggregate => return printAggregate(ty, val, writer, level, mod),
+            .@"union" => {
+                if (level == 0) {
}"); + } + const union_val = val.castTag(.@"union").?.data; + try writer.writeAll(".{ "); - while (true) switch (val.tag()) { - .u1_type => return writer.writeAll("u1"), - .u8_type => return writer.writeAll("u8"), - .i8_type => return writer.writeAll("i8"), - .u16_type => return writer.writeAll("u16"), - .i16_type => return writer.writeAll("i16"), - .u29_type => return writer.writeAll("u29"), - .u32_type => return writer.writeAll("u32"), - .i32_type => return writer.writeAll("i32"), - .u64_type => return writer.writeAll("u64"), - .i64_type => return writer.writeAll("i64"), - .u128_type => return writer.writeAll("u128"), - .i128_type => return writer.writeAll("i128"), - .isize_type => return writer.writeAll("isize"), - .usize_type => return writer.writeAll("usize"), - .c_char_type => return writer.writeAll("c_char"), - .c_short_type => return writer.writeAll("c_short"), - .c_ushort_type => return writer.writeAll("c_ushort"), - .c_int_type => return writer.writeAll("c_int"), - .c_uint_type => return writer.writeAll("c_uint"), - .c_long_type => return writer.writeAll("c_long"), - .c_ulong_type => return writer.writeAll("c_ulong"), - .c_longlong_type => return writer.writeAll("c_longlong"), - .c_ulonglong_type => return writer.writeAll("c_ulonglong"), - .c_longdouble_type => return writer.writeAll("c_longdouble"), - .f16_type => return writer.writeAll("f16"), - .f32_type => return writer.writeAll("f32"), - .f64_type => return writer.writeAll("f64"), - .f80_type => return writer.writeAll("f80"), - .f128_type => return writer.writeAll("f128"), - .anyopaque_type => return writer.writeAll("anyopaque"), - .bool_type => return writer.writeAll("bool"), - .void_type => return writer.writeAll("void"), - .type_type => return writer.writeAll("type"), - .anyerror_type => return writer.writeAll("anyerror"), - .comptime_int_type => return writer.writeAll("comptime_int"), - .comptime_float_type => return writer.writeAll("comptime_float"), - .noreturn_type => return writer.writeAll("noreturn"), - .null_type => return writer.writeAll("@Type(.Null)"), - .undefined_type => return writer.writeAll("@Type(.Undefined)"), - .fn_noreturn_no_args_type => return writer.writeAll("fn() noreturn"), - .fn_void_no_args_type => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args_type => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args_type => return writer.writeAll("fn() callconv(.C) void"), - .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"), - .anyframe_type => return writer.writeAll("anyframe"), - .const_slice_u8_type => return writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0_type => return writer.writeAll("[:0]const u8"), - .anyerror_void_error_union_type => return writer.writeAll("anyerror!void"), - - .enum_literal_type => return writer.writeAll("@Type(.EnumLiteral)"), - .manyptr_u8_type => return writer.writeAll("[*]u8"), - .manyptr_const_u8_type => return writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0_type => return writer.writeAll("[*:0]const u8"), - .atomic_order_type => return writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op_type => return writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention_type => return writer.writeAll("std.builtin.CallingConvention"), - .address_space_type => return writer.writeAll("std.builtin.AddressSpace"), - .float_mode_type => return writer.writeAll("std.builtin.FloatMode"), - .reduce_op_type => return writer.writeAll("std.builtin.ReduceOp"), - 
.modifier_type => return writer.writeAll("std.builtin.CallModifier"), - .prefetch_options_type => return writer.writeAll("std.builtin.PrefetchOptions"), - .export_options_type => return writer.writeAll("std.builtin.ExportOptions"), - .extern_options_type => return writer.writeAll("std.builtin.ExternOptions"), - .type_info_type => return writer.writeAll("std.builtin.Type"), - - .empty_struct_value, .aggregate => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - if (ty.zigTypeTag() == .Struct) { - try writer.writeAll(".{"); - const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); + try print(.{ + .ty = mod.unionPtr(ip.indexToKey(ty.toIntern()).union_type.index).tag_ty, + .val = union_val.tag, + }, writer, level - 1, mod); + try writer.writeAll(" = "); + try print(.{ + .ty = ty.unionFieldType(union_val.tag, mod), + .val = union_val.val, + }, writer, level - 1, mod); + return writer.writeAll(" }"); + }, + .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), + .repeated => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } var i: u32 = 0; + try writer.writeAll(".{ "); + const elem_tv = TypedValue{ + .ty = ty.elemType2(mod), + .val = val.castTag(.repeated).?.data, + }; + const len = ty.arrayLen(mod); + const max_len = std.math.min(len, max_aggregate_items); while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - switch (ty.tag()) { - .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), - else => {}, - } - try print(.{ - .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, i), - }, writer, level - 1, mod); + try print(elem_tv, writer, level - 1, mod); } - if (ty.structFieldCount() > max_aggregate_items) { + if (len > max_aggregate_items) { try writer.writeAll(", ..."); } - return writer.writeAll("}"); - } else { - const elem_ty = ty.elemType2(); - const len = ty.arrayLen(); + return writer.writeAll(" }"); + }, + .slice => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + const payload = val.castTag(.slice).?.data; + const elem_ty = ty.elemType2(mod); + const len = payload.len.toUnsignedInt(mod); if (elem_ty.eql(Type.u8, mod)) str: { const max_len = @intCast(usize, std.math.min(len, max_string_len)); @@ -177,11 +135,14 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, i); - if (elem.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str; + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; + if (elem_val.isUndef(mod)) break :str; + buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } + // TODO would be nice if this had a bit of unicode awareness. 
const truncated = if (len > max_string_len) " (truncated)" else ""; return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); } @@ -192,315 +153,334 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, i), + .val = elem_val, }, writer, level - 1, mod); } if (len > max_aggregate_items) { try writer.writeAll(", ..."); } return writer.writeAll(" }"); - } + }, + .eu_payload => { + val = val.castTag(.eu_payload).?.data; + ty = ty.errorUnionPayload(mod); + }, + .opt_payload => { + val = val.castTag(.opt_payload).?.data; + ty = ty.optionalChild(mod); + }, }, - .@"union" => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - const union_val = val.castTag(.@"union").?.data; - try writer.writeAll(".{ "); - - try print(.{ - .ty = ty.cast(Type.Payload.Union).?.data.tag_ty, - .val = union_val.tag, - }, writer, level - 1, mod); - try writer.writeAll(" = "); - try print(.{ - .ty = ty.unionFieldType(union_val.tag, mod), - .val = union_val.val, - }, writer, level - 1, mod); - - return writer.writeAll(" }"); - }, - .null_value => return writer.writeAll("null"), - .undef => return writer.writeAll("undefined"), - .zero => return writer.writeAll("0"), - .one => return writer.writeAll("1"), - .void_value => return writer.writeAll("{}"), - .unreachable_value => return writer.writeAll("unreachable"), - .the_only_possible_value => return writer.writeAll("0"), - .bool_true => return writer.writeAll("true"), - .bool_false => return writer.writeAll("false"), - .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_type => { - const int_type = val.castTag(.int_type).?.data; - return writer.print("{s}{d}", .{ - if (int_type.signed) "s" else "u", - int_type.bits, - }); - }, - .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), - .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), - .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), - .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .lazy_align => { - const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(target); - return writer.print("{d}", .{x}); - }, - .lazy_size => { - const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(target); - return writer.print("{d}", .{x}); - }, - .function => return writer.print("(function '{s}')", .{ - mod.declPtr(val.castTag(.function).?.data.owner_decl).name, - }), - .extern_fn => return writer.writeAll("(extern function)"), - .variable => unreachable, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref mut '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (level == 
0) { - return writer.writeAll("(comptime field ptr)"); - } - return print(.{ - .ty = payload.field_ty, - .val = payload.field_val, - }, writer, level - 1, mod); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { + else => switch (ip.indexToKey(val.toIntern())) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => return Type.print(val.toType(), writer, mod), + .undef => return writer.writeAll("undefined"), + .runtime_value => return writer.writeAll("(runtime value)"), + .simple_value => |simple_value| switch (simple_value) { + .empty_struct => return printAggregate(ty, val, writer, level, mod), + .generic_poison => return writer.writeAll("(generic poison)"), + else => return writer.writeAll(@tagName(simple_value)), + }, + .variable => return writer.writeAll("(variable)"), + .extern_func => |extern_func| return writer.print("(extern function '{}')", .{ + mod.declPtr(extern_func.decl).name.fmt(ip), + }), + .func => |func| return writer.print("(function '{}')", .{ + mod.declPtr(mod.funcPtr(func.index).owner_decl).name.fmt(ip), + }), + .int => |int| switch (int.storage) { + inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), + .lazy_align => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiAlignment(mod), + }), + .lazy_size => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiSize(mod), + }), + }, + .err => |err| return writer.print("error.{}", .{ + err.name.fmt(ip), + }), + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return writer.print("error.{}", .{ + err_name.fmt(ip), + }), + .payload => |payload| { + val = payload.toValue(); + ty = ty.errorUnionPayload(mod); + }, + }, + .enum_literal => |enum_literal| return writer.print(".{}", .{ + enum_literal.fmt(ip), + }), + .enum_tag => |enum_tag| { + if (level == 0) { + return writer.writeAll("(enum)"); + } + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| { + try writer.print(".{i}", .{enum_type.names[tag_index].fmt(ip)}); + return; + } + try writer.writeAll("@intToEnum("); try print(.{ - .ty = elem_ptr.elem_ty, - .val = elem_ptr.array_ptr, + .ty = Type.type, + .val = enum_tag.ty.toValue(), }, writer, level - 1, mod); - } - return writer.print("[{}]", .{elem_ptr.index}); - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { + try writer.writeAll(", "); try print(.{ - .ty = field_ptr.container_ty, - .val = field_ptr.container_ptr, + .ty = ip.typeOf(enum_tag.int).toType(), + .val = enum_tag.int.toValue(), }, writer, level - 1, mod); - } + try writer.writeAll(")"); + return; + }, + .empty_enum_value => return writer.writeAll("(empty enum value)"), + .float => |float| switch (float.storage) { + inline else => |x| return writer.print("{d}", .{@floatCast(f64, x)}), + }, + .ptr => |ptr| { + if (ptr.addr == .int) { + const i = ip.indexToKey(ptr.addr.int).int; + switch (i.storage) { + inline else => |addr| return writer.print("{x:0>8}", .{addr}), + } + } - if (field_ptr.container_ty.zigTypeTag() == .Struct) { - switch (field_ptr.container_ty.tag()) { - .tuple => return 
writer.print(".@\"{d}\"", .{field_ptr.field_index}), - else => { - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index); - return writer.print(".{s}", .{field_name}); + const ptr_ty = ip.indexToKey(ty.toIntern()).ptr_type; + if (ptr_ty.flags.size == .Slice) { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + const elem_ty = ptr_ty.child.toType(); + const len = ptr.len.toValue().toUnsignedInt(mod); + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @min(len, max_string_len); + var buf: [max_string_len]u8 = undefined; + for (buf[0..max_len], 0..) |*c, i| { + const elem = try val.elemValue(mod, i); + if (elem.isUndef(mod)) break :str; + c.* = @intCast(u8, elem.toUnsignedInt(mod)); + } + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + try writer.writeAll(".{ "); + const max_len = @min(len, max_aggregate_items); + for (0..max_len) |i| { + if (i != 0) try writer.writeAll(", "); + try print(.{ + .ty = elem_ty, + .val = try val.elemValue(mod, i), + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + } + + switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + if (level == 0) return writer.print("(decl '{}')", .{decl.name.fmt(ip)}); + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + if (level == 0) return writer.print("(mut decl '{}')", .{decl.name.fmt(ip)}); + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .comptime_field => |field_val_ip| { + return print(.{ + .ty = ip.typeOf(field_val_ip).toType(), + .val = field_val_ip.toValue(), + }, writer, level - 1, mod); + }, + .int => unreachable, + .eu_payload => |eu_ip| { + try writer.writeAll("(payload of "); + try print(.{ + .ty = ip.typeOf(eu_ip).toType(), + .val = eu_ip.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(")"); + }, + .opt_payload => |opt_ip| { + try print(.{ + .ty = ip.typeOf(opt_ip).toType(), + .val = opt_ip.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(".?"); + }, + .elem => |elem| { + try print(.{ + .ty = ip.typeOf(elem.base).toType(), + .val = elem.base.toValue(), + }, writer, level - 1, mod); + try writer.print("[{}]", .{elem.index}); + }, + .field => |field| { + const container_ty = ip.typeOf(field.base).toType(); + try print(.{ + .ty = container_ty, + .val = field.base.toValue(), + }, writer, level - 1, mod); + + switch (container_ty.zigTypeTag(mod)) { + .Struct => { + if (container_ty.isTuple(mod)) { + try writer.print("[{d}]", .{field.index}); + } + const field_name = container_ty.structFieldName(@intCast(usize, field.index), mod); + try writer.print(".{i}", .{field_name.fmt(ip)}); + }, + .Union => { + const field_name = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)]; + try writer.print(".{i}", .{field_name.fmt(ip)}); + }, + .Pointer => { + std.debug.assert(container_ty.isSlice(mod)); + try writer.writeAll(switch (field.index) { + Value.slice_ptr_index => ".ptr", + Value.slice_len_index => ".len", + else => unreachable, + }); + }, + else => unreachable, + } }, } - } else if (field_ptr.container_ty.zigTypeTag() == .Union) { - const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; - return writer.print(".{s}", 
.{field_name}); - } else if (field_ptr.container_ty.isSlice()) { - switch (field_ptr.field_index) { - Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), - Value.Payload.Slice.len_index => return writer.writeAll(".len"), - else => unreachable, - } - } + }, + .opt => |opt| switch (opt.val) { + .none => return writer.writeAll("null"), + else => |payload| { + val = payload.toValue(); + ty = ty.optionalChild(mod); + }, + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| { + // Strip the 0 sentinel off of strings before printing + const zero_sent = blk: { + const sent = ty.sentinel(mod) orelse break :blk false; + break :blk sent.eql(Value.zero_u8, Type.u8, mod); + }; + const str = if (zero_sent) bytes[0 .. bytes.len - 1] else bytes; + return writer.print("\"{}\"", .{std.zig.fmtEscapes(str)}); + }, + .elems, .repeated_elem => return printAggregate(ty, val, writer, level, mod), + }, + .un => |un| { + try writer.writeAll(".{ "); + if (level > 0) { + try print(.{ + .ty = ty.unionTagTypeHypothetical(mod), + .val = un.tag.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(" = "); + try print(.{ + .ty = ty.unionFieldType(un.tag.toValue(), mod), + .val = un.val.toValue(), + }, writer, level - 1, mod); + } else try writer.writeAll("..."); + return writer.writeAll(" }"); + }, + .memoized_call => unreachable, }, - .empty_array => return writer.writeAll(".{}"), - .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => { - return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)}); - }, - .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); - }, - .repeated => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - var i: u32 = 0; - try writer.writeAll(".{ "); - const elem_tv = TypedValue{ - .ty = ty.elemType2(), - .val = val.castTag(.repeated).?.data, - }; - const len = ty.arrayLen(); - const max_len = std.math.min(len, max_aggregate_items); - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - try print(elem_tv, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .empty_array_sentinel => { - if (level == 0) { - return writer.writeAll(".{ (sentinel) }"); - } - try writer.writeAll(".{ "); - try print(.{ - .ty = ty.elemType2(), - .val = ty.sentinel().?, - }, writer, level - 1, mod); - return writer.writeAll(" }"); - }, - .slice => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - const payload = val.castTag(.slice).?.data; - const elem_ty = ty.elemType2(); - const len = payload.len.toUnsignedInt(target); - - if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); - var buf: [max_string_len]u8 = undefined; - - var i: u32 = 0; - while (i < max_len) : (i += 1) { - var elem_buf: Value.ElemValueBuffer = undefined; - const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); - if (elem_val.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str; - } - - // TODO would be nice if this had a bit of unicode awareness. 
- const truncated = if (len > max_string_len) " (truncated)" else ""; - return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); - } - - try writer.writeAll(".{ "); - - const max_len = std.math.min(len, max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - var buf: Value.ElemValueBuffer = undefined; - try print(.{ - .ty = elem_ty, - .val = payload.ptr.elemValueBuffer(mod, i, &buf), - }, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .float_16 => return writer.print("{d}", .{val.castTag(.float_16).?.data}), - .float_32 => return writer.print("{d}", .{val.castTag(.float_32).?.data}), - .float_64 => return writer.print("{d}", .{val.castTag(.float_64).?.data}), - .float_80 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_80).?.data)}), - .float_128 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_128).?.data)}), - .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(); - }, - .opt_payload => { - val = val.castTag(.opt_payload).?.data; - var buf: Type.Payload.ElemType = undefined; - ty = ty.optionalChild(&buf); - return print(.{ .ty = ty, .val = val }, writer, level, mod); - }, - .eu_payload_ptr => { - try writer.writeAll("&"); - - const data = val.castTag(.eu_payload_ptr).?.data; - - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = Value.initPayload(&ty_val.base), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - var ptr_ty: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = data.container_ty, - }; - - try print(.{ - .ty = Type.initPayload(&ptr_ty.base), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - .opt_payload_ptr => { - const data = val.castTag(.opt_payload_ptr).?.data; - - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = Value.initPayload(&ty_val.base), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - var ptr_ty: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = data.container_ty, - }; - - try print(.{ - .ty = Type.initPayload(&ptr_ty.base), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - - // TODO these should not appear in this function - .inferred_alloc => return writer.writeAll("(inferred allocation value)"), - .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), - .generic_poison_type => return writer.writeAll("(generic poison type)"), - .generic_poison => return writer.writeAll("(generic poison)"), - .runtime_value => return writer.writeAll("[runtime value]"), }; } + +fn printAggregate( + ty: Type, + val: Value, + writer: anytype, + level: u8, + mod: *Module, +) (@TypeOf(writer).Error || Allocator.Error)!void { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + if (ty.zigTypeTag(mod) == .Struct) { + try writer.writeAll(".{"); + const max_len = @min(ty.structFieldCount(mod), max_aggregate_items); + + for (0..max_len) |i| { + if (i != 0) try writer.writeAll(", "); + + const field_name = switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(), + .anon_struct_type => |x| if (x.isTuple()) .none else x.names[i].toOptional(), + else => unreachable, + }; + + if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(&mod.intern_pool)}); + try print(.{ + .ty = ty.structFieldType(i, mod), + .val = try val.fieldValue(mod, i), + }, writer, level - 1, mod); + } + if (ty.structFieldCount(mod) > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll("}"); + } else { + const elem_ty = ty.elemType2(mod); + const len = ty.arrayLen(mod); + + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @intCast(usize, std.math.min(len, max_string_len)); + var buf: [max_string_len]u8 = undefined; + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + const elem = try val.fieldValue(mod, i); + if (elem.isUndef(mod)) break :str; + buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; + } + + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + + try writer.writeAll(".{ "); + + const max_len = std.math.min(len, max_aggregate_items); + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(.{ + .ty = elem_ty, + .val = try val.fieldValue(mod, i), + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + } +} diff --git a/src/Zir.zig b/src/Zir.zig index 2bd5b21f79..c3a5f8e09b 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -19,6 +19,7 @@ const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Ast = std.zig.Ast; +const InternPool = @import("InternPool.zig"); const Zir = @This(); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; @@ -2041,448 +2042,103 @@ pub const Inst = struct { /// The position of a ZIR instruction within the `Zir` instructions array. pub const Index = u32; - /// A reference to a TypedValue or ZIR instruction. + /// A reference to ZIR instruction, or to an InternPool index, or neither. /// - /// If the Ref has a tag in this enum, it refers to a TypedValue. - /// - /// If the value of a Ref does not have a tag, it refers to a ZIR instruction. - /// - /// The first values after the the last tag refer to ZIR instructions which may - /// be derived by subtracting `typed_value_map.len`. - /// - /// When adding a tag to this enum, consider adding a corresponding entry to - /// `primitives` in astgen. + /// If the integer tag value is < InternPool.static_len, then it + /// corresponds to an InternPool index. Otherwise, this refers to a ZIR + /// instruction. /// /// The tag type is specified so that it is safe to bitcast between `[]u32` /// and `[]Ref`. 
diff --git a/src/Zir.zig b/src/Zir.zig
index 2bd5b21f79..c3a5f8e09b 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -19,6 +19,7 @@
 const BigIntConst = std.math.big.int.Const;
 const BigIntMutable = std.math.big.int.Mutable;
 const Ast = std.zig.Ast;
 
+const InternPool = @import("InternPool.zig");
 const Zir = @This();
 const Type = @import("type.zig").Type;
 const Value = @import("value.zig").Value;
@@ -2041,448 +2042,103 @@ pub const Inst = struct {
     /// The position of a ZIR instruction within the `Zir` instructions array.
     pub const Index = u32;
 
-    /// A reference to a TypedValue or ZIR instruction.
+    /// A reference to a ZIR instruction, or to an InternPool index, or neither.
     ///
-    /// If the Ref has a tag in this enum, it refers to a TypedValue.
-    ///
-    /// If the value of a Ref does not have a tag, it refers to a ZIR instruction.
-    ///
-    /// The first values after the the last tag refer to ZIR instructions which may
-    /// be derived by subtracting `typed_value_map.len`.
-    ///
-    /// When adding a tag to this enum, consider adding a corresponding entry to
-    /// `primitives` in astgen.
+    /// If the integer tag value is < InternPool.static_len, then it
+    /// corresponds to an InternPool index. Otherwise, this refers to a ZIR
+    /// instruction.
     ///
     /// The tag type is specified so that it is safe to bitcast between `[]u32`
     /// and `[]Ref`.
     pub const Ref = enum(u32) {
+        u1_type = @enumToInt(InternPool.Index.u1_type),
+        u8_type = @enumToInt(InternPool.Index.u8_type),
+        i8_type = @enumToInt(InternPool.Index.i8_type),
+        u16_type = @enumToInt(InternPool.Index.u16_type),
+        i16_type = @enumToInt(InternPool.Index.i16_type),
+        u29_type = @enumToInt(InternPool.Index.u29_type),
+        u32_type = @enumToInt(InternPool.Index.u32_type),
+        i32_type = @enumToInt(InternPool.Index.i32_type),
+        u64_type = @enumToInt(InternPool.Index.u64_type),
+        i64_type = @enumToInt(InternPool.Index.i64_type),
+        u80_type = @enumToInt(InternPool.Index.u80_type),
+        u128_type = @enumToInt(InternPool.Index.u128_type),
+        i128_type = @enumToInt(InternPool.Index.i128_type),
+        usize_type = @enumToInt(InternPool.Index.usize_type),
+        isize_type = @enumToInt(InternPool.Index.isize_type),
+        c_char_type = @enumToInt(InternPool.Index.c_char_type),
+        c_short_type = @enumToInt(InternPool.Index.c_short_type),
+        c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type),
+        c_int_type = @enumToInt(InternPool.Index.c_int_type),
+        c_uint_type = @enumToInt(InternPool.Index.c_uint_type),
+        c_long_type = @enumToInt(InternPool.Index.c_long_type),
+        c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type),
+        c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type),
+        c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type),
+        c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type),
+        f16_type = @enumToInt(InternPool.Index.f16_type),
+        f32_type = @enumToInt(InternPool.Index.f32_type),
+        f64_type = @enumToInt(InternPool.Index.f64_type),
+        f80_type = @enumToInt(InternPool.Index.f80_type),
+        f128_type = @enumToInt(InternPool.Index.f128_type),
+        anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type),
+        bool_type = @enumToInt(InternPool.Index.bool_type),
+        void_type = @enumToInt(InternPool.Index.void_type),
+        type_type = @enumToInt(InternPool.Index.type_type),
+        anyerror_type = @enumToInt(InternPool.Index.anyerror_type),
+        comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type),
+        comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type),
+        noreturn_type = @enumToInt(InternPool.Index.noreturn_type),
+        anyframe_type = @enumToInt(InternPool.Index.anyframe_type),
+        null_type = @enumToInt(InternPool.Index.null_type),
+        undefined_type = @enumToInt(InternPool.Index.undefined_type),
+        enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type),
+        atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type),
+        atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type),
+        calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type),
+        address_space_type = @enumToInt(InternPool.Index.address_space_type),
+        float_mode_type = @enumToInt(InternPool.Index.float_mode_type),
+        reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type),
+        call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type),
+        prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type),
+        export_options_type = @enumToInt(InternPool.Index.export_options_type),
+        extern_options_type = @enumToInt(InternPool.Index.extern_options_type),
+        type_info_type = @enumToInt(InternPool.Index.type_info_type),
+        manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type),
+        manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type),
+        manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type),
+        single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type),
+        slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type),
+        slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type),
+        anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
+        generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
+        empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
+        undef = @enumToInt(InternPool.Index.undef),
+        zero = @enumToInt(InternPool.Index.zero),
+        zero_usize = @enumToInt(InternPool.Index.zero_usize),
+        zero_u8 = @enumToInt(InternPool.Index.zero_u8),
+        one = @enumToInt(InternPool.Index.one),
+        one_usize = @enumToInt(InternPool.Index.one_usize),
+        one_u8 = @enumToInt(InternPool.Index.one_u8),
+        four_u8 = @enumToInt(InternPool.Index.four_u8),
+        negative_one = @enumToInt(InternPool.Index.negative_one),
+        calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
+        calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
+        void_value = @enumToInt(InternPool.Index.void_value),
+        unreachable_value = @enumToInt(InternPool.Index.unreachable_value),
+        null_value = @enumToInt(InternPool.Index.null_value),
+        bool_true = @enumToInt(InternPool.Index.bool_true),
+        bool_false = @enumToInt(InternPool.Index.bool_false),
+        empty_struct = @enumToInt(InternPool.Index.empty_struct),
+        generic_poison = @enumToInt(InternPool.Index.generic_poison),
+
+        /// This tag is here to match Air and InternPool, however it is unused
+        /// for ZIR purposes.
+        var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type),

         /// This Ref does not correspond to any ZIR instruction or constant
         /// value and may instead be used as a sentinel to indicate null.
-        none,
-
-        u1_type,
-        u8_type,
-        i8_type,
-        u16_type,
-        i16_type,
-        u29_type,
-        u32_type,
-        i32_type,
-        u64_type,
-        i64_type,
-        u128_type,
-        i128_type,
-        usize_type,
-        isize_type,
-        c_char_type,
-        c_short_type,
-        c_ushort_type,
-        c_int_type,
-        c_uint_type,
-        c_long_type,
-        c_ulong_type,
-        c_longlong_type,
-        c_ulonglong_type,
-        c_longdouble_type,
-        f16_type,
-        f32_type,
-        f64_type,
-        f80_type,
-        f128_type,
-        anyopaque_type,
-        bool_type,
-        void_type,
-        type_type,
-        anyerror_type,
-        comptime_int_type,
-        comptime_float_type,
-        noreturn_type,
-        anyframe_type,
-        null_type,
-        undefined_type,
-        enum_literal_type,
-        atomic_order_type,
-        atomic_rmw_op_type,
-        calling_convention_type,
-        address_space_type,
-        float_mode_type,
-        reduce_op_type,
-        modifier_type,
-        prefetch_options_type,
-        export_options_type,
-        extern_options_type,
-        type_info_type,
-        manyptr_u8_type,
-        manyptr_const_u8_type,
-        fn_noreturn_no_args_type,
-        fn_void_no_args_type,
-        fn_naked_noreturn_no_args_type,
-        fn_ccc_void_no_args_type,
-        single_const_pointer_to_comptime_int_type,
-        const_slice_u8_type,
-        anyerror_void_error_union_type,
-        generic_poison_type,
-
-        /// `undefined` (untyped)
-        undef,
-        /// `0` (comptime_int)
-        zero,
-        /// `1` (comptime_int)
-        one,
-        /// `{}`
-        void_value,
-        /// `unreachable` (noreturn type)
-        unreachable_value,
-        /// `null` (untyped)
-        null_value,
-        /// `true`
-        bool_true,
-        /// `false`
-        bool_false,
-        /// `.{}` (untyped)
-        empty_struct,
-        /// `0` (usize)
-        zero_usize,
-        /// `1` (usize)
-        one_usize,
-        /// `std.builtin.CallingConvention.C`
-        calling_convention_c,
-        /// `std.builtin.CallingConvention.Inline`
-        calling_convention_inline,
-        /// Used for generic parameters where the type and value
-        /// is not known until generic function instantiation.
-        generic_poison,
-        /// This is a special type for variadic parameters of a function call.
-        /// Casts to it will validate that the type can be passed to a c
-        /// calling convention function.
-        var_args_param,
-
+        none = @enumToInt(InternPool.Index.none),
         _,
-
-        pub const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{
-            .none = undefined,
-
-            .u1_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u1_type),
-            },
-            .u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u8_type),
-            },
-            .i8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i8_type),
-            },
-            .u16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u16_type),
-            },
-            .i16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i16_type),
-            },
-            .u29_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u29_type),
-            },
-            .u32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u32_type),
-            },
-            .i32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i32_type),
-            },
-            .u64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u64_type),
-            },
-            .i64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i64_type),
-            },
-            .u128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u128_type),
-            },
-            .i128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i128_type),
-            },
-            .usize_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.usize_type),
-            },
-            .isize_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.isize_type),
-            },
-            .c_char_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_char_type),
-            },
-            .c_short_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_short_type),
-            },
-            .c_ushort_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ushort_type),
-            },
-            .c_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_int_type),
-            },
-            .c_uint_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_uint_type),
-            },
-            .c_long_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_long_type),
-            },
-            .c_ulong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ulong_type),
-            },
-            .c_longlong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_longlong_type),
-            },
-            .c_ulonglong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ulonglong_type),
-            },
-            .c_longdouble_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_longdouble_type),
-            },
-            .f16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f16_type),
-            },
-            .f32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f32_type),
-            },
-            .f64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f64_type),
-            },
-            .f80_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f80_type),
-            },
-            .f128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f128_type),
-            },
-            .anyopaque_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyopaque_type),
-            },
-            .bool_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.bool_type),
-            },
-            .void_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.void_type),
-            },
-            .type_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.type_type),
-            },
-            .anyerror_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyerror_type),
-            },
-            .comptime_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.comptime_int_type),
-            },
-            .comptime_float_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.comptime_float_type),
-            },
-            .noreturn_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.noreturn_type),
-            },
-            .anyframe_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyframe_type),
-            },
-            .null_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.null_type),
-            },
-            .undefined_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.undefined_type),
-            },
-            .fn_noreturn_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_noreturn_no_args_type),
-            },
-            .fn_void_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_void_no_args_type),
-            },
-            .fn_naked_noreturn_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_naked_noreturn_no_args_type),
-            },
-            .fn_ccc_void_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_ccc_void_no_args_type),
-            },
-            .single_const_pointer_to_comptime_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.single_const_pointer_to_comptime_int_type),
-            },
-            .const_slice_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.const_slice_u8_type),
-            },
-            .anyerror_void_error_union_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyerror_void_error_union_type),
-            },
-            .generic_poison_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.generic_poison_type),
-            },
-            .enum_literal_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.enum_literal_type),
-            },
-            .manyptr_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.manyptr_u8_type),
-            },
-            .manyptr_const_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.manyptr_const_u8_type),
-            },
-            .atomic_order_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.atomic_order_type),
-            },
-            .atomic_rmw_op_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.atomic_rmw_op_type),
-            },
-            .calling_convention_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.calling_convention_type),
-            },
-            .address_space_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.address_space_type),
-            },
-            .float_mode_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.float_mode_type),
-            },
-            .reduce_op_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.reduce_op_type),
-            },
-            .modifier_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.modifier_type),
-            },
-            .prefetch_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.prefetch_options_type),
-            },
-            .export_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.export_options_type),
-            },
-            .extern_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.extern_options_type),
-            },
-            .type_info_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.type_info_type),
-            },
-
-            .undef = .{
-                .ty = Type.initTag(.undefined),
-                .val = Value.initTag(.undef),
-            },
-            .zero = .{
-                .ty = Type.initTag(.comptime_int),
-                .val = Value.initTag(.zero),
-            },
-            .zero_usize = .{
-                .ty = Type.initTag(.usize),
-                .val = Value.initTag(.zero),
-            },
-            .one = .{
-                .ty = Type.initTag(.comptime_int),
-                .val = Value.initTag(.one),
-            },
-            .one_usize = .{
-                .ty = Type.initTag(.usize),
-                .val = Value.initTag(.one),
-            },
-            .void_value = .{
-                .ty = Type.initTag(.void),
-                .val = Value.initTag(.void_value),
-            },
-            .unreachable_value = .{
-                .ty = Type.initTag(.noreturn),
-                .val = Value.initTag(.unreachable_value),
-            },
-            .null_value = .{
-                .ty = Type.initTag(.null),
-                .val = Value.initTag(.null_value),
-            },
-            .bool_true = .{
-                .ty = Type.initTag(.bool),
-                .val = Value.initTag(.bool_true),
-            },
-            .bool_false = .{
-                .ty = Type.initTag(.bool),
-                .val = Value.initTag(.bool_false),
-            },
-            .empty_struct = .{
-                .ty = Type.initTag(.empty_struct_literal),
-                .val = Value.initTag(.empty_struct_value),
-            },
-            .calling_convention_c = .{
-                .ty = Type.initTag(.calling_convention),
-                .val = .{ .ptr_otherwise = &calling_convention_c_payload.base },
-            },
-            .calling_convention_inline = .{
-                .ty = Type.initTag(.calling_convention),
-                .val = .{ .ptr_otherwise = &calling_convention_inline_payload.base },
-            },
-            .generic_poison = .{
-                .ty = Type.initTag(.generic_poison),
-                .val = Value.initTag(.generic_poison),
-            },
-            .var_args_param = undefined,
-        });
-    };
-
-    /// We would like this to be const but `Value` wants a mutable pointer for
-    /// its payload field. Nothing should mutate this though.
-    var calling_convention_c_payload: Value.Payload.U32 = .{
-        .base = .{ .tag = .enum_field_index },
-        .data = @enumToInt(std.builtin.CallingConvention.C),
-    };
-
-    /// We would like this to be const but `Value` wants a mutable pointer for
-    /// its payload field. Nothing should mutate this though.
-    var calling_convention_inline_payload: Value.Payload.U32 = .{
-        .base = .{ .tag = .enum_field_index },
-        .data = @enumToInt(std.builtin.CallingConvention.Inline),
    };

    /// All instructions have an 8-byte payload, which is contained within
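With `typed_value_map` gone, the static constants live in the InternPool, and each `Ref` tag is pinned to the matching `InternPool.Index` value. The explicit `enum(u32)` tag type together with the `_` escape hatch is what makes the doc comment's `[]u32`/`[]Ref` bitcast claim hold. A hypothetical miniature of that layout (the real tags come from `InternPool.Index`, not hand-written integers):

```zig
const std = @import("std");

// Hypothetical miniature: a non-exhaustive enum with a fixed u32 tag type.
const MiniRef = enum(u32) {
    none = 0,
    u8_type = 1,
    _, // any other u32 is still a valid MiniRef

    fn toRaw(self: MiniRef) u32 {
        return @enumToInt(self);
    }
};

test "raw u32s round-trip through the non-exhaustive enum" {
    const stray = @intToEnum(MiniRef, 500); // no declared tag needed
    try std.testing.expectEqual(@as(u32, 500), stray.toRaw());
    try std.testing.expect(@intToEnum(MiniRef, 1) == .u8_type);
}
```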
@@ -4163,13 +3819,14 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
     };
 }
 
-const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
+pub const ref_start_index: u32 = InternPool.static_len;
 
 pub fn indexToRef(inst: Inst.Index) Inst.Ref {
     return @intToEnum(Inst.Ref, ref_start_index + inst);
 }
 
 pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
+    assert(inst != .none);
     const ref_int = @enumToInt(inst);
     if (ref_int >= ref_start_index) {
         return ref_int - ref_start_index;
@@ -4177,3 +3834,8 @@ pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
         return null;
     }
 }
+
+pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index {
+    if (inst == .none) return null;
+    return refToIndex(inst);
+}
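The arithmetic behind `indexToRef`/`refToIndex` above is a plain offset by `InternPool.static_len`: tags below the threshold name interned constants, everything at or above it encodes an instruction index. A stand-alone sketch, using an illustrative `static_len` in place of the real constant:

```zig
const std = @import("std");

// Illustrative stand-in for InternPool.static_len.
const static_len: u32 = 4;

fn indexToRef(inst: u32) u32 {
    return static_len + inst;
}

fn refToIndex(ref: u32) ?u32 {
    return if (ref >= static_len) ref - static_len else null;
}

test "refs round-trip through instruction indices" {
    try std.testing.expectEqual(@as(?u32, 7), refToIndex(indexToRef(7)));
    // Tags below static_len name interned constants, not instructions.
    try std.testing.expectEqual(@as(?u32, null), refToIndex(2));
}
```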
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 971ed4749d..bf945e6983 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -328,7 +328,7 @@ const Self = @This();
 pub fn generate(
     bin_file: *link.File,
     src_loc: Module.SrcLoc,
-    module_fn: *Module.Fn,
+    module_fn_index: Module.Fn.Index,
     air: Air,
     liveness: Liveness,
     code: *std.ArrayList(u8),
@@ -339,6 +339,7 @@ pub fn generate(
     }
 
     const mod = bin_file.options.module.?;
+    const module_fn = mod.funcPtr(module_fn_index);
     const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
     assert(fn_owner_decl.has_tv);
     const fn_type = fn_owner_decl.ty;
@@ -471,7 +472,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }
 
 fn gen(self: *Self) !void {
-    const cc = self.fn_type.fnCallingConvention();
+    const mod = self.bin_file.options.module.?;
+    const cc = self.fn_type.fnCallingConvention(mod);
     if (cc != .Naked) {
         // stp fp, lr, [sp, #-16]!
         _ = try self.addInst(.{
@@ -520,10 +522,10 @@ fn gen(self: *Self) !void {
                 const inst = self.air.getMainBody()[arg_index];
                 assert(self.air.instructions.items(.tag)[inst] == .arg);
 
-                const ty = self.air.typeOfIndex(inst);
+                const ty = self.typeOfIndex(inst);
 
-                const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-                const abi_align = ty.abiAlignment(self.target.*);
+                const abi_size = @intCast(u32, ty.abiSize(mod));
+                const abi_align = ty.abiAlignment(mod);
                 const stack_offset = try self.allocMem(abi_size, abi_align, inst);
 
                 try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -652,13 +654,14 @@ fn gen(self: *Self) !void {
 }
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+    const mod = self.bin_file.options.module.?;
+    const ip = &mod.intern_pool;
     const air_tags = self.air.instructions.items(.tag);
 
     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             continue;
-        }
 
         const old_air_bookkeeping = self.air_bookkeeping;
         try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -842,8 +845,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .ptr_elem_val => try self.airPtrElemVal(inst),
             .ptr_elem_ptr => try self.airPtrElemPtr(inst),
 
-            .constant => unreachable, // excluded from function bodies
-            .const_ty => unreachable, // excluded from function bodies
+            .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
 
             .unreach => self.finishAirBookkeeping(),
 
             .optional_payload => try self.airOptionalPayload(inst),
@@ -916,8 +918,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 /// Asserts there is already capacity to insert into top branch inst_table.
 fn processDeath(self: *Self, inst: Air.Inst.Index) void {
-    const air_tags = self.air.instructions.items(.tag);
-    if (air_tags[inst] == .constant) return; // Constants are immortal.
+    assert(self.air.instructions.items(.tag)[inst] != .interned);
     // When editing this function, note that the logic must synchronize with `reuseOperand`.
     const prev_value = self.getResolvedInstValue(inst);
     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -951,8 +952,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
         self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
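For context on the `finishAir` change above: each instruction carries one tomb bit per operand, and any operand ref below `Air.ref_start_index` is an interned constant that never dies. A reduced sketch of that loop, with illustrative values in place of the real constants:

```zig
const std = @import("std");

// Illustrative stand-in for Air.ref_start_index.
const ref_start_index: u32 = 4;

// One tomb bit per operand; a set bit means the operand dies here.
// Refs below ref_start_index are interned constants and are skipped.
fn deadOperandIndices(tomb_bits: u3, ops: [3]u32, out: *[3]?u32) void {
    var bits = tomb_bits;
    for (ops, 0..) |op, i| {
        const dies = @truncate(u1, bits) != 0;
        bits >>= 1;
        out[i] = if (dies and op >= ref_start_index) op - ref_start_index else null;
    }
}

test "constants are immortal" {
    var out: [3]?u32 = undefined;
    deadOperandIndices(0b011, .{ 2, 9, 9 }, &out);
    try std.testing.expectEqual(@as(?u32, null), out[0]); // interned constant
    try std.testing.expectEqual(@as(?u32, 5), out[1]); // instruction 9 dies
    try std.testing.expectEqual(@as(?u32, null), out[2]); // tomb bit not set
}
```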
@@ -1026,31 +1027,31 @@
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
-    const elem_ty = self.air.typeOfIndex(inst).elemType();
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = self.typeOfIndex(inst).childType(mod);
 
-    if (!elem_ty.hasRuntimeBits()) {
+    if (!elem_ty.hasRuntimeBits(mod)) {
         // return the stack offset 0. Stack offset 0 will be where all
         // zero-sized stack allocations live as non-zero-sized
         // allocations will always have an offset > 0.
         return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     return self.allocMem(abi_size, abi_align, inst);
 }
 
 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -1066,7 +1067,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst
 }
 
 pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
-    const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst);
+    const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst);
     log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
 
     const reg_mcv = self.getResolvedInstValue(inst);
@@ -1078,14 +1079,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
     try branch.inst_table.put(self.gpa, inst, stack_mcv);
 
-    try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
+    try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
 }
 
 /// Save the current instruction stored in the compare flags if
 /// occupied
 fn spillCompareFlagsIfOccupied(self: *Self) !void {
     if (self.compare_flags_inst) |inst_to_save| {
-        const ty = self.air.typeOfIndex(inst_to_save);
+        const ty = self.typeOfIndex(inst_to_save);
         const mcv = self.getResolvedInstValue(inst_to_save);
         const new_mcv = switch (mcv) {
             .compare_flags => try self.allocRegOrMem(ty, true, inst_to_save),
@@ -1093,7 +1094,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
             else => unreachable, // mcv doesn't occupy the compare flags
         };
 
-        try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
+        try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv);
         log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
 
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1125,9 +1126,9 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
     const raw_reg = try self.register_manager.allocReg(reg_owner, gp);
-    const ty = self.air.typeOfIndex(reg_owner);
+    const ty = self.typeOfIndex(reg_owner);
     const reg = self.registerAlias(raw_reg, ty);
-    try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
+    try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv);
     return MCValue{ .register = reg };
 }
 
@@ -1137,17 +1138,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = switch (self.ret_mcv) {
         .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
-            const ret_ty = self.fn_type.fnReturnType();
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ret_ty = self.fn_type.fnReturnType(mod);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
 
             // addr_reg will contain the address of where to store the
             // result into
@@ -1177,13 +1175,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
+    const mod = self.bin_file.options.module.?;
     const operand = ty_op.operand;
     const operand_mcv = try self.resolveInst(operand);
-    const operand_ty = self.air.typeOf(operand);
-    const operand_info = operand_ty.intInfo(self.target.*);
+    const operand_ty = self.typeOf(operand);
+    const operand_info = operand_ty.intInfo(mod);
 
-    const dest_ty = self.air.typeOfIndex(inst);
-    const dest_info = dest_ty.intInfo(self.target.*);
+    const dest_ty = self.typeOfIndex(inst);
+    const dest_info = dest_ty.intInfo(mod);
 
     const result: MCValue = result: {
         const operand_lock: ?RegisterLock = switch (operand_mcv) {
@@ -1199,14 +1198,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
 
         if (dest_info.bits > operand_info.bits) {
             const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst);
-            try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated);
+            try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated);
             break :result dest_mcv;
         } else {
             if (self.reuseOperand(inst, operand, 0, truncated)) {
                 break :result truncated;
             } else {
                 const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst);
-                try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated);
+                try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated);
                 break :result dest_mcv;
             }
         }
@@ -1257,8 +1256,9 @@ fn trunc(
     operand_ty: Type,
     dest_ty: Type,
 ) !MCValue {
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     if (info_b.bits <= 64) {
         const operand_reg = switch (operand) {
@@ -1300,8 +1300,8 @@ fn trunc(
 fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const operand = try self.resolveInst(ty_op.operand);
-    const operand_ty = self.air.typeOf(ty_op.operand);
-    const dest_ty = self.air.typeOfIndex(inst);
+    const operand_ty = self.typeOf(ty_op.operand);
+    const dest_ty = self.typeOfIndex(inst);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
         break :blk try self.trunc(inst, operand, operand_ty, dest_ty);
@@ -1319,15 +1319,16 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         switch (operand) {
             .dead => unreachable,
             .unreach => unreachable,
             .compare_flags => |cond| break :result MCValue{ .compare_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Bool => {
                         // TODO convert this to mvn + and
                         const op_reg = switch (operand) {
@@ -1361,7 +1362,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                     .Int => {
-                        const int_info = operand_ty.intInfo(self.target.*);
+                        const int_info = operand_ty.intInfo(mod);
                         if (int_info.bits <= 64) {
                             const op_reg = switch (operand) {
                                 .register => |r| r,
@@ -1413,13 +1414,13 @@ fn minMax(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM min/max on floats", .{}),
         .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
         .Int => {
-            const mod = self.bin_file.options.module.?;
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -1488,8 +1489,8 @@ fn minMax(
 fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1508,9 +1509,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const len = try self.resolveInst(bin_op.rhs);
-        const len_ty = self.air.typeOf(bin_op.rhs);
+        const len_ty = self.typeOf(bin_op.rhs);
 
         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
@@ -1907,12 +1908,12 @@ fn addSub(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO binary operations on floats", .{}),
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 const lhs_immediate = try lhs_bind.resolveToImmediate(self);
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
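All of these backend helpers follow one shape after the refactor: type queries go through `mod` (the Module, which owns the InternPool) instead of `self.target.*`, dispatch on `zigTypeTag`, and only integers of 64 bits or fewer take the register fast path. A reduced sketch of that dispatch, with stand-in types rather than the real compiler `Type`:

```zig
const std = @import("std");

// Stand-in for the tags the real code gets from lhs_ty.zigTypeTag(mod).
const TypeTag = enum { Int, Float, Vector };

fn selectsFastPath(tag: TypeTag, bits: u16) bool {
    return switch (tag) {
        .Int => bits <= 64, // fits the register-based fast path
        .Float, .Vector => false, // still TODO in the backend above
    };
}

test "only small ints take the inline path" {
    try std.testing.expect(selectsFastPath(.Int, 32));
    try std.testing.expect(!selectsFastPath(.Int, 128));
    try std.testing.expect(!selectsFastPath(.Float, 32));
}
```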
@@ -1968,11 +1969,11 @@ fn mul(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // TODO add optimisations for multiplication
                 // with immediates, for example a * 2 can be
@@ -1999,7 +2000,8 @@ fn divFloat(
     _ = rhs_ty;
     _ = maybe_inst;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO div_float", .{}),
         .Vector => return self.fail("TODO div_float on vectors", .{}),
         else => unreachable,
@@ -2015,12 +2017,12 @@ fn divTrunc(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2049,12 +2051,12 @@ fn divFloor(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2082,12 +2084,12 @@ fn divExact(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO div on floats", .{}),
         .Vector => return self.fail("TODO div on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -2118,12 +2120,12 @@ fn rem(
     _ = maybe_inst;
 
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO rem/mod on floats", .{}),
         .Vector => return self.fail("TODO rem/mod on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -2188,7 +2190,8 @@ fn modulo(
     _ = rhs_ty;
     _ = maybe_inst;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO mod on floats", .{}),
         .Vector => return self.fail("TODO mod on vectors", .{}),
         .Int => return self.fail("TODO mod on ints", .{}),
@@ -2205,10 +2208,11 @@ fn wrappingArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // Generate an add/sub/mul
                 const result: MCValue = switch (tag) {
@@ -2240,11 +2244,11 @@ fn bitwise(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // TODO implement bitwise operations with immediates
                 const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2274,10 +2278,11 @@ fn shiftExact(
 ) InnerError!MCValue {
     _ = rhs_ty;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
 
@@ -2323,10 +2328,11 @@ fn shiftNormal(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // Generate a shl_exact/shr_exact
                 const result: MCValue = switch (tag) {
@@ -2362,7 +2368,8 @@ fn booleanOp(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Bool => {
             assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
             assert((try rhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
@@ -2388,17 +2395,17 @@ fn ptrArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Pointer => {
-            const mod = self.bin_file.options.module.?;
             assert(rhs_ty.eql(Type.usize, mod));
 
             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
-            const elem_size = elem_ty.abiSize(self.target.*);
+            const elem_size = elem_ty.abiSize(mod);
 
             const base_tag: Air.Inst.Tag = switch (tag) {
                 .ptr_add => .add,
@@ -2426,8 +2433,8 @@ fn ptrArithmetic(
 
 fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
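The `ptrArithmetic` hunk above scales `ptr_add`/`ptr_sub` by the element's ABI size, and a single-item pointer to an array steps by the array's element type rather than by the whole array. A sketch of the scaling rule (illustrative sizes only, not the backend's actual code path):

```zig
const std = @import("std");

// ptr + n advances by n * element size in bytes.
fn byteOffset(elem_size: u64, n: u64) u64 {
    return elem_size * n;
}

test "ptr_add scales by element size" {
    // *u32 + 3 advances 12 bytes.
    try std.testing.expectEqual(@as(u64, 12), byteOffset(@sizeOf(u32), 3));
    // *[8]u16 steps by u16 (2 bytes), not by 16-byte arrays.
    try std.testing.expectEqual(@as(u64, 6), byteOffset(@sizeOf(u16), 3));
}
```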
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -2511,23 +2518,23 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_ty = self.typeOfIndex(inst); + const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 1...31, 33...63 => { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -2565,7 +2572,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; }, @@ -2639,24 +2646,23 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); + const mod = self.bin_file.options.module.?; const result: MCValue = result: { - const mod = self.bin_file.options.module.?; - const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_ty = self.typeOfIndex(inst); + const tuple_size = 
@intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -2709,7 +2715,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits <= 64) { @@ -2849,7 +2855,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else return self.fail("TODO implement mul_with_overflow for integers > u64/i64", .{}); @@ -2864,21 +2870,22 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); + const mod = self.bin_file.options.module.?; const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_ty = self.typeOfIndex(inst); + const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -2981,7 +2988,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } 
else { @@ -3003,7 +3010,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOf(ty_op.operand); + const optional_ty = self.typeOf(ty_op.operand); const mcv = try self.resolveInst(ty_op.operand); break :result try self.optionalPayload(inst, mcv, optional_ty); }; @@ -3011,10 +3018,10 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { } fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&opt_buf); - if (!payload_ty.hasRuntimeBits()) return MCValue.none; - if (optional_ty.isPtrLikeOptional()) { + const mod = self.bin_file.options.module.?; + const payload_ty = optional_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none; + if (optional_ty.isPtrLikeOptional(mod)) { // TODO should we reuse the operand here? const raw_reg = try self.register_manager.allocReg(inst, gp); const reg = self.registerAlias(raw_reg, payload_ty); @@ -3055,16 +3062,17 @@ fn errUnionErr( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + const mod = self.bin_file.options.module.?; + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); + if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try error_union_bind.resolveToMcv(self); } - const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*)); + const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -3086,7 +3094,7 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8; + const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers @@ -3120,7 +3128,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionErr(error_union_bind, error_union_ty, inst); }; @@ -3134,16 +3142,17 @@ fn errUnionPayload( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + const mod = self.bin_file.options.module.?; + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); + if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - 
const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -3165,10 +3174,10 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8; + const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ // Set both registers to the X variant to get the full width @@ -3199,7 +3208,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst); }; @@ -3245,6 +3254,7 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) { @@ -3252,12 +3262,12 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { } const result: MCValue = result: { - const payload_ty = self.air.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) { + const payload_ty = self.typeOf(ty_op.operand); + if (!payload_ty.hasRuntimeBits(mod)) { break :result MCValue{ .immediate = 1 }; } - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const operand_lock: ?RegisterLock = switch (operand) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -3265,7 +3275,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.isPtrLikeOptional(mod)) { // TODO should we check if we can reuse the operand? 
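// (Aside on the optional-type hunks above: before the InternPool,
// reading an optional's child type required a caller-provided buffer;
// the Module now answers directly. Old and new shapes side by side:
//
//   var opt_buf: Type.Payload.ElemType = undefined;        // old
//   const payload_ty = optional_ty.optionalChild(&opt_buf);
//
//   const payload_ty = optional_ty.optionalChild(mod);     // new
//
// For the same reason hasRuntimeBits, hasRuntimeBitsIgnoreComptime and
// isPtrLikeOptional lose their zero-argument forms: each needs `mod` to
// inspect the interned type.)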
const raw_reg = try self.register_manager.allocReg(inst, gp); const reg = self.registerAlias(raw_reg, payload_ty); @@ -3273,9 +3283,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .register = reg }; } - const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*)); - const optional_abi_align = optional_ty.abiAlignment(self.target.*); - const offset = @intCast(u32, payload_ty.abiSize(self.target.*)); + const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod)); + const optional_abi_align = optional_ty.abiAlignment(mod); + const offset = @intCast(u32, payload_ty.abiSize(mod)); const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst); try self.genSetStack(payload_ty, stack_offset, operand); @@ -3289,19 +3299,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); @@ -3314,17 +3325,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); - const payload_off = 
errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); @@ -3416,11 +3428,11 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&buf); + const slice_ty = self.typeOf(bin_op.lhs); + const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { + const ptr_ty = slice_ty.slicePtrFieldType(mod); const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); @@ -3440,8 +3452,9 @@ fn ptrElemVal( ptr_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const elem_ty = ptr_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); // TODO optimize for elem_sizes of 1, 2, 4, 8 switch (elem_size) { @@ -3465,8 +3478,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const slice_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const slice_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null); break :result addr; @@ -3481,9 +3494,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const ptr_ty = self.typeOf(bin_op.lhs); + const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -3499,8 +3513,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const ptr_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const ptr_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null); break :result addr; @@ -3597,8 +3611,9 @@ fn reuseOperand( } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const elem_ty = ptr_ty.elemType(); - const elem_size = elem_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const 
elem_ty = ptr_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -3753,14 +3768,14 @@ fn genInlineMemset( ) !void { const dst_reg = switch (dst) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst), + else => try self.copyToTmpRegister(Type.manyptr_u8, dst), }; const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock); const val_reg = switch (val) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.u8), val), + else => try self.copyToTmpRegister(Type.u8, val), }; const val_reg_lock = self.register_manager.lockReg(val_reg); defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -3844,15 +3859,16 @@ fn genInlineMemsetCode( } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_ty = self.typeOfIndex(inst); + const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -3867,18 +3883,19 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(elem_ty, true, inst); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate, 4 => .ldr_immediate, 8 => .ldr_immediate, 3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}), @@ -3896,7 +3913,8 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type } fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb_immediate, @@ -3917,8 +3935,9 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { + const mod = self.bin_file.options.module.?; log.debug("store: storing {} to {}", .{ value, ptr }); - const abi_size = value_ty.abiSize(self.target.*); + const abi_size = value_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4046,8 +4065,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) 
!void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -4069,10 +4088,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); - const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const ptr_ty = self.typeOf(operand); + const struct_ty = ptr_ty.childType(mod); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4093,10 +4113,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); - const struct_field_ty = struct_ty.structFieldType(index); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_ty = self.typeOf(operand); + const struct_field_ty = struct_ty.structFieldType(index, mod); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .dead, .unreach => unreachable, @@ -4142,12 +4163,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); - const struct_ty = self.air.getRefType(ty_pl.ty).childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*)); + const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -4169,7 +4191,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[inst]; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); @@ -4222,11 +4244,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const 
args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); + const mod = self.bin_file.options.module.?; - const fn_ty = switch (ty.zigTypeTag()) { + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -4245,18 +4268,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (info.return_value == .stack_offset) { log.debug("airCall: return by reference", .{}); - const ret_ty = fn_ty.fnReturnType(); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*)); + const ret_ty = fn_ty.fnReturnType(mod); + const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); const ret_ptr_reg = self.registerAlias(.x0, Type.usize); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.register_manager.getReg(ret_ptr_reg, null); try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset }); @@ -4268,7 +4287,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -4289,21 +4308,18 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
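// (From the return-by-reference path above: constructing `*T` on the fly
// used to mean stack-allocating a type payload. The removed shape and
// its replacement, a single interning call on the Module:
//
//   var ptr_ty_payload: Type.Payload.ElemType = .{          // old
//       .base = .{ .tag = .single_mut_pointer },
//       .data = ret_ty,
//   };
//   const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
//
//   const ptr_ty = try mod.singleMutPtrType(ret_ty);        // new
//
// The `try` is new as well: interning may allocate, so the constructor
// can fail. The same substitution appears later in genSetStack and
// genSetStackArgument.)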
- const mod = self.bin_file.options.module.?; - if (self.air.value(callee)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (try self.air.value(callee, mod)) |func_value| { + if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .got, .sym_index = sym_index, @@ -4312,7 +4328,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .got, .sym_index = sym_index, @@ -4326,17 +4342,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const got_addr = p9.bases.data; const got_index = decl_block.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); + try self.genSetReg(Type.usize, .x30, .{ .memory = fn_got_addr }); } else unreachable; _ = try self.addInst(.{ .tag = .blr, .data = .{ .reg = .x30 }, }); - } else if (func_value.castTag(.extern_fn)) |func_payload| { - const extern_fn = func_payload.data; - const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); - const lib_name = mem.sliceTo(extern_fn.lib_name, 0); + } else if (func_value.getExternFunc(mod)) |extern_func| { + const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name); + const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.MachO)) |macho_file| { const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -4352,7 +4367,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier }); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .import, .sym_index = sym_index, @@ -4369,7 +4384,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(ty, .x30, mcv); @@ -4407,14 +4422,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; 
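// (Looking back at the airCall hunk above: function values are interned
// now, so the old castTag(.function)/castTag(.extern_fn) payload casts
// become Module accessors, and names come out of the intern pool rather
// than as sentinel-terminated pointers. The new control flow, condensed:
//
//   if (try self.air.value(callee, mod)) |func_value| {
//       if (func_value.getFunction(mod)) |func| {
//           // local function: load its GOT address, then branch via blr
//       } else if (func_value.getExternFunc(mod)) |extern_func| {
//           const decl_name = mod.intern_pool.stringToSlice(
//               mod.declPtr(extern_func.decl).name);
//           // extern function: resolve a global symbol by name
//       }
//   } else {
//       // runtime callee: must be a pointer-typed value in a register
//   }
//
// Note that air.value itself can now fail, hence the `try`.)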
const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, .immediate => { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); }, .register => |reg| { // Return result by value @@ -4425,11 +4441,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { // // self.ret_mcv is an address to where this function // should store its result into - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(self.ret_mcv, operand, ptr_ty, ret_ty); }, else => unreachable, @@ -4442,10 +4454,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ptr_ty = self.typeOf(un_op); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4465,8 +4478,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const abi_align = ret_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4485,7 +4498,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op); @@ -4501,29 +4514,28 @@ fn cmp( lhs_ty: Type, op: math.CompareOperator, ) !MCValue { - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { - var opt_buffer: Type.Payload.ElemType = undefined; - const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + const payload_ty = lhs_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + break :blk Type.u1; + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO ARM cmp non-pointer optionals", .{}); } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 64) { try self.spillCompareFlagsIfOccupied(); @@ 
-4609,8 +4621,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; + const mod = self.bin_file.options.module.?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); @@ -4625,7 +4638,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -4687,8 +4700,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -4777,7 +4790,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -4804,7 +4817,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
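// (On the operand-death check above: Air.Inst.Ref used to reserve its
// first typed_value_map.len values for well-known constants. With those
// constants interned, the reserved range is named Air.ref_start_index,
// and anything at or past it refers to a real instruction:
//
//   const op_int = @enumToInt(pl_op.operand);
//   if (op_int >= Air.ref_start_index) {
//       const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
//       self.processDeath(op_index);
//   }
//
// which is the same mapping Air.refToIndex performs elsewhere.)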
- try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -4819,13 +4832,13 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { } fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue { - const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional()) blk: { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = operand_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) + const mod = self.bin_file.options.module.?; + const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: { + const payload_ty = operand_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :blk .{ .ty = operand_ty, .bind = operand_bind }; - const offset = @intCast(u32, payload_ty.abiSize(self.target.*)); + const offset = @intCast(u32, payload_ty.abiSize(mod)); const operand_mcv = try operand_bind.resolveToMcv(self); const new_mcv: MCValue = switch (operand_mcv) { .register => |source_reg| new: { @@ -4838,7 +4851,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue { try self.genSetReg(payload_ty, dest_reg, operand_mcv); } else { _ = try self.addInst(.{ - .tag = if (payload_ty.isSignedInt()) + .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.asr_immediate else Mir.Inst.Tag.lsr_immediate, @@ -4875,9 +4888,10 @@ fn isErr( error_union_bind: ReadArg.Bind, error_union_ty: Type, ) !MCValue { - const error_type = error_union_ty.errorUnionSet(); + const mod = self.bin_file.options.module.?; + const error_type = error_union_ty.errorUnionSet(mod); - if (error_type.errorSetIsEmpty()) { + if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } @@ -4908,7 +4922,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNull(.{ .mcv = operand }, operand_ty); }; @@ -4916,11 +4930,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const ptr_ty = self.typeOf(un_op); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4934,7 +4949,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNonNull(.{ .mcv = operand }, operand_ty); }; @@ -4942,11 +4957,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod 
= self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const ptr_ty = self.typeOf(un_op); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4960,7 +4976,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isErr(error_union_bind, error_union_ty); }; @@ -4968,11 +4984,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const ptr_ty = self.typeOf(un_op); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4986,7 +5003,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isNonErr(error_union_bind, error_union_ty); }; @@ -4994,11 +5011,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const ptr_ty = self.typeOf(un_op); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -5065,7 +5083,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const liveness = try self.liveness.getSwitchBr( self.gpa, @@ -5210,9 +5228,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { } fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { + const mod = self.bin_file.options.module.?; const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5220,14 +5239,14 @@ 
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .none, .dead, .unreach => unreachable, .register, .stack_offset, .memory => operand_mcv, .immediate, .stack_argument_offset, .compare_flags => blk: { - const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -5293,7 +5312,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -5386,7 +5405,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5441,11 +5461,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_ty = ty.structFieldType(1, mod); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const raw_cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty); @@ -5478,11 +5498,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5559,6 +5575,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. 
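// (From the genSetStack hunk above: spilling a register_with_overflow
// value writes the tuple's two fields separately, and both field types
// and the overflow bit's offset are now queried through `mod`:
//
//   const wrapped_ty = ty.structFieldType(0, mod);       // the wrapped result
//   const overflow_bit_ty = ty.structFieldType(1, mod);  // the u1 flag
//   const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
//
// Pre-interned constants such as Type.u1 and Type.manyptr_u8 replace the
// old Type.initTag(...) calls throughout this file for the same reason:
// these types already live in the pool.)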
@@ -5669,13 +5686,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void try self.genLdrRegister(reg, reg.toX(), ty); }, .stack_offset => |off| { - const abi_size = ty.abiSize(self.target.*); + const abi_size = ty.abiSize(mod); switch (abi_size) { 1, 2, 4, 8 => { const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack, 4, 8 => .ldr_stack, else => unreachable, // unexpected abi size }; @@ -5693,13 +5710,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, .stack_argument_offset => |off| { - const abi_size = ty.abiSize(self.target.*); + const abi_size = ty.abiSize(mod); switch (abi_size) { 1, 2, 4, 8 => { const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, 4, 8 => .ldr_stack_argument, else => unreachable, // unexpected abi size }; @@ -5720,7 +5737,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .none, .unreach => return, @@ -5728,7 +5746,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I if (!self.wantSafety()) return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. 
- switch (ty.abiSize(self.target.*)) { + switch (ty.abiSize(mod)) { 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), @@ -5798,11 +5816,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5913,7 +5927,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest = try self.allocRegOrMem(dest_ty, true, inst); try self.setRegOrMem(dest_ty, dest, operand); break :result dest; @@ -5922,19 +5936,20 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = @intCast(u32, array_ty.arrayLen()); + const array_ty = ptr_ty.childType(mod); + const array_len = @intCast(u32, array_ty.arrayLen(mod)); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -6044,8 +6059,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const mod = self.bin_file.options.module.?; + const vector_ty = self.typeOfIndex(inst); + const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { @@ -6087,14 +6103,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn airTry(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; - const error_union_ty = self.air.typeOf(pl_op.operand); - const error_union_size = @intCast(u32, 
error_union_ty.abiSize(self.target.*)); - const error_union_align = error_union_ty.abiAlignment(self.target.*); + const error_union_ty = self.typeOf(pl_op.operand); + const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the // error union after the body in order to extract the payload @@ -6123,37 +6140,32 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) + const inst_ty = self.typeOf(inst); + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = (try self.air.value(inst, mod)).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } @@ -6208,12 +6220,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. 
.return_value = undefined, .stack_byte_count = undefined, @@ -6221,7 +6232,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -6236,14 +6247,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var ncrn: usize = 0; // Next Core Register Number var nsaa: u32 = 0; // Next stacked argument address - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size == 0) { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; } else if (ret_ty_size <= 8) { result.return_value = .{ .register = self.registerAlias(c_abi_int_return_regs[0], ret_ty) }; @@ -6252,8 +6263,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + for (fn_info.param_types, 0..) |ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size == 0) { result.args[i] = .{ .none = {} }; continue; @@ -6261,14 +6272,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned // values to spread across odd-numbered registers. - if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) { + if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) { // Round up NCRN to the next even number ncrn += ncrn % 2; } if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) { if (param_size <= 8) { - result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty) }; + result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) }; ncrn += 1; } else { return self.fail("TODO MCValues with multiple registers", .{}); @@ -6279,7 +6290,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { ncrn = 8; // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided // that the entire stack space consumed by the arguments is 8-byte aligned. 
- if (ty.abiAlignment(self.target.*) == 8) { + if (ty.toType().abiAlignment(mod) == 8) { if (nsaa % 8 != 0) { nsaa += 8 - (nsaa % 8); } @@ -6294,14 +6305,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { result.stack_align = 16; }, .Unspecified => { - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size == 0) { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; } else if (ret_ty_size <= 8) { result.return_value = .{ .register = self.registerAlias(.x0, ret_ty) }; @@ -6317,10 +6328,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (param_types, 0..) |ty, i| { - if (ty.abiSize(self.target.*) > 0) { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); - const param_alignment = ty.abiAlignment(self.target.*); + for (fn_info.param_types, 0..) |ty, i| { + if (ty.toType().abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; @@ -6371,7 +6382,8 @@ fn parseRegName(name: []const u8) ?Register { } fn registerAlias(self: *Self, reg: Register, ty: Type) Register { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (reg.class()) { .general_purpose => { @@ -6397,3 +6409,13 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register { }, } } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index 0c48f33ea1..72a6172895 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -4,6 +4,7 @@ const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = union(enum) { memory, @@ -14,44 +15,44 @@ pub const Class = union(enum) { }; /// For `float_array` the second element will be the amount of floats. 
-pub fn classifyType(ty: Type, target: std.Target) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *Module) Class { + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); var maybe_float_bits: ?u16 = null; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.containerLayout() == .Packed) return .byval; - const float_count = countFloats(ty, target, &maybe_float_bits); + if (ty.containerLayout(mod) == .Packed) return .byval; + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= sret_float_count) return .{ .float_array = float_count }; - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > 128) return .memory; if (bit_size > 64) return .double_integer; return .integer; }, .Union => { - if (ty.containerLayout() == .Packed) return .byval; - const float_count = countFloats(ty, target, &maybe_float_bits); + if (ty.containerLayout(mod) == .Packed) return .byval; + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= sret_float_count) return .{ .float_array = float_count }; - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > 128) return .memory; if (bit_size > 64) return .double_integer; return .integer; }, .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); // TODO is this controlled by a cpu feature? if (bit_size > 128) return .memory; return .byval; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, @@ -73,14 +74,15 @@ pub fn classifyType(ty: Type, target: std.Target) Class { } const sret_float_count = 4; -fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 { +fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 { + const target = mod.getTarget(); const invalid = std.math.maxInt(u8); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Union => { - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); var max_count: u8 = 0; for (fields.values()) |field| { - const field_count = countFloats(field.ty, target, maybe_float_bits); + const field_count = countFloats(field.ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; if (field_count > max_count) max_count = field_count; if (max_count > sret_float_count) return invalid; @@ -88,12 +90,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 { return max_count; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = ty.structFieldCount(mod); var count: u8 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); - const field_count = countFloats(field_ty, target, maybe_float_bits); + const field_ty = ty.structFieldType(i, mod); + const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; if (count > sret_float_count) return invalid; @@ -113,21 +115,21 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 { } } -pub fn getFloatArrayType(ty: Type) ?Type { - switch (ty.zigTypeTag()) { +pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type { + switch (ty.zigTypeTag(mod)) { .Union => { - const fields = 
ty.unionFields(); + const fields = ty.unionFields(mod); for (fields.values()) |field| { - if (getFloatArrayType(field.ty)) |some| return some; + if (getFloatArrayType(field.ty, mod)) |some| return some; } return null; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = ty.structFieldCount(mod); var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); - if (getFloatArrayType(field_ty)) |some| return some; + const field_ty = ty.structFieldType(i, mod); + if (getFloatArrayType(field_ty, mod)) |some| return some; } return null; }, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index bdc1627bd6..69a156999b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -334,7 +334,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -345,6 +345,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -477,7 +478,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // push {fp, lr} const push_reloc = try self.addNop(); @@ -518,10 +520,10 @@ fn gen(self: *Self) !void { const inst = self.air.getMainBody()[arg_index]; assert(self.air.instructions.items(.tag)[inst] == .arg); - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - const abi_align = ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_align = ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); @@ -636,13 +638,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -826,8 +829,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -900,8 +902,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. 
fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -937,8 +938,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -1006,9 +1007,10 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst).childType(mod); - if (!elem_ty.hasRuntimeBits()) { + if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized @@ -1016,22 +1018,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(abi_size, abi_align, inst); } fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue { - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. 
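// A minimal sketch (not part of the patch) of the migration pattern these
// hunks apply throughout this backend: layout and type queries that used to
// take `self.target.*` now take the Module, fetched once per function from
// `self.bin_file.options.module.?`. The helper name `elemAbiSize` is
// hypothetical; `childType(mod)` and `abiSize(mod)` are the Module-taking
// forms used in the changed lines above.
fn elemAbiSize(self: *Self, ptr_ty: Type) u64 {
    // The Module owns the InternPool that type queries now consult.
    const mod = self.bin_file.options.module.?;
    // `childType(mod)` supersedes the old `elemType()` / `childType()`.
    const elem_ty = ptr_ty.childType(mod);
    // `abiSize(mod)` supersedes `abiSize(self.target.*)`.
    return elem_ty.abiSize(mod);
}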
@@ -1049,7 +1050,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); + const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst); log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -1063,14 +1064,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Save the current instruction stored in the compare flags if /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.cpsr_flags_inst) |inst_to_save| { - const ty = self.air.typeOfIndex(inst_to_save); + const ty = self.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { .cpsr_flags => try self.allocRegOrMem(ty, true, inst_to_save), @@ -1080,7 +1081,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1114,17 +1115,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { } fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const result: MCValue = switch (self.ret_mcv) { .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) }, .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into - const ret_ty = self.fn_type.fnReturnType(); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ret_ty = self.fn_type.fnReturnType(mod); + const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the // result into @@ -1150,18 +1148,19 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); - const operand_abi_size = operand_ty.abiSize(self.target.*); - const dest_abi_size = dest_ty.abiSize(self.target.*); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = dest_ty.intInfo(self.target.*); + const operand_abi_size = operand_ty.abiSize(mod); + const dest_abi_size = dest_ty.abiSize(mod); + const info_a = operand_ty.intInfo(mod); + const info_b = dest_ty.intInfo(mod); const dst_mcv: MCValue = blk: { if 
(info_a.bits == info_b.bits) { @@ -1215,8 +1214,9 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const info_a = operand_ty.intInfo(self.target.*); - const info_b = dest_ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const info_a = operand_ty.intInfo(mod); + const info_b = dest_ty.intInfo(mod); if (info_b.bits <= 32) { if (info_a.bits > 32) { @@ -1259,8 +1259,8 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand_bind, operand_ty, dest_ty); @@ -1278,15 +1278,16 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (try operand_bind.resolveToMcv(self)) { .dead => unreachable, .unreach => unreachable, .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() }, else => { - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Bool => { var op_reg: Register = undefined; var dest_reg: Register = undefined; @@ -1319,7 +1320,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }, .Vector => return self.fail("TODO bitwise not for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits <= 32) { var op_reg: Register = undefined; var dest_reg: Register = undefined; @@ -1373,13 +1374,13 @@ fn minMax( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM min/max on floats", .{}), .Vector => return self.fail("TODO ARM min/max on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { var lhs_reg: Register = undefined; var rhs_reg: Register = undefined; @@ -1463,8 +1464,8 @@ fn minMax( fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1483,9 +1484,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const 
ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); const stack_offset = try self.allocMem(8, 4, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); @@ -1497,8 +1498,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1548,8 +1549,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1582,23 +1583,23 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_ty = self.typeOfIndex(inst); + const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits < 32) { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -1631,7 +1632,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits == 32) { @@ -1695,23 +1696,23 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void 
{ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); + const mod = self.bin_file.options.module.?; const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_ty = self.typeOfIndex(inst); + const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 16) { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -1744,7 +1745,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits <= 32) { @@ -1842,7 +1843,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); // strb rdlo, [...] 
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .register = rdlo }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .register = rdlo }); break :result MCValue{ .stack_offset = stack_offset }; } else { @@ -1859,19 +1860,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); + const mod = self.bin_file.options.module.?; const result: MCValue = result: { - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_ty = self.typeOfIndex(inst); + const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -1976,7 +1978,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else { @@ -2014,10 +2016,11 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOfIndex(inst); - const abi_size = @intCast(u32, optional_ty.abiSize(self.target.*)); + const optional_ty = self.typeOfIndex(inst); + const abi_size = @intCast(u32, optional_ty.abiSize(mod)); // Optional with a zero-bit payload type is just a boolean true if (abi_size == 1) { @@ -2036,16 +2039,17 @@ fn errUnionErr( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + const mod = self.bin_file.options.module.?; + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); + if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try error_union_bind.resolveToMcv(self); } - const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*)); + const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod)); switch 
(try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2067,7 +2071,7 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8; + const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers @@ -2098,7 +2102,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionErr(error_union_bind, error_union_ty, inst); }; @@ -2112,16 +2116,17 @@ fn errUnionPayload( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + const mod = self.bin_file.options.module.?; + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); + if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2143,10 +2148,10 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8; + const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, @@ -2174,7 +2179,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst); }; @@ -2221,19 +2226,20 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break 
:result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); @@ -2244,19 +2250,20 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); @@ -2360,8 +2367,9 @@ fn ptrElemVal( ptr_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const elem_ty = ptr_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (elem_size) { 1, 4 => { @@ -2418,11 +2426,11 @@ fn ptrElemVal( } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&buf); + const slice_ty = self.typeOf(bin_op.lhs); + const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { + const ptr_ty = slice_ty.slicePtrFieldType(mod); const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); @@ -2445,8 +2453,8 @@ fn 
airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const slice_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const slice_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null); break :result addr; @@ -2461,7 +2469,8 @@ fn arrayElemVal( array_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const elem_ty = array_ty.childType(); + const mod = self.bin_file.options.module.?; + const elem_ty = array_ty.childType(mod); const mcv = try array_bind.resolveToMcv(self); switch (mcv) { @@ -2495,11 +2504,7 @@ fn arrayElemVal( const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv }; - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = elem_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(elem_ty); return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst); }, @@ -2512,7 +2517,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const array_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; - const array_ty = self.air.typeOf(bin_op.lhs); + const array_ty = self.typeOf(bin_op.lhs); break :result try self.arrayElemVal(array_bind, index_bind, array_ty, inst); }; @@ -2520,9 +2525,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const ptr_ty = self.typeOf(bin_op.lhs); + const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -2538,8 +2544,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const ptr_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const ptr_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null); break :result addr; @@ -2646,8 +2652,9 @@ fn reuseOperand( } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const elem_ty = ptr_ty.elemType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (ptr) { .none => unreachable, @@ -2722,19 +2729,20 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); + const elem_ty = self.typeOfIndex(inst); const 
result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; const dest_mcv: MCValue = blk: { - const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4; + const ptr_fits_dest = elem_ty.abiSize(mod) <= 4; if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) { // The MCValue that holds the pointer can be re-used as the value. break :blk ptr; @@ -2742,7 +2750,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(elem_ty, true, inst); } }; - try self.load(dest_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dest_mcv, ptr, self.typeOf(ty_op.operand)); break :result dest_mcv; }; @@ -2750,7 +2758,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const elem_size = @intCast(u32, value_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, value_ty.abiSize(mod)); switch (ptr) { .none => unreachable, @@ -2846,8 +2855,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -2869,10 +2878,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); - const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const ptr_ty = self.typeOf(operand); + const struct_ty = ptr_ty.childType(mod); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -2892,11 +2902,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; const index = extra.field_index; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); - const struct_field_ty = struct_ty.structFieldType(index); + const struct_ty = self.typeOf(operand); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); + const struct_field_ty = struct_ty.structFieldType(index, mod); switch (mcv) { .dead, .unreach => unreachable, @@ -2959,10 +2970,10 @@ fn airStructFieldVal(self: 
*Self, inst: Air.Inst.Index) !void { ); const field_bit_offset = struct_field_offset * 8; - const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8; + const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, @@ -2981,17 +2992,18 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); - const struct_ty = self.air.getRefType(ty_pl.ty).childType(); + const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod); - if (struct_ty.zigTypeTag() == .Union) { + if (struct_ty.zigTypeTag(mod) == .Union) { return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); } - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -3375,12 +3387,12 @@ fn addSub( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3431,12 +3443,12 @@ fn mul( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // TODO add optimisations for multiplication // with immediates, for example a * 2 can be @@ -3463,7 +3475,8 @@ fn divFloat( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), else => unreachable, @@ -3479,12 +3492,12 @@ fn divTrunc( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const 
int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3522,12 +3535,12 @@ fn divFloor( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3569,7 +3582,8 @@ fn divExact( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => return self.fail("TODO ARM div_exact", .{}), @@ -3586,12 +3600,12 @@ fn rem( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3654,7 +3668,8 @@ fn modulo( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => return self.fail("TODO ARM mod", .{}), @@ -3671,10 +3686,11 @@ fn wrappingArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // Generate an add/sub/mul const result: MCValue = switch (tag) { @@ -3708,12 +3724,12 @@ fn bitwise( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3753,16 +3769,17 @@ fn shiftExact( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const 
rhs_immediate = try rhs_bind.resolveToImmediate(self); const mir_tag: Mir.Inst.Tag = switch (tag) { .shl_exact => .lsl, - .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) { + .shr_exact => switch (lhs_ty.intInfo(mod).signedness) { .signed => Mir.Inst.Tag.asr, .unsigned => Mir.Inst.Tag.lsr, }, @@ -3791,10 +3808,11 @@ fn shiftNormal( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // Generate a shl_exact/shr_exact const result: MCValue = switch (tag) { @@ -3833,7 +3851,8 @@ fn booleanOp( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3866,17 +3885,17 @@ fn ptrArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { - const mod = self.bin_file.options.module.?; assert(rhs_ty.eql(Type.usize, mod)); const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -3903,11 +3922,12 @@ fn ptrArithmetic( } fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh, 3, 4 => .ldr, else => unreachable, }; @@ -3924,7 +3944,7 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } }; const data: Mir.Inst.Data = switch (abi_size) { - 1 => if (ty.isSignedInt()) rr_extra_offset else rr_offset, + 1 => if (ty.isSignedInt(mod)) rr_extra_offset else rr_offset, 2 => rr_extra_offset, 3, 4 => rr_offset, else => unreachable, @@ -3937,7 +3957,8 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -4051,14 +4072,14 @@ fn genInlineMemset( ) !void { const dst_reg = switch (dst) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst), + else => try 
self.copyToTmpRegister(Type.manyptr_u8, dst), }; const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock); const val_reg = switch (val) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.u8), val), + else => try self.copyToTmpRegister(Type.u8, val), }; const val_reg_lock = self.register_manager.lockReg(val_reg); defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -4143,7 +4164,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[inst]; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); @@ -4196,11 +4217,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); + const mod = self.bin_file.options.module.?; - const fn_ty = switch (ty.zigTypeTag()) { + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -4225,16 +4247,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // untouched by the parameter passing code const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); - const ret_ty = fn_ty.fnReturnType(); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*)); + const ret_ty = fn_ty.fnReturnType(mod); + const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.register_manager.getReg(.r0, null); try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset }); @@ -4249,7 +4267,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -4270,16 +4288,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
-    if (self.air.value(callee)) |func_value| {
-        if (func_value.castTag(.function)) |func_payload| {
-            const func = func_payload.data;
-
+    if (try self.air.value(callee, mod)) |func_value| {
+        if (func_value.getFunction(mod)) |func| {
             if (self.bin_file.cast(link.File.Elf)) |elf_file| {
                 const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
                 const atom = elf_file.getAtom(atom_index);
                 _ = try atom.getOrCreateOffsetTableEntry(elf_file);
                 const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
-                try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
+                try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr });
             } else if (self.bin_file.cast(link.File.MachO)) |_| {
                 unreachable; // unsupported architecture for MachO
             } else {
@@ -4288,16 +4304,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                     @tagName(self.target.cpu.arch),
                 });
             }
-        } else if (func_value.castTag(.extern_fn)) |_| {
+        } else if (func_value.getExternFunc(mod)) |_| {
             return self.fail("TODO implement calling extern functions", .{});
         } else {
             return self.fail("TODO implement calling bitcasted functions", .{});
         }
     } else {
-        assert(ty.zigTypeTag() == .Pointer);
+        assert(ty.zigTypeTag(mod) == .Pointer);
         const mcv = try self.resolveInst(callee);
-        try self.genSetReg(Type.initTag(.usize), .lr, mcv);
+        try self.genSetReg(Type.usize, .lr, mcv);
     }

     // TODO: add Instruction.supportedOn
@@ -4329,7 +4345,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             if (RegisterManager.indexOfRegIntoTracked(reg) == null) {
                 // Save function return value into a tracked register
                 log.debug("airCall: copying {} as it is not tracked", .{reg});
-                const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value);
+                const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(mod), info.return_value);
                 break :result MCValue{ .register = new_reg };
             }
         },
@@ -4353,14 +4369,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 }

 fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const operand = try self.resolveInst(un_op);
-    const ret_ty = self.fn_type.fnReturnType();
+    const ret_ty = self.fn_type.fnReturnType(mod);

     switch (self.ret_mcv) {
         .none => {},
         .immediate => {
-            assert(ret_ty.isError());
+            assert(ret_ty.isError(mod));
         },
         .register => |reg| {
             // Return result by value
@@ -4371,11 +4388,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
             //
             // self.ret_mcv is an address to where this function
             // should store its result into
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
             try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
         },
         else => unreachable, // invalid return result
@@ -4388,10 +4401,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const ptr = try self.resolveInst(un_op);
-    const ptr_ty = self.air.typeOf(un_op);
-    const ret_ty = self.fn_type.fnReturnType();
+    const ptr_ty = self.typeOf(un_op);
+    const ret_ty = self.fn_type.fnReturnType(mod);

     switch (self.ret_mcv) {
         .none => {},
@@ -4411,8 +4425,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = Air.refToIndex(un_op).?;
             if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-                const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-                const abi_align = ret_ty.abiAlignment(self.target.*);
+                const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+                const abi_align = ret_ty.abiAlignment(mod);

                 const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4432,7 +4446,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {

 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
         break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op);
@@ -4448,29 +4462,28 @@ fn cmp(
     lhs_ty: Type,
     op: math.CompareOperator,
 ) !MCValue {
-    var int_buffer: Type.Payload.Bits = undefined;
-    const int_ty = switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Optional => blk: {
-            var opt_buffer: Type.Payload.ElemType = undefined;
-            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
-                break :blk Type.initTag(.u1);
-            } else if (lhs_ty.isPtrLikeOptional()) {
+            const payload_ty = lhs_ty.optionalChild(mod);
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+                break :blk Type.u1;
+            } else if (lhs_ty.isPtrLikeOptional(mod)) {
                 break :blk Type.usize;
             } else {
                 return self.fail("TODO ARM cmp non-pointer optionals", .{});
             }
         },
         .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(&int_buffer),
+        .Enum => lhs_ty.intTagType(mod),
         .Int => lhs_ty,
-        .Bool => Type.initTag(.u1),
+        .Bool => Type.u1,
         .Pointer => Type.usize,
-        .ErrorSet => Type.initTag(.u16),
+        .ErrorSet => Type.u16,
         else => unreachable,
     };

-    const int_info = int_ty.intInfo(self.target.*);
+    const int_info = int_ty.intInfo(mod);
     if (int_info.bits <= 32) {
         try self.spillCompareFlagsIfOccupied();
@@ -4555,8 +4568,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+    const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
+    const mod = self.bin_file.options.module.?;
+    const function = mod.funcPtr(ty_fn.func);
     // TODO emit debug info for function change
     _ = function;
     return self.finishAir(inst, .dead, .{ .none, .none, .none });
@@ -4571,7 +4585,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const operand = pl_op.operand;
     const tag = self.air.instructions.items(.tag)[inst];
-    const ty = self.air.typeOf(operand);
+    const ty = self.typeOf(operand);
     const mcv = try self.resolveInst(operand);
     const name = self.air.nullTerminatedString(pl_op.payload);
@@ -4636,8 +4650,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     // whether it needs to be spilled in the branches
     if (self.liveness.operandDies(inst, 0)) {
         const op_int = @enumToInt(pl_op.operand);
-        if (op_int >= Air.Inst.Ref.typed_value_map.len) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int >= Air.ref_start_index) {
+            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
             self.processDeath(op_index);
         }
     }
@@ -4726,7 +4740,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
             log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
             // TODO make sure the destination stack offset / register does not already have something
             // going on there.
-            try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value);
+            try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value);
             // TODO track the new register / stack allocation
         }
         try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
@@ -4753,7 +4767,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
             log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
             // TODO make sure the destination stack offset / register does not already have something
             // going on there.
-            try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
+            try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value);
             // TODO track the new register / stack allocation
         }

@@ -4772,8 +4786,9 @@ fn isNull(
     operand_bind: ReadArg.Bind,
     operand_ty: Type,
 ) !MCValue {
-    if (operand_ty.isPtrLikeOptional()) {
-        assert(operand_ty.abiSize(self.target.*) == 4);
+    const mod = self.bin_file.options.module.?;
+    if (operand_ty.isPtrLikeOptional(mod)) {
+        assert(operand_ty.abiSize(mod) == 4);

         const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } };
         return self.cmp(operand_bind, imm_bind, Type.usize, .eq);
@@ -4797,7 +4812,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = un_op };
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);

         break :result try self.isNull(operand_bind, operand_ty);
     };
@@ -4805,11 +4820,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(un_op);
+        const elem_ty = ptr_ty.childType(mod);

         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4823,7 +4839,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = un_op };
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);

         break :result try self.isNonNull(operand_bind, operand_ty);
     };
@@ -4831,11 +4847,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(un_op);
+        const elem_ty = ptr_ty.childType(mod);

         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4850,9 +4867,10 @@ fn isErr(
     error_union_bind: ReadArg.Bind,
     error_union_ty: Type,
 ) !MCValue {
-    const error_type = error_union_ty.errorUnionSet();
+    const mod = self.bin_file.options.module.?;
+    const error_type = error_union_ty.errorUnionSet(mod);

-    if (error_type.errorSetIsEmpty()) {
+    if (error_type.errorSetIsEmpty(mod)) {
         return MCValue{ .immediate = 0 }; // always false
     }
@@ -4883,7 +4901,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
-        const error_union_ty = self.air.typeOf(un_op);
+        const error_union_ty = self.typeOf(un_op);

         break :result try self.isErr(error_union_bind, error_union_ty);
     };
@@ -4891,11 +4909,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(un_op);
+        const elem_ty = ptr_ty.childType(mod);

         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4909,7 +4928,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
-        const error_union_ty = self.air.typeOf(un_op);
+        const error_union_ty = self.typeOf(un_op);

         break :result try self.isNonErr(error_union_bind, error_union_ty);
     };
@@ -4917,11 +4936,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(un_op);
+        const elem_ty = ptr_ty.childType(mod);

         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4988,7 +5008,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {

 fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const condition_ty = self.air.typeOf(pl_op.operand);
+    const condition_ty = self.typeOf(pl_op.operand);
     const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
     const liveness = try self.liveness.getSwitchBr(
         self.gpa,
@@ -5131,9 +5151,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
+    const mod = self.bin_file.options.module.?;
     const block_data = self.blocks.getPtr(block).?;

-    if (self.air.typeOf(operand).hasRuntimeBits()) {
+    if (self.typeOf(operand).hasRuntimeBits(mod)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -5141,14 +5162,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
                 .none, .dead, .unreach => unreachable,
                 .register, .stack_offset, .memory => operand_mcv,
                 .immediate, .stack_argument_offset, .cpsr_flags => blk: {
-                    const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block);
-                    try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+                    const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block);
+                    try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv);
                     break :blk new_mcv;
                 },
                 else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
             };
         } else {
-            try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+            try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
         }
     }
     return self.brVoid(block);
@@ -5212,7 +5233,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
             const arg_mcv = try self.resolveInst(input);

             try self.register_manager.getReg(reg, null);
-            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+            try self.genSetReg(self.typeOf(input), reg, arg_mcv);
         }

         {
@@ -5301,7 +5322,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 }

 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5332,7 +5354,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 1, 4 => {
                     const offset = if (math.cast(u12, stack_offset)) |imm| blk: {
                         break :blk Instruction.Offset.imm(imm);
-                    } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
+                    } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none);

                     const tag: Mir.Inst.Tag = switch (abi_size) {
                         1 => .strb,
@@ -5355,7 +5377,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 2 => {
                     const offset = if (stack_offset <= math.maxInt(u8)) blk: {
                         break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
-                    } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }));
+                    } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }));

                     _ = try self.addInst(.{
                         .tag = .strh,
@@ -5378,11 +5400,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);

-            const wrapped_ty = ty.structFieldType(0);
+            const wrapped_ty = ty.structFieldType(0, mod);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });

-            const overflow_bit_ty = ty.structFieldType(1);
-            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+            const overflow_bit_ty = ty.structFieldType(1, mod);
+            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const cond_reg = try self.register_manager.allocReg(null, gp);

             // C flag: movcs reg, #1
@@ -5420,11 +5442,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);

                 // TODO call extern memcpy
                 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5466,6 +5484,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 }

 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5640,17 +5659,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
         },
         .stack_offset => |off| {
             // TODO: maybe addressing from sp instead of fp
-            const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+            const abi_size = @intCast(u32, ty.abiSize(mod));

             const tag: Mir.Inst.Tag = switch (abi_size) {
-                1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb,
-                2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh,
+                1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
+                2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh,
                 3, 4 => .ldr,
                 else => unreachable,
             };

             const extra_offset = switch (abi_size) {
-                1 => ty.isSignedInt(),
+                1 => ty.isSignedInt(mod),
                 2 => true,
                 3, 4 => false,
                 else => unreachable,
@@ -5659,7 +5678,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             if (extra_offset) {
                 const offset = if (off <= math.maxInt(u8)) blk: {
                     break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off));
-                } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off }));
+                } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }));

                 _ = try self.addInst(.{
                     .tag = tag,
@@ -5675,7 +5694,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             } else {
                 const offset = if (off <= math.maxInt(u12)) blk: {
                     break :blk Instruction.Offset.imm(@intCast(u12, off));
-                } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off }), .none);
+                } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none);

                 _ = try self.addInst(.{
                     .tag = tag,
@@ -5691,11 +5710,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             }
         },
         .stack_argument_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);

             const tag: Mir.Inst.Tag = switch (abi_size) {
-                1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
-                2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+                1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+                2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
                 3, 4 => .ldr_stack_argument,
                 else => unreachable,
             };
@@ -5712,7 +5731,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 }

 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
         .dead => unreachable,
         .none, .unreach => return,
@@ -5732,7 +5752,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                 1, 4 => {
                     const offset = if (math.cast(u12, stack_offset)) |imm| blk: {
                         break :blk Instruction.Offset.imm(imm);
-                    } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none);
+                    } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none);

                     const tag: Mir.Inst.Tag = switch (abi_size) {
                         1 => .strb,
@@ -5752,7 +5772,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                 2 => {
                     const offset = if (stack_offset <= math.maxInt(u8)) blk: {
                         break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset));
-                    } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }));
+                    } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }));

                     _ = try self.addInst(.{
                         .tag = .strh,
@@ -5779,11 +5799,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);

                 // TODO call extern memcpy
                 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5862,7 +5878,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest = try self.allocRegOrMem(dest_ty, true, inst);
         try self.setRegOrMem(dest_ty, dest, operand);
         break :result dest;
@@ -5871,16 +5887,17 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_ty = self.air.typeOf(ty_op.operand);
+        const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType();
-        const array_len = @intCast(u32, array_ty.arrayLen());
+        const array_ty = ptr_ty.childType(mod);
+        const array_len = @intCast(u32, array_ty.arrayLen(mod));

         const stack_offset = try self.allocMem(8, 8, inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
-        try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len });
+        try self.genSetStack(Type.usize, stack_offset - 4, .{ .immediate = array_len });
         break :result MCValue{ .stack_offset = stack_offset };
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -5989,8 +6006,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
-    const vector_ty = self.air.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const mod = self.bin_file.options.module.?;
+    const vector_ty = self.typeOfIndex(inst);
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
@@ -6038,9 +6056,10 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
     const body = self.air.extra[extra.end..][0..extra.data.body_len];
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
-        const error_union_ty = self.air.typeOf(pl_op.operand);
-        const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const error_union_align = error_union_ty.abiAlignment(self.target.*);
+        const error_union_ty = self.typeOf(pl_op.operand);
+        const mod = self.bin_file.options.module.?;
+        const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const error_union_align = error_union_ty.abiAlignment(mod);

         // The error union will die in the body. However, we need the
         // error union after the body in order to extract the payload
@@ -6069,37 +6088,32 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
-    // First section of indexes correspond to a set number of constant values.
-    const ref_int = @enumToInt(inst);
-    if (ref_int < Air.Inst.Ref.typed_value_map.len) {
-        const tv = Air.Inst.Ref.typed_value_map[ref_int];
-        if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
-            return MCValue{ .none = {} };
-        }
-        return self.genTypedValue(tv);
-    }
+    const mod = self.bin_file.options.module.?;

     // If the type has no codegen bits, no need to store it.
-    const inst_ty = self.air.typeOf(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
+    const inst_ty = self.typeOf(inst);
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
         return MCValue{ .none = {} };

-    const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+    const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
+        .ty = inst_ty,
+        .val = (try self.air.value(inst, mod)).?,
+    });
+
     switch (self.air.instructions.items(.tag)[inst_index]) {
-        .constant => {
+        .interned => {
             // Constants have static lifetimes, so they are always memoized in the outer most table.
             const branch = &self.branch_stack.items[0];
             const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
             if (!gop.found_existing) {
-                const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
+                const interned = self.air.instructions.items(.data)[inst_index].interned;
                 gop.value_ptr.* = try self.genTypedValue(.{
                     .ty = inst_ty,
-                    .val = self.air.values[ty_pl.payload],
+                    .val = interned.toValue(),
                 });
             }
             return gop.value_ptr.*;
         },
-        .const_ty => unreachable,
         else => return self.getResolvedInstValue(inst_index),
     }
 }
@@ -6152,12 +6166,11 @@ const CallMCValues = struct {

 /// Caller must call `CallMCValues.deinit`.
 fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
-    const cc = fn_ty.fnCallingConvention();
-    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
-    defer self.gpa.free(param_types);
-    fn_ty.fnParamTypes(param_types);
+    const mod = self.bin_file.options.module.?;
+    const fn_info = mod.typeToFunc(fn_ty).?;
+    const cc = fn_info.cc;
     var result: CallMCValues = .{
-        .args = try self.gpa.alloc(MCValue, param_types.len),
+        .args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
         // These undefined values must be populated before returning from this function.
         .return_value = undefined,
         .stack_byte_count = undefined,
@@ -6165,7 +6178,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     };
     errdefer self.gpa.free(result.args);

-    const ret_ty = fn_ty.fnReturnType();
+    const ret_ty = fn_ty.fnReturnType(mod);

     switch (cc) {
         .Naked => {
@@ -6180,12 +6193,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var ncrn: usize = 0; // Next Core Register Number
             var nsaa: u32 = 0; // Next stacked argument address

-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                 // TODO handle cases where multiple registers are used
                 if (ret_ty_size <= 4) {
                     result.return_value = .{ .register = c_abi_int_return_regs[0] };
@@ -6199,11 +6212,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                 }
             }

-            for (param_types, 0..) |ty, i| {
-                if (ty.abiAlignment(self.target.*) == 8)
+            for (fn_info.param_types, 0..) |ty, i| {
+                if (ty.toType().abiAlignment(mod) == 8)
                     ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2);

-                const param_size = @intCast(u32, ty.abiSize(self.target.*));
+                const param_size = @intCast(u32, ty.toType().abiSize(mod));
                 if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
                     if (param_size <= 4) {
                         result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
@@ -6215,7 +6228,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     return self.fail("TODO MCValues split between registers and stack", .{});
                 } else {
                     ncrn = 4;
-                    if (ty.abiAlignment(self.target.*) == 8)
+                    if (ty.toType().abiAlignment(mod) == 8)
                         nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8);

                     result.args[i] = .{ .stack_argument_offset = nsaa };
@@ -6227,14 +6240,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             result.stack_align = 8;
         },
         .Unspecified => {
-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 4) {
                     result.return_value = .{ .register = .r0 };
@@ -6249,10 +6262,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {

             var stack_offset: u32 = 0;

-            for (param_types, 0..) |ty, i| {
-                if (ty.abiSize(self.target.*) > 0) {
-                    const param_size = @intCast(u32, ty.abiSize(self.target.*));
-                    const param_alignment = ty.abiAlignment(self.target.*);
+            for (fn_info.param_types, 0..) |ty, i| {
+                if (ty.toType().abiSize(mod) > 0) {
+                    const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                    const param_alignment = ty.toType().abiAlignment(mod);

                     stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
                     result.args[i] = .{ .stack_argument_offset = stack_offset };
@@ -6301,3 +6314,13 @@ fn parseRegName(name: []const u8) ?Register {
     }
     return std.meta.stringToEnum(Register, name);
 }
+
+fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOf(inst, &mod.intern_pool);
+}
+
+fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOfIndex(inst, &mod.intern_pool);
+}
diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig
index 8b9ec45e24..e4a07f22bf 100644
--- a/src/arch/arm/abi.zig
+++ b/src/arch/arm/abi.zig
@@ -1,8 +1,10 @@
 const std = @import("std");
+const assert = std.debug.assert;
 const bits = @import("bits.zig");
 const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");

 pub const Class = union(enum) {
     memory,
@@ -22,28 +24,28 @@ pub const Class = union(enum) {

 pub const Context = enum { ret, arg };

-pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
-    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
+    assert(ty.hasRuntimeBitsIgnoreComptime(mod));

     var maybe_float_bits: ?u16 = null;
     const max_byval_size = 512;
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {
-            const bit_size = ty.bitSize(target);
-            if (ty.containerLayout() == .Packed) {
+            const bit_size = ty.bitSize(mod);
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
             if (bit_size > max_byval_size) return .memory;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= byval_float_count) return .byval;

-            const fields = ty.structFieldCount();
+            const fields = ty.structFieldCount(mod);
             var i: u32 = 0;
             while (i < fields) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
-                const field_alignment = ty.structFieldAlign(i, target);
-                const field_size = field_ty.bitSize(target);
+                const field_ty = ty.structFieldType(i, mod);
+                const field_alignment = ty.structFieldAlign(i, mod);
+                const field_size = field_ty.bitSize(mod);
                 if (field_size > 32 or field_alignment > 32) {
                     return Class.arrSize(bit_size, 64);
                 }
@@ -51,17 +53,17 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
             return Class.arrSize(bit_size, 32);
         },
         .Union => {
-            const bit_size = ty.bitSize(target);
-            if (ty.containerLayout() == .Packed) {
+            const bit_size = ty.bitSize(mod);
+            if (ty.containerLayout(mod) == .Packed) {
                 if (bit_size > 64) return .memory;
                 return .byval;
             }
             if (bit_size > max_byval_size) return .memory;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= byval_float_count) return .byval;

-            for (ty.unionFields().values()) |field| {
-                if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) {
+            for (ty.unionFields(mod).values()) |field| {
+                if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) {
                     return Class.arrSize(bit_size, 64);
                 }
             }
@@ -71,28 +73,28 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
         .Int => {
             // TODO this is incorrect for _BitInt(128) but implementing
             // this correctly makes implementing compiler-rt impossible.
-            // const bit_size = ty.bitSize(target);
+            // const bit_size = ty.bitSize(mod);
             // if (bit_size > 64) return .memory;
             return .byval;
         },
         .Enum, .ErrorSet => {
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             if (bit_size > 64) return .memory;
             return .byval;
         },
         .Vector => {
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             // TODO is this controlled by a cpu feature?
             if (ctx == .ret and bit_size > 128) return .memory;
             if (bit_size > 512) return .memory;
             return .byval;
         },
         .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional());
+            assert(ty.isPtrLikeOptional(mod));
             return .byval;
         },
         .Pointer => {
-            std.debug.assert(!ty.isSlice());
+            assert(!ty.isSlice(mod));
             return .byval;
         },
         .ErrorUnion,
@@ -114,14 +116,15 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
 }

 const byval_float_count = 4;
-fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
+fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
+    const target = mod.getTarget();
     const invalid = std.math.maxInt(u32);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Union => {
-            const fields = ty.unionFields();
+            const fields = ty.unionFields(mod);
             var max_count: u32 = 0;
             for (fields.values()) |field| {
-                const field_count = countFloats(field.ty, target, maybe_float_bits);
+                const field_count = countFloats(field.ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 if (field_count > max_count) max_count = field_count;
                 if (max_count > byval_float_count) return invalid;
@@ -129,12 +132,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
             return max_count;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var count: u32 = 0;
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
-                const field_count = countFloats(field_ty, target, maybe_float_bits);
+                const field_ty = ty.structFieldType(i, mod);
+                const field_count = countFloats(field_ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 count += field_count;
                 if (count > byval_float_count) return invalid;
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 5fb07c5fdc..809c388532 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -217,7 +217,7 @@ const Self = @This();
 pub fn generate(
     bin_file: *link.File,
     src_loc: Module.SrcLoc,
-    module_fn: *Module.Fn,
+    module_fn_index: Module.Fn.Index,
     air: Air,
     liveness: Liveness,
     code: *std.ArrayList(u8),
@@ -228,6 +228,7 @@ pub fn generate(
     }

     const mod = bin_file.options.module.?;
+    const module_fn = mod.funcPtr(module_fn_index);
     const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
     assert(fn_owner_decl.has_tv);
     const fn_type = fn_owner_decl.ty;
@@ -347,7 +348,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }

 fn gen(self: *Self) !void {
-    const cc = self.fn_type.fnCallingConvention();
+    const mod = self.bin_file.options.module.?;
+    const cc = self.fn_type.fnCallingConvention(mod);
     if (cc != .Naked) {
         // TODO Finish function prologue and epilogue for riscv64.
@@ -470,13 +472,14 @@ fn gen(self: *Self) !void {
 }

 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+    const mod = self.bin_file.options.module.?;
+    const ip = &mod.intern_pool;
     const air_tags = self.air.instructions.items(.tag);

     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             continue;
-        }

         const old_air_bookkeeping = self.air_bookkeeping;
         try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -656,8 +659,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .ptr_elem_val => try self.airPtrElemVal(inst),
             .ptr_elem_ptr => try self.airPtrElemPtr(inst),

-            .constant => unreachable, // excluded from function bodies
-            .const_ty => unreachable, // excluded from function bodies
+            .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,

             .unreach => self.finishAirBookkeeping(),

             .optional_payload => try self.airOptionalPayload(inst),
@@ -727,8 +729,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

 /// Asserts there is already capacity to insert into top branch inst_table.
 fn processDeath(self: *Self, inst: Air.Inst.Index) void {
-    const air_tags = self.air.instructions.items(.tag);
-    if (air_tags[inst] == .constant) return; // Constants are immortal.
+    assert(self.air.instructions.items(.tag)[inst] != .interned);
     // When editing this function, note that the logic must synchronize with `reuseOperand`.
     const prev_value = self.getResolvedInstValue(inst);
     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -755,8 +756,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
         self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
@@ -804,23 +805,23 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u

 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
-    const elem_ty = self.air.typeOfIndex(inst).elemType();
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = self.typeOfIndex(inst).childType(mod);
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     return self.allocMem(inst, abi_size, abi_align);
 }

 fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
-    const elem_ty = self.air.typeOfIndex(inst);
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = self.typeOfIndex(inst);
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     if (abi_align > self.stack_align)
         self.stack_align = abi_align;
@@ -845,7 +846,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
     assert(reg == reg_mcv.register);
     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
     try branch.inst_table.put(self.gpa, inst, stack_mcv);
-    try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
+    try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
 }

 /// Copies a value to a register without tracking the register. The register is not considered
@@ -862,7 +863,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
     const reg = try self.register_manager.allocReg(reg_owner, gp);
-    try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
+    try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv);
     return MCValue{ .register = reg };
 }

@@ -893,10 +894,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });

-    const operand_ty = self.air.typeOf(ty_op.operand);
+    const mod = self.bin_file.options.module.?;
+    const operand_ty = self.typeOf(ty_op.operand);
     const operand = try self.resolveInst(ty_op.operand);
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*);
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = self.typeOfIndex(inst).intInfo(mod);
     if (info_a.signedness != info_b.signedness)
         return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
@@ -1068,18 +1070,18 @@ fn binOp(
     lhs_ty: Type,
     rhs_ty: Type,
 ) InnerError!MCValue {
+    const mod = self.bin_file.options.module.?;
     switch (tag) {
         // Arithmetic operations on integers and floats
         .add,
         .sub,
         => {
-            switch (lhs_ty.zigTypeTag()) {
+            switch (lhs_ty.zigTypeTag(mod)) {
                 .Float => return self.fail("TODO binary operations on floats", .{}),
                 .Vector => return self.fail("TODO binary operations on vectors", .{}),
                 .Int => {
-                    const mod = self.bin_file.options.module.?;
                     assert(lhs_ty.eql(rhs_ty, mod));
-                    const int_info = lhs_ty.intInfo(self.target.*);
+                    const int_info = lhs_ty.intInfo(mod);
                     if (int_info.bits <= 64) {
                         // TODO immediate operands
                         return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
@@ -1093,14 +1095,14 @@ fn binOp(
         .ptr_add,
         .ptr_sub,
         => {
-            switch (lhs_ty.zigTypeTag()) {
+            switch (lhs_ty.zigTypeTag(mod)) {
                 .Pointer => {
                     const ptr_ty = lhs_ty;
-                    const elem_ty = switch (ptr_ty.ptrSize()) {
-                        .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                        else => ptr_ty.childType(),
+                    const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                        .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                        else => ptr_ty.childType(mod),
                    };
-                    const elem_size = elem_ty.abiSize(self.target.*);
+                    const elem_size = elem_ty.abiSize(mod);

                     if (elem_size == 1) {
                         const base_tag: Air.Inst.Tag = switch (tag) {
@@ -1125,8 +1127,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1137,8 +1139,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1331,10 +1333,11 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const optional_ty = self.air.typeOfIndex(inst);
+        const mod = self.bin_file.options.module.?;
+        const optional_ty = self.typeOfIndex(inst);

         // Optional with a zero-bit payload type is just a boolean true
-        if (optional_ty.abiSize(self.target.*) == 1)
+        if (optional_ty.abiSize(mod) == 1)
             break :result MCValue{ .immediate = 1 };

         return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
@@ -1498,7 +1501,8 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
 }

 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
-    const elem_ty = ptr_ty.elemType();
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
     switch (ptr) {
         .none => unreachable,
         .undef => unreachable,
@@ -1523,14 +1527,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 }

 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
+    const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits())
+        if (!elem_ty.hasRuntimeBits(mod))
             break :result MCValue.none;

         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod);
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;

@@ -1542,7 +1547,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
+        try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand));
         break :result dst_mcv;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1583,8 +1588,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr = try self.resolveInst(bin_op.lhs);
     const value = try self.resolveInst(bin_op.rhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const value_ty = self.air.typeOf(bin_op.rhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
+    const value_ty = self.typeOf(bin_op.rhs);

     try self.store(ptr, value, ptr_ty, value_ty);

@@ -1644,7 +1649,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     const arg_index = self.arg_index;
     self.arg_index += 1;

-    const ty = self.air.typeOfIndex(inst);
+    const ty = self.typeOfIndex(inst);
     _ = ty;

     const result = self.args[arg_index];
@@ -1698,9 +1703,10 @@ fn airFence(self: *Self) !void {
 }

 fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
+    const mod = self.bin_file.options.module.?;
     if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{});
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const fn_ty = self.air.typeOf(pl_op.operand);
+    const fn_ty = self.typeOf(pl_op.operand);
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
@@ -1713,7 +1719,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     if (self.bin_file.cast(link.File.Elf)) |elf_file| {
         for (info.args, 0..) |mc_arg, arg_i| {
             const arg = args[arg_i];
-            const arg_ty = self.air.typeOf(arg);
+            const arg_ty = self.typeOf(arg);
             const arg_mcv = try self.resolveInst(args[arg_i]);

             switch (mc_arg) {
@@ -1736,14 +1742,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             }
         }

-        if (self.air.value(callee)) |func_value| {
-            if (func_value.castTag(.function)) |func_payload| {
-                const func = func_payload.data;
+        if (try self.air.value(callee, mod)) |func_value| {
+            if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| {
                 const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
                 const atom = elf_file.getAtom(atom_index);
                 _ = try atom.getOrCreateOffsetTableEntry(elf_file);
                 const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
-                try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
+                try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
                 _ = try self.addInst(.{
                     .tag = .jalr,
                     .data = .{ .i_type = .{
@@ -1752,7 +1757,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                         .imm12 = 0,
                     } },
                 });
-            } else if (func_value.castTag(.extern_fn)) |_| {
+            } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) {
                 return self.fail("TODO implement calling extern functions", .{});
             } else {
                 return self.fail("TODO implement calling bitcasted functions", .{});
@@ -1796,7 +1801,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 }

 fn ret(self: *Self, mcv: MCValue) !void {
-    const ret_ty = self.fn_type.fnReturnType();
+    const mod = self.bin_file.options.module.?;
+    const ret_ty = self.fn_type.fnReturnType(mod);
     try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
     // Just add space for an instruction, patch this later
     const index = try self.addInst(.{
@@ -1825,10 +1831,10 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
-    const ty = self.air.typeOf(bin_op.lhs);
+    const ty = self.typeOf(bin_op.lhs);
     const mod = self.bin_file.options.module.?;
-    assert(ty.eql(self.air.typeOf(bin_op.rhs), mod));
-    if (ty.zigTypeTag() == .ErrorSet)
+    assert(ty.eql(self.typeOf(bin_op.rhs), mod));
+    if (ty.zigTypeTag(mod) == .ErrorSet)
         return self.fail("TODO implement cmp for errors", .{});

     const lhs = try self.resolveInst(bin_op.lhs);
@@ -1869,8 +1875,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+    const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
+    const mod = self.bin_file.options.module.?;
+    const function = mod.funcPtr(ty_fn.func);
     // TODO emit debug info for function change
     _ = function;
     return self.finishAir(inst, .dead, .{ .none, .none, .none });
@@ -1946,7 +1953,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+        try self.load(operand, operand_ptr, self.typeOf(un_op));
         break :result try self.isNull(operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -1973,7 +1980,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+        try self.load(operand, operand_ptr, self.typeOf(un_op));
         break :result try self.isNonNull(operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2000,7 +2007,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+        try self.load(operand, operand_ptr, self.typeOf(un_op));
         break :result try self.isErr(operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2027,7 +2034,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+        try self.load(operand, operand_ptr, self.typeOf(un_op));
         break :result try self.isNonErr(operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2107,13 +2114,14 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {

 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
     const block_data = self.blocks.getPtr(block).?;
-    if (self.air.typeOf(operand).hasRuntimeBits()) {
+    const mod = self.bin_file.options.module.?;
+    if (self.typeOf(operand).hasRuntimeBits(mod)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
             block_data.mcv = operand_mcv;
         } else {
-            try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+            try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
         }
     }
     return self.brVoid(block);
@@ -2176,7 +2184,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
             const arg_mcv = try self.resolveInst(input);

             try self.register_manager.getReg(reg, null);
-            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+            try self.genSetReg(self.typeOf(input), reg, arg_mcv);
         }

         {
@@ -2372,7 +2380,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

         const dest = try self.allocRegOrMem(inst, true);
-        try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
+        try self.setRegOrMem(self.typeOfIndex(inst), dest, operand);
         break :result dest;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -2489,8 +2497,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
-    const vector_ty = self.air.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const mod = self.bin_file.options.module.?;
+    const vector_ty = self.typeOfIndex(inst);
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
@@ -2533,37 +2542,32 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
-    // First section of indexes correspond to a set number of constant values.
- const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + const inst_ty = self.typeOf(inst); + if (!inst_ty.hasRuntimeBits(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = (try self.air.value(inst, mod)).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outermost table. const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } @@ -2616,12 +2620,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -2629,7 +2632,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -2649,8 +2652,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var next_stack_offset: u32 = 0; const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + for (fn_info.param_types, 0..)
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -2680,14 +2683,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), } - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size <= 8) { result.return_value = .{ .register = .a0 }; } else if (ret_ty_size <= 16) { @@ -2731,3 +2734,13 @@ fn parseRegName(name: []const u8) ?Register { } return std.meta.stringToEnum(Register, name); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index bec1b49a4e..41a1850635 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -3,17 +3,19 @@ const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = enum { memory, byval, integer, double_integer }; -pub fn classifyType(ty: Type, target: std.Target) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *Module) Class { + const target = mod.getTarget(); + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const max_byval_size = target.ptrBitWidth() * 2; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); - if (ty.containerLayout() == .Packed) { + const bit_size = ty.bitSize(mod); + if (ty.containerLayout(mod) == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; } @@ -23,8 +25,8 @@ pub fn classifyType(ty: Type, target: std.Target) Class { return .integer; }, .Union => { - const bit_size = ty.bitSize(target); - if (ty.containerLayout() == .Packed) { + const bit_size = ty.bitSize(mod); + if (ty.containerLayout(mod) == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; } @@ -36,21 +38,21 @@ pub fn classifyType(ty: Type, target: std.Target) Class { .Bool => return .integer, .Float => return .byval, .Int, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .integer; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, diff --git a/src/arch/sparc64/CodeGen.zig 
b/src/arch/sparc64/CodeGen.zig index b70bc0f73d..b660126604 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -260,7 +260,7 @@ const BigTomb = struct { pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -271,12 +271,11 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; - log.debug("fn {s}", .{fn_owner_decl.name}); - var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); defer { assert(branch_stack.items.len == 1); @@ -363,7 +362,8 @@ pub fn generate( } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for sparc64. @@ -490,13 +490,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -676,8 +677,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -758,18 +758,18 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 32, 64 => { // Only say yes if the operation is @@ -836,8 +836,9 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const mod = self.bin_file.options.module.?; + const vector_ty = self.typeOfIndex(inst); + const 
len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { @@ -869,19 +870,20 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = @intCast(u32, array_ty.arrayLen()); + const array_ty = ptr_ty.childType(mod); + const array_len = @intCast(u32, array_ty.arrayLen(mod)); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -935,7 +937,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -1008,17 +1010,17 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const arg_index = self.arg_index; self.arg_index += 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const arg = self.args[arg_index]; const mcv = blk: { switch (arg) { .stack_offset => |off| { - const mod = self.bin_file.options.module.?; - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; const offset = off + abi_size; @@ -1063,8 +1065,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else @@ -1088,8 +1090,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else @@ -1115,7 +1117,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const 
dest = try self.allocRegOrMem(inst, true); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand); + try self.setRegOrMem(self.typeOfIndex(inst), dest, operand); break :result dest; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1203,6 +1205,7 @@ fn airBreakpoint(self: *Self) !void { } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; // We have a hardware byteswapper in SPARCv9; don't let mainstream compilers mislead you. @@ -1217,15 +1220,15 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { // TODO: Fold byteswap+store into a single ST*A and load+byteswap into a single LD*A. const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - switch (operand_ty.zigTypeTag()) { + const operand_ty = self.typeOf(ty_op.operand); + switch (operand_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO byteswap for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits == 8) break :result operand; const abi_size = int_info.bits >> 3; - const abi_align = operand_ty.abiAlignment(self.target.*); + const abi_align = operand_ty.abiAlignment(mod); const opposite_endian_asi = switch (self.target.cpu.arch.endian()) { Endian.Big => ASI.asi_primary_little, Endian.Little => ASI.asi_primary, @@ -1293,10 +1296,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]); - const ty = self.air.typeOf(callee); - const fn_ty = switch (ty.zigTypeTag()) { + const ty = self.typeOf(callee); + const mod = self.bin_file.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -1316,7 +1320,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(arg); switch (mc_arg) { @@ -1337,10 +1341,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking.
- if (self.air.value(callee)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -1348,7 +1351,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file)); } else unreachable; - try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jmpl, @@ -1367,14 +1370,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .nop, .data = .{ .nop = {} }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); } } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(ty, .o7, mcv); @@ -1422,25 +1425,24 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. 
- .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, .Optional => blk: { - var opt_buffer: Type.Payload.ElemType = undefined; - const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + const payload_ty = lhs_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + break :blk Type.u1; + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{}); @@ -1450,7 +1452,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 64) { _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{ .lhs = bin_op.lhs, @@ -1512,8 +1514,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -1603,7 +1605,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -1630,7 +1632,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
- try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -1656,8 +1658,9 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; + const mod = self.bin_file.options.module.?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); @@ -1752,10 +1755,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - const operand_ty = self.air.typeOf(ty_op.operand); + const mod = self.bin_file.options.module.?; + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1777,7 +1781,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); break :result try self.isErr(ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -1787,7 +1791,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); break :result try self.isNonErr(ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -1812,15 +1816,16 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_ty = self.typeOfIndex(inst); + const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -1835,7 +1840,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ 
-1878,8 +1883,8 @@ fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead @@ -1893,8 +1898,8 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); assert(lhs_ty.eql(rhs_ty, self.bin_file.options.module.?)); if (self.liveness.isUnused(inst)) @@ -2037,18 +2042,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { //const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 1...32 => { try self.spillConditionFlagsIfOccupied(); @@ -2101,9 +2106,10 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, @@ -2116,7 +2122,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }; }, else => { - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Bool => { const op_reg = switch (operand) { .register => |r| r, @@ -2150,7 +2156,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }, .Vector => return self.fail("TODO bitwise not for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits <= 64) { const op_reg = switch (operand) { .register => |r| r, @@ -2280,8 +2286,8 @@ fn airRem(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = 
self.typeOf(bin_op.rhs); // TODO add safety check @@ -2332,16 +2338,17 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { try self.spillConditionFlagsIfOccupied(); @@ -2423,9 +2430,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -2439,6 +2446,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -2447,12 +2455,11 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_mcv = try self.resolveInst(bin_op.lhs); const index_mcv = try self.resolveInst(bin_op.rhs); - const slice_ty = self.air.typeOf(bin_op.lhs); - const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(self.target.*); + const slice_ty = self.typeOf(bin_op.lhs); + const elem_ty = slice_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); const index_lock: ?RegisterLock = if (index_mcv == .register) self.register_manager.lockRegAssumeUnused(index_mcv.register) @@ -2537,8 +2544,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -2564,9 +2571,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32,
struct_ty.structFieldOffset(index, self.target.*)); + const struct_ty = self.typeOf(operand); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .dead, .unreach => unreachable, @@ -2651,8 +2659,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand, operand_ty, dest_ty); @@ -2666,7 +2674,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { - const error_union_ty = self.air.typeOf(pl_op.operand); + const error_union_ty = self.typeOf(pl_op.operand); const error_union = try self.resolveInst(pl_op.operand); const is_err_result = try self.isErr(error_union_ty, error_union); const reloc = try self.condBr(is_err_result); @@ -2696,12 +2704,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const error_union_ty = self.air.typeOf(ty_op.operand); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_union_ty = self.typeOf(ty_op.operand); + const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result mcv; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); }; @@ -2709,11 +2718,12 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const error_union_ty = self.air.typeOf(ty_op.operand); - const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; + const error_union_ty = self.typeOf(ty_op.operand); + const payload_ty = error_union_ty.errorUnionPayload(mod); + if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); }; @@ -2722,12 +2732,13 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const payload_ty = error_union_ty.errorUnionPayload(); + const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result 
mcv; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); }; @@ -2742,12 +2753,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -2782,9 +2794,10 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst).childType(mod); - if (!elem_ty.hasRuntimeBits()) { + if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized @@ -2792,22 +2805,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst); + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -2860,12 +2872,12 @@ fn binOp( .xor, .cmp_eq, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // Only say yes if the operation is // commutative, i.e. 
we can swap both of the @@ -2934,10 +2946,10 @@ fn binOp( const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const result_reg = result.register; try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); @@ -2951,11 +2963,11 @@ fn binOp( }, .div_trunc => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = switch (tag) { .div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), @@ -2984,14 +2996,14 @@ fn binOp( }, .ptr_add => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Mir.Inst.Tag = switch (tag) { @@ -3005,7 +3017,7 @@ fn binOp( // multiplying it with elem_size const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null); - const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null); + const addr = try self.binOp(tag, lhs, offset, Type.manyptr_u8, Type.usize, null); return addr; } }, @@ -3016,7 +3028,7 @@ fn binOp( .bool_and, .bool_or, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { assert(lhs != .immediate); // should have been handled by Sema assert(rhs != .immediate); // should have been handled by Sema @@ -3046,10 +3058,10 @@ fn binOp( const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // 32- and 64-bit operands don't need truncating if (int_info.bits == 32 or int_info.bits == 64) return result; @@ -3068,10 +3080,10 @@ fn binOp( .shl_exact, .shr_exact, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = rhs == .immediate; @@ -3393,7 +3405,8 @@ fn binOpRegister( fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const
block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3402,13 +3415,13 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .register, .stack_offset, .memory => operand_mcv, .immediate => blk: { const new_mcv = try self.allocRegOrMem(block, true); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -3512,16 +3525,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + const mod = self.bin_file.options.module.?; + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); + if (err_ty.errorSetIsEmpty(mod)) { return error_union_mcv; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_offset => |off| { @@ -3555,8 +3569,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -3730,6 +3744,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3928,19 +3943,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
try self.genSetReg(ty, reg, .{ .immediate = addr }); - try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*)); + try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod)); }, .stack_offset => |off| { const real_offset = realStackOffset(off); const simm13 = math.cast(i13, real_offset) orelse return self.fail("TODO larger stack offsets: {}", .{real_offset}); - try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*)); + try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod)); }, } } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3948,7 +3964,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro if (!self.wantSafety()) return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. - switch (ty.abiSize(self.target.*)) { + switch (ty.abiSize(mod)) { 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), @@ -3974,11 +3990,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_ty = ty.structFieldType(1, mod); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // TODO handle floating point CCRs @@ -4024,11 +4040,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp); const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs); @@ -4152,13 +4164,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { - const error_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); + const mod = self.bin_file.options.module.?; + const error_type = ty.errorUnionSet(mod); + const payload_type = ty.errorUnionPayload(mod); - if (!error_type.hasRuntimeBits()) { + if (!error_type.hasRuntimeBits(mod)) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasRuntimeBits()) { - if (error_type.abiSize(self.target.*) <= 8) { + } else if (!payload_type.hasRuntimeBits(mod)) { + if (error_type.abiSize(mod) <= 8) { const reg_mcv: MCValue = switch (operand) { .register => operand, else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, @@ 
-4249,8 +4262,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void { } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const elem_ty = ptr_ty.elemType(); - const elem_size = elem_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4321,11 +4335,11 @@ fn minMax( ) InnerError!MCValue { const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO min/max on floats", .{}), .Vector => return self.fail("TODO min/max on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO skip register setting when one of the operands // is a small (fits in i13) immediate. @@ -4406,8 +4420,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -4441,12 +4454,11 @@ fn realStackOffset(off: u32) u32 { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -4454,7 +4466,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -4477,8 +4489,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) .callee => abi.c_abi_int_param_regs_callee_view, }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + for (fn_info.param_types, 0..) 
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4505,12 +4517,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) result.stack_byte_count = next_stack_offset; result.stack_align = 16; - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // The callee puts the return values in %i0-%i3, which become %o0-%o3 inside the caller. if (ret_ty_size <= 8) { result.return_value = switch (role) { @@ -4528,44 +4540,41 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) return result; } -fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } +fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const mod = self.bin_file.options.module.?; + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) - return MCValue{ .none = {} }; + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); - switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { - // Constants have static lifetimes, so they are always memoized in the outer most table. - const branch = &self.branch_stack.items[0]; - const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); - if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; - gop.value_ptr.* = try self.genTypedValue(.{ - .ty = inst_ty, - .val = self.air.values[ty_pl.payload], - }); - } - return gop.value_ptr.*; - }, - .const_ty => unreachable, - else => return self.getResolvedInstValue(inst_index), + if (Air.refToIndex(ref)) |inst| { + switch (self.air.instructions.items(.tag)[inst]) { + .interned => { + // Constants have static lifetimes, so they are always memoized in the outermost table.
+ const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst); + if (!gop.found_existing) { + const interned = self.air.instructions.items(.data)[inst].interned; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = ty, + .val = interned.toValue(), + }); + } + return gop.value_ptr.*; + }, + else => return self.getResolvedInstValue(inst), + } } + + return self.genTypedValue(.{ + .ty = ty, + .val = (try self.air.value(ref, mod)).?, + }); } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for a branch instruction, patch this later @@ -4638,7 +4647,7 @@ fn spillConditionFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -4662,11 +4671,12 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void assert(reg == reg_mcv.register); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const abi_size = value_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = value_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4707,10 +4717,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); - const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const ptr_ty = self.typeOf(operand); + const struct_ty = ptr_ty.childType(mod); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4748,8 +4759,9 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const info_a = operand_ty.intInfo(self.target.*); - const info_b = dest_ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const info_a = operand_ty.intInfo(mod); + const info_b = dest_ty.intInfo(mod); if (info_b.bits <= 64) { const operand_reg = switch (operand) { @@ -4866,3 +4878,13 @@ fn wantSafety(self: *Self) bool { .ReleaseSmall => false, }; } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/wasm/CodeGen.zig 
b/src/arch/wasm/CodeGen.zig index d4be9bf139..877db4b623 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -11,6 +11,7 @@ const log = std.log.scoped(.codegen); const codegen = @import("../../codegen.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Decl = Module.Decl; const Type = @import("../../type.zig").Type; const Value = @import("../../value.zig").Value; @@ -764,8 +765,9 @@ pub fn deinit(func: *CodeGen) void { /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { + const mod = func.bin_file.base.options.module.?; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(func.decl); + const src_loc = src.toSrcLoc(func.decl, mod); func.err_msg = try Module.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -788,9 +790,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref); assert(!gop.found_existing); - const val = func.air.value(ref).?; - const ty = func.air.typeOf(ref); - if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) { + const mod = func.bin_file.base.options.module.?; + const val = (try func.air.value(ref, mod)).?; + const ty = func.typeOf(ref); + if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -801,7 +804,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). - const result = if (isByRef(ty, func.target)) blk: { + const result = if (isByRef(ty, mod)) blk: { const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index); break :blk WValue{ .memory = sym_index }; } else try func.lowerConstant(val, ty); @@ -880,7 +883,7 @@ fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !B fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { const inst = Air.refToIndex(ref) orelse return; - if (func.air.instructions.items(.tag)[inst] == .constant) return; + assert(func.air.instructions.items(.tag)[inst] != .interned); // Branches are currently only allowed to free locals allocated // within their own branch. // TODO: Upon branch consolidation free any locals if needed. 
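A minimal sketch of the get-or-put memoization pattern the rewritten resolveInst functions above rely on: an interned constant has a static lifetime, so it is lowered once and cached in the outermost table, and every later lookup reuses the cached value. The map type and placeholder lowering below are hypothetical stand-ins for illustration, not the compiler's actual data structures.

const std = @import("std");

// Hypothetical stand-in for branch_stack.items[0].inst_table.
fn resolveConst(table: *std.AutoHashMap(u32, u64), inst: u32) !u64 {
    const gop = try table.getOrPut(inst);
    if (!gop.found_existing) {
        // First encounter: lower the constant and memoize the result.
        gop.value_ptr.* = lowerConst(inst);
    }
    // Every later query returns the memoized value.
    return gop.value_ptr.*;
}

// Placeholder for genTypedValue; any pure lowering works here.
fn lowerConst(inst: u32) u64 {
    return @as(u64, inst) +% 1;
}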
@@ -987,8 +990,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 } /// Using a given `Type`, returns the corresponding `wasm.Valtype` -fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { - return switch (ty.zigTypeTag()) { +fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { + const target = mod.getTarget(); + return switch (ty.zigTypeTag(mod)) { .Float => blk: { const bits = ty.floatBits(target); if (bits == 16) return wasm.Valtype.i32; // stored/loaded as u16 @@ -998,30 +1002,26 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { return wasm.Valtype.i32; // represented as pointer to stack }, .Int, .Enum => blk: { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.bits <= 32) break :blk wasm.Valtype.i32; if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64; break :blk wasm.Valtype.i32; // represented as pointer to stack }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; - return typeToValtype(struct_obj.backing_int_ty, target); + const struct_obj = mod.typeToStruct(ty).?; + return typeToValtype(struct_obj.backing_int_ty, mod); }, else => wasm.Valtype.i32, }, - .Vector => switch (determineSimdStoreStrategy(ty, target)) { + .Vector => switch (determineSimdStoreStrategy(ty, mod)) { .direct => wasm.Valtype.v128, .unrolled => wasm.Valtype.i32, }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Packed => { - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.bitSize(target)), - }; - const int_ty = Type.initPayload(&int_ty_payload.base); - return typeToValtype(int_ty, target); + const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory"); + return typeToValtype(int_ty, mod); }, else => wasm.Valtype.i32, }, @@ -1030,17 +1030,17 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { } /// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, target: std.Target) u8 { - return wasm.valtype(typeToValtype(ty, target)); +fn genValtype(ty: Type, mod: *Module) u8 { + return wasm.valtype(typeToValtype(ty, mod)); } /// Using a given `Type`, returns the corresponding wasm value type /// Unlike `genValtype`, this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, target: std.Target) u8 { - return switch (ty.tag()) { - .void, .noreturn => wasm.block_empty, - else => genValtype(ty, target), +fn genBlockType(ty: Type, mod: *Module) u8 { + return switch (ty.ip_index) { + .void_type, .noreturn_type => wasm.block_empty, + else => genValtype(ty, mod), }; } @@ -1101,7 +1101,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue { /// Creates one local for a given `Type`. /// Returns a corresponding `WValue` with `local` as active tag fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - const valtype = typeToValtype(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const valtype = typeToValtype(ty, mod); switch (valtype) { .i32 => if (func.free_locals_i32.popOrNull()) |index| { log.debug("reusing local ({d}) of type {}", .{ index, valtype }); @@ -1132,7 +1133,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Ensures a new local will be created. This is useful when it's necessary /// to use a zero-initialized local.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - try func.locals.append(func.gpa, genValtype(ty, func.target)); + const mod = func.bin_file.base.options.module.?; + try func.locals.append(func.gpa, genValtype(ty, mod)); const initial_index = func.local_index; func.local_index += 1; return WValue{ .local = .{ .value = initial_index, .references = 1 } }; @@ -1140,48 +1142,55 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Generates a `wasm.Type` from a given function type. /// Memory is owned by the caller. -fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []const Type, return_type: Type, target: std.Target) !wasm.Type { +fn genFunctype( + gpa: Allocator, + cc: std.builtin.CallingConvention, + params: []const InternPool.Index, + return_type: Type, + mod: *Module, +) !wasm.Type { var temp_params = std.ArrayList(wasm.Valtype).init(gpa); defer temp_params.deinit(); var returns = std.ArrayList(wasm.Valtype).init(gpa); defer returns.deinit(); - if (firstParamSRet(cc, return_type, target)) { + if (firstParamSRet(cc, return_type, mod)) { try temp_params.append(.i32); // memory address is always a 32-bit handle - } else if (return_type.hasRuntimeBitsIgnoreComptime()) { + } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) { if (cc == .C) { - const res_classes = abi.classifyType(return_type, target); + const res_classes = abi.classifyType(return_type, mod); assert(res_classes[0] == .direct and res_classes[1] == .none); - const scalar_type = abi.scalarType(return_type, target); - try returns.append(typeToValtype(scalar_type, target)); + const scalar_type = abi.scalarType(return_type, mod); + try returns.append(typeToValtype(scalar_type, mod)); } else { - try returns.append(typeToValtype(return_type, target)); + try returns.append(typeToValtype(return_type, mod)); } - } else if (return_type.isError()) { + } else if (return_type.isError(mod)) { try returns.append(.i32); } // param types - for (params) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + for (params) |param_type_ip| { + const param_type = param_type_ip.toType(); + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; switch (cc) { .C => { - const param_classes = abi.classifyType(param_type, target); + const param_classes = abi.classifyType(param_type, mod); for (param_classes) |class| { if (class == .none) continue; if (class == .direct) { - const scalar_type = abi.scalarType(param_type, target); - try temp_params.append(typeToValtype(scalar_type, target)); + const scalar_type = abi.scalarType(param_type, mod); + try temp_params.append(typeToValtype(scalar_type, mod)); } else { - try temp_params.append(typeToValtype(param_type, target)); + try temp_params.append(typeToValtype(param_type, mod)); } } }, - else => if (isByRef(param_type, target)) + else => if (isByRef(param_type, mod)) try temp_params.append(.i32) else - try temp_params.append(typeToValtype(param_type, target)), + try temp_params.append(typeToValtype(param_type, mod)), } } @@ -1194,20 +1203,22 @@ fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []cons pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: codegen.DebugInfoOutput, ) codegen.CodeGenError!codegen.Result { _ = src_loc; + const mod = bin_file.options.module.?; + const func = mod.funcPtr(func_index); var code_gen: CodeGen = .{ .gpa = bin_file.allocator, .air 
= air, .liveness = liveness, .code = code, .decl_index = func.owner_decl, - .decl = bin_file.options.module.?.declPtr(func.owner_decl), + .decl = mod.declPtr(func.owner_decl), .err_msg = undefined, .locals = .{}, .target = bin_file.options.target, @@ -1226,8 +1237,9 @@ pub fn generate( } fn genFunc(func: *CodeGen) InnerError!void { - const fn_info = func.decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + const mod = func.bin_file.base.options.module.?; + const fn_info = mod.typeToFunc(func.decl.ty).?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1253,8 +1265,8 @@ fn genFunc(func: *CodeGen) InnerError!void { // we emit an unreachable instruction to tell the stack validator that part will never be reached. if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); - const last_inst_ty = func.air.typeOfIndex(inst); - if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) { + const last_inst_ty = func.typeOfIndex(inst); + if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); } } @@ -1335,10 +1347,9 @@ const CallWValues = struct { }; fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen()); - defer func.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = func.bin_file.base.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallWValues = .{ .args = &.{}, .return_value = .none, @@ -1350,8 +1361,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value - const fn_info = fn_ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. 
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1360,8 +1370,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { - for (param_types) |ty| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + for (fn_info.param_types) |ty| { + if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1370,8 +1380,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV } }, .C => { - for (param_types) |ty| { - const ty_classes = abi.classifyType(ty, func.target); + for (fn_info.param_types) |ty| { + const ty_classes = abi.classifyType(ty.toType(), mod); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -1385,11 +1395,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target: std.Target) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Module) bool { switch (cc) { - .Unspecified, .Inline => return isByRef(return_type, target), + .Unspecified, .Inline => return isByRef(return_type, mod), .C => { - const ty_classes = abi.classifyType(return_type, target); + const ty_classes = abi.classifyType(return_type, mod); if (ty_classes[0] == .indirect) return true; if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true; return false; @@ -1405,16 +1415,17 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } - const ty_classes = abi.classifyType(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const ty_classes = abi.classifyType(ty, mod); assert(ty_classes[0] != .none); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct, .Union => { if (ty_classes[0] == .indirect) { return func.lowerToStack(value); } assert(ty_classes[0] == .direct); - const scalar_type = abi.scalarType(ty, func.target); - const abi_size = scalar_type.abiSize(func.target); + const scalar_type = abi.scalarType(ty, mod); + const abi_size = scalar_type.abiSize(mod); try func.emitWValue(value); // When the value lives in the virtual stack, we must load it onto the actual stack @@ -1422,12 +1433,12 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: const opcode = buildOpcode(.{ .op = .load, .width = @intCast(u8, abi_size), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = value.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); } }, @@ -1436,7 +1447,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } assert(ty_classes[0] == .direct and ty_classes[1] == .direct); - assert(ty.abiSize(func.target) == 16); + assert(ty.abiSize(mod) == 16); // in this case we have an integer or float that must be lowered as 2 i64's. 
try func.emitWValue(value); try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 }); @@ -1503,18 +1514,18 @@ fn restoreStackPointer(func: *CodeGen) !void { /// /// Asserts Type has codegenbits fn allocStack(func: *CodeGen, ty: Type) !WValue { - assert(ty.hasRuntimeBitsIgnoreComptime()); + const mod = func.bin_file.base.options.module.?; + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); if (func.initial_stack_value == .none) { try func.initializeStack(); } - const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - ty.fmt(module), ty.abiSize(func.target), + ty.fmt(mod), ty.abiSize(mod), }); }; - const abi_align = ty.abiAlignment(func.target); + const abi_align = ty.abiAlignment(mod); if (abi_align > func.stack_alignment) { func.stack_alignment = abi_align; @@ -1531,22 +1542,22 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// This is different from allocStack where this will use the pointer's alignment /// if it is set, to ensure the stack alignment will be set correctly. fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { - const ptr_ty = func.air.typeOfIndex(inst); - const pointee_ty = ptr_ty.childType(); + const mod = func.bin_file.base.options.module.?; + const ptr_ty = func.typeOfIndex(inst); + const pointee_ty = ptr_ty.childType(mod); if (func.initial_stack_value == .none) { try func.initializeStack(); } - if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.allocStack(Type.usize); // create a value containing just the stack pointer. } - const abi_alignment = ptr_ty.ptrAlignment(func.target); - const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_alignment = ptr_ty.ptrAlignment(mod); + const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - pointee_ty.fmt(module), pointee_ty.abiSize(func.target), + pointee_ty.fmt(mod), pointee_ty.abiSize(mod), }); }; if (abi_alignment > func.stack_alignment) { @@ -1704,8 +1715,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn isByRef(ty: Type, target: std.Target) bool { - switch (ty.zigTypeTag()) { +fn isByRef(ty: Type, mod: *Module) bool { + const target = mod.getTarget(); + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -1726,44 +1738,42 @@ fn isByRef(ty: Type, target: std.Target) bool { .Array, .Frame, - => return ty.hasRuntimeBitsIgnoreComptime(), + => return ty.hasRuntimeBitsIgnoreComptime(mod), .Union => { - if (ty.castTag(.@"union")) |union_ty| { - if (union_ty.data.layout == .Packed) { - return ty.abiSize(target) > 8; + if (mod.typeToUnion(ty)) |union_obj| { + if (union_obj.layout == .Packed) { + return ty.abiSize(mod) > 8; } } - return ty.hasRuntimeBitsIgnoreComptime(); + return ty.hasRuntimeBitsIgnoreComptime(mod); }, .Struct => { - if (ty.castTag(.@"struct")) |struct_ty| { - const struct_obj = struct_ty.data; + if (mod.typeToStruct(ty)) |struct_obj| { if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { - return isByRef(struct_obj.backing_int_ty, target); + return isByRef(struct_obj.backing_int_ty, 
mod); } } - return ty.hasRuntimeBitsIgnoreComptime(); + return ty.hasRuntimeBitsIgnoreComptime(mod); }, - .Vector => return determineSimdStoreStrategy(ty, target) == .unrolled, - .Int => return ty.intInfo(target).bits > 64, + .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled, + .Int => return ty.intInfo(mod).bits > 64, .Float => return ty.floatBits(target) > 64, .ErrorUnion => { - const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + const pl_ty = ty.errorUnionPayload(mod); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } return true; }, .Optional => { - if (ty.isPtrLikeOptional()) return false; - var buf: Type.Payload.ElemType = undefined; - const pl_type = ty.optionalChild(&buf); - if (pl_type.zigTypeTag() == .ErrorSet) return false; - return pl_type.hasRuntimeBitsIgnoreComptime(); + if (ty.isPtrLikeOptional(mod)) return false; + const pl_type = ty.optionalChild(mod); + if (pl_type.zigTypeTag(mod) == .ErrorSet) return false; + return pl_type.hasRuntimeBitsIgnoreComptime(mod); }, .Pointer => { // Slices act like structs and will be passed by reference - if (ty.isSlice()) return true; + if (ty.isSlice(mod)) return true; return false; }, } @@ -1778,10 +1788,11 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow storing /// it with a single instruction, rather than an unrolled version. -fn determineSimdStoreStrategy(ty: Type, target: std.Target) SimdStoreStrategy { - std.debug.assert(ty.zigTypeTag() == .Vector); - if (ty.bitSize(target) != 128) return .unrolled; +fn determineSimdStoreStrategy(ty: Type, mod: *Module) SimdStoreStrategy { + std.debug.assert(ty.zigTypeTag(mod) == .Vector); + if (ty.bitSize(mod) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; + const target = mod.getTarget(); const features = target.cpu.features; if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) { return .direct; @@ -1821,8 +1832,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const air_tags = func.air.instructions.items(.tag); return switch (air_tags[inst]) { - .constant => unreachable, - .const_ty => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .add => func.airBinOp(inst, .add), .add_sat => func.airSatBinOp(inst, .add), @@ -2062,8 +2072,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; + const ip = &mod.intern_pool; + for (body) |inst| { - if (func.liveness.isUnused(inst) and !func.air.mustLower(inst)) { + if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) { continue; } const old_bookkeeping_value = func.air_bookkeeping; @@ -2080,36 +2093,37 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { } fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const fn_info = func.decl.ty.fnInfo(); - const ret_ty = fn_info.return_type; + const fn_info = mod.typeToFunc(func.decl.ty).?; + const ret_ty = fn_info.return_type.toType(); // result must be stored on the stack
and we return a pointer // to the stack instead if (func.return_value != .none) { try func.store(func.return_value, operand, ret_ty, 0); - } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { - switch (ret_ty.zigTypeTag()) { + } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + switch (ret_ty.zigTypeTag(mod)) { // Aggregate types can be lowered as a singular value .Struct, .Union => { - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); try func.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, scalar_type.abiSize(func.target) * 8), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .width = @intCast(u8, scalar_type.abiSize(mod) * 8), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = operand.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); }, else => try func.emitWValue(operand), } } else { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) { try func.addImm32(0); } else { try func.emitWValue(operand); @@ -2122,15 +2136,16 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const child_type = func.air.typeOfIndex(inst).childType(); + const mod = func.bin_file.base.options.module.?; + const child_type = func.typeOfIndex(inst).childType(mod); var result = result: { - if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { break :result try func.allocStack(Type.usize); // create pointer to void } - const fn_info = func.decl.ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + const fn_info = mod.typeToFunc(func.decl.ty).?; + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { break :result func.return_value; } @@ -2141,16 +2156,17 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ret_ty = func.air.typeOf(un_op).childType(); + const ret_ty = func.typeOf(un_op).childType(mod); - const fn_info = func.decl.ty.fnInfo(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - if (ret_ty.isError()) { + const fn_info = mod.typeToFunc(func.decl.ty).?; + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2165,42 +2181,48 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const pl_op = func.air.instructions.items(.data)[inst].pl_op; const extra = func.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]); - const ty = func.air.typeOf(pl_op.operand); + const ty = 
func.typeOf(pl_op.operand); - const fn_ty = switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); - const fn_info = fn_ty.fnInfo(); - const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target); + const ret_ty = fn_ty.fnReturnType(mod); + const fn_info = mod.typeToFunc(fn_ty).?; + const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand) orelse break :blk null; - const module = func.bin_file.base.options.module.?; + const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; - if (func_val.castTag(.function)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); - break :blk function.data.owner_decl; - } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = module.declPtr(extern_fn.data.owner_decl); - const ext_info = ext_decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target); + if (func_val.getFunction(mod)) |function| { + _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); + break :blk function.owner_decl; + } else if (func_val.getExternFunc(mod)) |extern_func| { + const ext_decl = mod.declPtr(extern_func.decl); + const ext_info = mod.typeToFunc(ext_decl.ty).?; + var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); - const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type); + const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( - mem.sliceTo(ext_decl.name, 0), + mod.intern_pool.stringToSlice(ext_decl.name), atom.getSymbolIndex().?, - ext_decl.getExternFn().?.lib_name, + mod.intern_pool.stringToSliceUnwrap(ext_decl.getOwnedExternFunc(mod).?.lib_name), type_index, ); - break :blk extern_fn.data.owner_decl; - } else if (func_val.castTag(.decl_ref)) |decl_ref| { - _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data); - break :blk decl_ref.data; + break :blk extern_func.decl; + } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| { + _ = try func.bin_file.getOrCreateAtomForDecl(decl); + break :blk decl; + }, + else => {}, + }, + else => {}, } return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()}); }; @@ -2214,10 +2236,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif for (args) |arg| { const arg_val = try func.resolveInst(arg); - const arg_ty = func.air.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; + const arg_ty = func.typeOf(arg); + if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); + try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val); } if (callee) |direct| { @@ -2226,11 +2248,11 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: 
std.builtin.CallModif } else { // in this case we call a function pointer // so load its value onto the stack - std.debug.assert(ty.zigTypeTag() == .Pointer); + std.debug.assert(ty.zigTypeTag(mod) == .Pointer); const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2238,18 +2260,18 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } const result_value = result_value: { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { break :result_value WValue{ .none = {} }; - } else if (ret_ty.isNoReturn()) { + } else if (ret_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); break :result_value WValue{ .none = {} }; } else if (first_param_sret) { break :result_value sret; // TODO: Make this less fragile and optimize - } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) { + } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); const result = try func.allocStack(scalar_type); try func.store(result, result_local, scalar_type, 0); break :result_value result; @@ -2272,6 +2294,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -2281,26 +2304,22 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); - const ptr_info = ptr_ty.ptrInfo().data; - const ty = ptr_ty.childType(); + const ptr_ty = func.typeOf(bin_op.lhs); + const ptr_info = ptr_ty.ptrInfo(mod); + const ty = ptr_ty.childType(mod); if (ptr_info.host_size == 0) { try func.store(lhs, rhs, ty, 0); } else { // at this point we have a non-natural alignment, we must // load the value, and then shift+or the rhs into the result location. 
- var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); - if (isByRef(int_elem_ty, func.target)) { + if (isByRef(int_elem_ty, mod)) { return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } - var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1); + var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1); mask <<= @intCast(u6, ptr_info.bit_offset); mask ^= ~@as(u64, 0); const shift_val = if (ptr_info.host_size <= 4) @@ -2329,11 +2348,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { assert(!(lhs != .stack and rhs == .stack)); - const abi_size = ty.abiSize(func.target); - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const abi_size = ty.abiSize(mod); + switch (ty.zigTypeTag(mod)) { .ErrorUnion => { - const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + const pl_ty = ty.errorUnionPayload(mod); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.anyerror, 0); } @@ -2341,26 +2361,25 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { return func.store(lhs, rhs, Type.usize, 0); } - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + const pl_ty = ty.optionalChild(mod); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.u8, 0); } - if (pl_ty.zigTypeTag() == .ErrorSet) { + if (pl_ty.zigTypeTag(mod) == .ErrorSet) { return func.store(lhs, rhs, Type.anyerror, 0); } const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Struct, .Array, .Union => if (isByRef(ty, func.target)) { + .Struct, .Array, .Union => if (isByRef(ty, mod)) { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Vector => switch (determineSimdStoreStrategy(ty, func.target)) { + .Vector => switch (determineSimdStoreStrategy(ty, mod)) { .unrolled => { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2374,13 +2393,13 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_store), offset + lhs.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); }, }, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { // store pointer first // lower it to the stack so we do not have to store rhs into a local first try func.emitWValue(lhs); @@ -2404,7 +2423,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(func.target)) }); + try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) }); }, else => if (abi_size > 8) { return 
func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ @@ -2418,7 +2437,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // into lhs, so we calculate that and emit that instead try func.lowerToStack(rhs); - const valtype = typeToValtype(ty, func.target); + const valtype = typeToValtype(ty, mod); const opcode = buildOpcode(.{ .valtype1 = valtype, .width = @intCast(u8, abi_size * 8), @@ -2428,21 +2447,22 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // store rhs value at stack pointer's location in memory try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) }, ); } fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.air.getRefType(ty_op.ty); - const ptr_ty = func.air.typeOf(ty_op.operand); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_ty = func.typeOf(ty_op.operand); + const ptr_info = ptr_ty.ptrInfo(mod); - if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand}); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand}); const result = result: { - if (isByRef(ty, func.target)) { + if (isByRef(ty, mod)) { const new_local = try func.allocStack(ty); try func.store(new_local, operand, ty, 0); break :result new_local; @@ -2455,11 +2475,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // at this point we have a non-natural alignment, we must // shift the value to obtain the correct bit. - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); const shift_val = if (ptr_info.host_size <= 4) WValue{ .imm32 = ptr_info.bit_offset } else if (ptr_info.host_size <= 8) @@ -2479,25 +2495,26 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Loads an operand from the linear memory section. /// NOTE: Leaves the value on the stack. 
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; // load local's value from memory by its stack position try func.emitWValue(operand); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { // TODO: Add helper functions for simd opcodes const extra_index = @intCast(u32, func.mir_extra.items.len); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_load), offset + operand.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); return WValue{ .stack = {} }; } - const abi_size = @intCast(u8, ty.abiSize(func.target)); + const abi_size = @intCast(u8, ty.abiSize(mod)); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, func.target), + .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, .op = .load, .signedness = .unsigned, @@ -2505,19 +2522,20 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) }, ); return WValue{ .stack = {} }; } fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const arg_index = func.arg_index; const arg = func.args[arg_index]; - const cc = func.decl.ty.fnInfo().cc; - const arg_ty = func.air.typeOfIndex(inst); + const cc = mod.typeToFunc(func.decl.ty).?.cc; + const arg_ty = func.typeOfIndex(inst); if (cc == .C) { - const arg_classes = abi.classifyType(arg_ty, func.target); + const arg_classes = abi.classifyType(arg_ty, mod); for (arg_classes) |class| { if (class != .none) { func.arg_index += 1; @@ -2527,7 +2545,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When we have an argument that's passed using more than a single parameter, // we combine them into a single stack value if (arg_classes[0] == .direct and arg_classes[1] == .direct) { - if (arg_ty.zigTypeTag() != .Int and arg_ty.zigTypeTag() != .Float) { + if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) { return func.fail( "TODO: Implement C-ABI argument for type '{}'", .{arg_ty.fmt(func.bin_file.base.options.module.?)}, @@ -2557,11 +2575,12 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.air.typeOf(bin_op.lhs); - const rhs_ty = func.air.typeOf(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); // For certain operations, such as shifting, the types are different. // When converting this to a WebAssembly type, they *must* match to perform @@ -2570,10 +2589,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2593,6 +2612,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { /// Performs a binary operation on the given `WValue`s /// NOTE: This leaves the value on top of the stack. fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; assert(!(lhs != .stack and rhs == .stack)); if (ty.isAnyFloat()) { @@ -2600,8 +2620,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! return func.floatOp(float_op, ty, &.{ lhs, rhs }); } - if (isByRef(ty, func.target)) { - if (ty.zigTypeTag() == .Int) { + if (isByRef(ty, mod)) { + if (ty.zigTypeTag(mod) == .Int) { return func.binOpBigInt(lhs, rhs, ty, op); } else { return func.fail( @@ -2613,8 +2633,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = typeToValtype(ty, func.target), - .signedness = if (ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(ty, mod), + .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned, }); try func.emitWValue(lhs); try func.emitWValue(rhs);
} fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - if (ty.intInfo(func.target).bits > 128) { + const mod = func.bin_file.base.options.module.?; + if (ty.intInfo(mod).bits > 128) { return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); } switch (op) { - .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }), - .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), - .shl => return func.callIntrinsic("__ashlti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), + .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .shr => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), + .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), .xor => { const result = try func.allocStack(ty); try func.emitWValue(result); @@ -2756,14 +2777,15 @@ const FloatOp = enum { fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void { const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ty = func.air.typeOf(un_op); + const ty = func.typeOf(un_op); const result = try (try func.floatOp(op, ty, &.{operand})).toLocal(func, ty); func.finishAir(inst, result, &.{un_op}); } fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue { - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement floatOps for vectors", .{}); } @@ -2773,7 +2795,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In for (args) |operand| { try func.emitWValue(operand); } - const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) }); + const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } @@ -2821,20 +2843,21 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In }; // fma requires three operands - var param_types_buffer: [3]Type = .{ ty, ty, ty }; + var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index }; const param_types = param_types_buffer[0..args.len]; return func.callIntrinsic(fn_name, param_types, ty, args); } fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.air.typeOf(bin_op.lhs); - const rhs_ty = func.air.typeOf(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); - if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement wrapping arithmetic for vectors", .{}); } @@ -2845,10 +2868,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2877,8 +2900,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr /// Asserts `Type` is <= 128 bits. /// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack. fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - assert(ty.abiSize(func.target) <= 16); - const bitsize = @intCast(u16, ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + assert(ty.abiSize(mod) <= 16); + const bitsize = @intCast(u16, ty.bitSize(mod)); const wasm_bits = toWasmBits(bitsize) orelse { return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize}); }; @@ -2914,43 +2938,67 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return WValue{ .stack = {} }; } -fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); +fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; + const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr; + switch (ptr.addr) { + .decl => |decl_index| { + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); }, - .decl_ref => { - const decl_index = ptr_val.castTag(.decl_ref).?.data; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .mut_decl => |mut_decl| { + const decl_index = mut_decl.decl; + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); }, - .variable => { - const decl_index = ptr_val.castTag(.variable).?.data.owner_decl; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .int, .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}), + .opt_payload => |base_ptr| { + return func.lowerParentPtr(base_ptr.toValue()); }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_ty = field_ptr.container_ty; + .comptime_field => unreachable, + .elem => |elem| { + const index = elem.index; + const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); + const offset = index * elem_type.abiSize(mod); + const array_ptr = try func.lowerParentPtr(elem.base.toValue()); - const field_offset = switch (parent_ty.zigTypeTag()) { - .Struct => switch (parent_ty.containerLayout()) { - .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target), - else => parent_ty.structFieldOffset(field_ptr.field_index, func.target), + return switch (array_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, }, - .Union => switch (parent_ty.containerLayout()) { + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = 
@intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; + }, + .field => |field| { + const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); + const parent_ptr = try func.lowerParentPtr(field.base.toValue()); + + const offset = switch (parent_ty.zigTypeTag(mod)) { + .Struct => switch (parent_ty.containerLayout(mod)) { + .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod), + else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod), + }, + .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, else => blk: { - const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target); + const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod); if (layout.payload_size == 0) break :blk 0; if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - const field_offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); - break :blk field_offset; + const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk offset; }, }, - .Pointer => switch (parent_ty.ptrSize()) { - .Slice => switch (field_ptr.field_index) { + .Pointer => switch (parent_ty.ptrSize(mod)) { + .Slice => switch (field.index) { 0 => 0, 1 => func.ptrSize(), else => unreachable, @@ -2959,51 +3007,51 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue }, else => unreachable, }; - return func.lowerParentPtr(field_ptr.container_ptr, offset + @intCast(u32, field_offset)); + + return switch (parent_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, + }, + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = @intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const index = elem_ptr.index; - const elem_offset = index * elem_ptr.elem_ty.abiSize(func.target); - return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); - }, - .opt_payload_ptr => { - const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - return func.lowerParentPtr(payload_ptr.container_ptr, offset); - }, - else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}), } } fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - const module = func.bin_file.base.options.module.?; - const decl = module.declPtr(decl_index); - module.markDeclAlive(decl); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = decl.ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const mod = func.bin_file.base.options.module.?; + const decl = mod.declPtr(decl_index); + try mod.markDeclAlive(decl); + const ptr_ty = try mod.singleMutPtrType(decl.ty); return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset); } fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - if (tv.ty.isSlice()) { + const mod = func.bin_file.base.options.module.?; + if (tv.ty.isSlice(mod)) { return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) }; } - const module = func.bin_file.base.options.module.?; - const decl = 
module.declPtr(decl_index); - if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) { + const decl = mod.declPtr(decl_index); + if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) { return WValue{ .imm32 = 0xaaaaaaaa }; } - module.markDeclAlive(decl); + try mod.markDeclAlive(decl); const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); const atom = func.bin_file.getAtom(atom_index); const target_sym_index = atom.sym_index; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try func.bin_file.addTableFunction(target_sym_index); return WValue{ .function_index = target_sym_index }; } else if (offset == 0) { @@ -3028,142 +3076,201 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( } fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } - if (val.isUndefDeep()) return func.emitUndefined(ty); - if (val.castTag(.decl_ref)) |decl_ref| { - const decl_index = decl_ref.data; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - if (val.castTag(.decl_ref_mut)) |decl_ref_mut| { - const decl_index = decl_ref_mut.data.decl_index; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - const target = func.target; - switch (ty.zigTypeTag()) { - .Void => return WValue{ .none = {} }, - .Int => { - const int_info = ty.intInfo(func.target); + if (val.isUndefDeep(mod)) return func.emitUndefined(ty); + + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { + .Array => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .Struct => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, struct_obj.backing_int_ty); + }, + .Vector => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .Frame, + .AnyFrame, + => return func.fail("Wasm TODO: LowerConstant for type {}", .{ty.fmt(mod)}), + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => 
unreachable, // non-runtime values + .false, .true => return WValue{ .imm32 = switch (simple_value) { + .false => 0, + .true => 1, + else => unreachable, + } }, + }, + .variable, + .extern_func, + .func, + .enum_literal, + .empty_enum_value, + => unreachable, // non-runtime values + .int => { + const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { 0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u6, int_info.bits), )) }, 33...64 => return WValue{ .imm64 = toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u7, int_info.bits), ) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - 33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) }, + 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, } }, - .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - .Float => switch (ty.floatBits(func.target)) { - 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) }, - 32 => return WValue{ .float32 = val.toFloat(f32) }, - 64 => return WValue{ .float64 = val.toFloat(f64) }, - else => unreachable, + .err => |err| { + const int = try mod.getErrorValue(err.name); + return WValue{ .imm32 = int }; }, - .Pointer => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - .zero, .null_value => return WValue{ .imm32 = 0 }, - else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), - }, - .Enum => { - if (val.castTag(.enum_field_index)) |field_index| { - switch (ty.tag()) { - .enum_simple => return WValue{ .imm32 = field_index.data }, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return func.lowerConstant(tag_val, enum_full.tag_ty); - } else { - return WValue{ .imm32 = field_index.data }; - } - }, - .enum_numbered => { - const index = field_index.data; - const enum_data = ty.castTag(.enum_numbered).?.data; - const enum_val = enum_data.values.keys()[index]; - return func.lowerConstant(enum_val, enum_data.tag_ty); - }, - else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}), - } - } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); - return func.lowerConstant(val, int_tag_ty); - } - }, - .ErrorSet => switch (val.tag()) { - .@"error" => { - const kv = try func.bin_file.base.options.module.?.getErrorValue(val.getError().?); - return WValue{ .imm32 = kv.value }; - }, - else => return WValue{ .imm32 = 0 }, - }, - .ErrorUnion => { - const error_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + .error_union => |error_union| { + const err_tv: TypedValue = switch (error_union.val) { + .err_name => |err_name| .{ + .ty = ty.errorUnionSet(mod), + .val = (try mod.intern(.{ .err = .{ + .ty = ty.errorUnionSet(mod).toIntern(), + .name = err_name, + } })).toValue(), + }, + .payload => .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, 
+ }; + const payload_type = ty.errorUnionPayload(mod); + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.initTag(.zero); - return func.lowerConstant(err_val, error_type); + return func.lowerConstant(err_tv.val, err_tv.ty); } + return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, - .Optional => if (ty.optionalReprIsPayload()) { - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); - if (val.castTag(.opt_payload)) |payload| { - return func.lowerConstant(payload.data, pl_ty); - } else if (val.isNull()) { - return WValue{ .imm32 = 0 }; + .enum_tag => |enum_tag| { + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); + }, + .float => |float| switch (float.storage) { + .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) }, + .f32 => |f32_val| return WValue{ .float32 = f32_val }, + .f64 => |f64_val| return WValue{ .float64 = f64_val }, + else => unreachable, + }, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0), + .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0), + .int => |int| return func.lowerConstant(int.toValue(), mod.intern_pool.typeOf(int).toType()), + .opt_payload, .elem, .field => return func.lowerParentPtr(val), + else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}), + }, + .opt => if (ty.optionalReprIsPayload(mod)) { + const pl_ty = ty.optionalChild(mod); + if (val.optionalValue(mod)) |payload| { + return func.lowerConstant(payload, pl_ty); } else { - return func.lowerConstant(val, pl_ty); + return WValue{ .imm32 = 0 }; } } else { - const is_pl = val.tag() == .opt_payload; - return WValue{ .imm32 = @boolToInt(is_pl) }; + return WValue{ .imm32 = @boolToInt(!val.isNull(mod)) }; }, - .Struct => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout == .Packed); - var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer - val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; - var payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = std.mem.readIntLittle(u64, &buf), - }; - const int_val = Value.initPayload(&payload.base); - return func.lowerConstant(int_val, struct_obj.backing_int_ty); + .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .vector_type => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .struct_type, .anon_struct_type => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as an integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, struct_obj.backing_int_ty); + }, + else => unreachable, }, - .Vector => { - assert(determineSimdStoreStrategy(ty, target) == .direct); - var buf:
[16]u8 = undefined; - val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; - return func.storeSimdImmd(buf); - }, - .Union => { + .un => |union_obj| { // in this case we have a packed union which will not be passed by reference. - const union_ty = ty.cast(Type.Payload.Union).?.data; - const union_obj = val.castTag(.@"union").?.data; - const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?; - const field_ty = union_ty.fields.values()[field_index].ty; - return func.lowerConstant(union_obj.val, field_ty); + const field_index = ty.unionTagFieldIndex(union_obj.tag.toValue(), func.bin_file.base.options.module.?).?; + const field_ty = ty.unionFields(mod).values()[field_index].ty; + return func.lowerConstant(union_obj.val.toValue(), field_ty); }, - else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .memoized_call => unreachable, } } @@ -3176,9 +3283,10 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { } fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa }, - .Int, .Enum => switch (ty.intInfo(func.target).bits) { + .Int, .Enum => switch (ty.intInfo(mod).bits) { 0...32 => return WValue{ .imm32 = 0xaaaaaaaa }, 33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa }, else => unreachable, @@ -3195,9 +3303,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { else => unreachable, }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); - if (ty.optionalReprIsPayload()) { + const pl_ty = ty.optionalChild(mod); + if (ty.optionalReprIsPayload(mod)) { return func.emitUndefined(pl_ty); } return WValue{ .imm32 = 0xaaaaaaaa }; @@ -3206,11 +3313,11 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { return WValue{ .imm32 = 0xaaaaaaaa }; }, .Struct => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; assert(struct_obj.layout == .Packed); return func.emitUndefined(struct_obj.backing_int_ty); }, - else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}), + else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}), } } @@ -3218,56 +3325,52 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
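/// For example, an enum tag lowers to its underlying integer value, an error lowers to its index in the global error set, and a pointer whose address is a comptime-known integer lowers to that address.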
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { - const target = func.target; - switch (ty.zigTypeTag()) { - .Enum => { - if (val.castTag(.enum_field_index)) |field_index| { - switch (ty.tag()) { - .enum_simple => return @bitCast(i32, field_index.data), - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return func.valueAsI32(tag_val, enum_full.tag_ty); - } else return @bitCast(i32, field_index.data); - }, - .enum_numbered => { - const index = field_index.data; - const enum_data = ty.castTag(.enum_numbered).?.data; - return func.valueAsI32(enum_data.values.keys()[index], enum_data.tag_ty); - }, - else => unreachable, - } - } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); - return func.valueAsI32(val, int_tag_ty); - } + const mod = func.bin_file.base.options.module.?; + + switch (val.ip_index) { + .none => {}, + .bool_true => return 1, + .bool_false => return 0, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), + .int => |int| intStorageAsI32(int.storage, mod), + .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), + .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)), + else => unreachable, }, - .Int => switch (ty.intInfo(func.target).signedness) { - .signed => return @truncate(i32, val.toSignedInt(target)), - .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))), - }, - .ErrorSet => { - const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) 
catch unreachable; // passed invalid `Value` to function - return @bitCast(i32, kv.value); - }, - .Bool => return @intCast(i32, val.toSignedInt(target)), - .Pointer => return @intCast(i32, val.toSignedInt(target)), - else => unreachable, // Programmer called this function for an illegal type } + + return switch (ty.zigTypeTag(mod)) { + .ErrorSet => @bitCast(i32, val.getErrorInt(mod)), + else => unreachable, // Programmer called this function for an illegal type + }; +} + +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage, mod); +} + +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { + return switch (storage) { + .i64 => |x| @intCast(i32, x), + .u64 => |x| @bitCast(i32, @intCast(u32, x)), + .big_int => unreachable, + .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))), + }; } fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const block_ty = func.air.getRefType(ty_pl.ty); - const wasm_block_ty = genBlockType(block_ty, func.target); + const wasm_block_ty = genBlockType(block_ty, mod); const extra = func.air.extraData(Air.Block, ty_pl.payload); const body = func.air.extra[extra.end..][0..extra.data.body_len]; // if wasm_block_ty is non-empty, we create a register to store the temporary value const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: { - const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty; + const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty; break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten } else WValue.none; @@ -3369,7 +3472,7 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const operand_ty = func.air.typeOf(bin_op.lhs); + const operand_ty = func.typeOf(bin_op.lhs); const result = try (try func.cmp(lhs, rhs, operand_ty, op)).toLocal(func, Type.u32); // comparison result is always 32 bits func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } @@ -3379,16 +3482,16 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In /// NOTE: This leaves the result on top of the stack, rather than a new local. fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { assert(!(lhs != .stack and rhs == .stack)); - if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) { + const payload_ty = ty.optionalChild(mod); + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // When we hit this case, we must check the value of optionals // that are not pointers. 
This means first checking against non-null for // both lhs and rhs, as well as checking that the payloads of lhs and rhs match return func.cmpOptionals(lhs, rhs, ty, op); } - } else if (isByRef(ty, func.target)) { + } else if (isByRef(ty, mod)) { return func.cmpBigInt(lhs, rhs, ty, op); } else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) { return func.cmpFloat16(lhs, rhs, op); @@ -3401,13 +3504,13 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO const signedness: std.builtin.Signedness = blk: { // by default we assume the operand type is unsigned (i.e. bools and enum values) - if (ty.zigTypeTag() != .Int) break :blk .unsigned; + if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned; // in case of an actual integer, we emit the correct signedness - break :blk ty.intInfo(func.target).signedness; + break :blk ty.intInfo(mod).signedness; }; const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, func.target), + .valtype1 = typeToValtype(ty, mod), .op = switch (op) { .lt => .lt, .lte => .le, @@ -3464,11 +3567,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const br = func.air.instructions.items(.data)[inst].br; const block = func.blocks.get(br.block_inst).?; // if operand has codegen bits we should break with a value - if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) { + if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) { const operand = try func.resolveInst(br.operand); try func.lowerToStack(operand); @@ -3489,17 +3593,18 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.air.typeOf(ty_op.operand); + const operand_ty = func.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; const result = result: { - if (operand_ty.zigTypeTag() == .Bool) { + if (operand_ty.zigTypeTag(mod) == .Bool) { try func.emitWValue(operand); try func.addTag(.i32_eqz); const not_tmp = try func.allocLocal(operand_ty); try func.addLabel(.local_set, not_tmp.local.value); break :result not_tmp; } else { - const operand_bits = operand_ty.intInfo(func.target).bits; + const operand_bits = operand_ty.intInfo(mod).bits; const wasm_bits = toWasmBits(operand_bits) orelse { return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits}); }; @@ -3554,8 +3659,8 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const result = result: { const operand = try func.resolveInst(ty_op.operand); - const wanted_ty = func.air.typeOfIndex(inst); - const given_ty = func.air.typeOf(ty_op.operand); + const wanted_ty = func.typeOfIndex(inst); + const given_ty = func.typeOf(ty_op.operand); if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) { const bitcast_result = try func.bitcast(wanted_ty, given_ty, operand); break :result try bitcast_result.toLocal(func, wanted_ty); @@ -3566,16 +3671,17 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction if
(!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand; - if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand; - if (wanted_ty.bitSize(func.target) > 64) return operand; - assert((wanted_ty.isInt() and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt())); + if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand; + if (wanted_ty.bitSize(mod) > 64) return operand; + assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod))); const opcode = buildOpcode(.{ .op = .reinterpret, - .valtype1 = typeToValtype(wanted_ty, func.target), - .valtype2 = typeToValtype(given_ty, func.target), + .valtype1 = typeToValtype(wanted_ty, mod), + .valtype2 = typeToValtype(given_ty, mod), }); try func.emitWValue(operand); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); @@ -3583,19 +3689,21 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn } fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.StructField, ty_pl.payload); const struct_ptr = try func.resolveInst(extra.data.struct_operand); - const struct_ty = func.air.typeOf(extra.data.struct_operand).childType(); + const struct_ty = func.typeOf(extra.data.struct_operand).childType(mod); const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index); func.finishAir(inst, result, &.{extra.data.struct_operand}); } fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try func.resolveInst(ty_op.operand); - const struct_ty = func.air.typeOf(ty_op.operand).childType(); + const struct_ty = func.typeOf(ty_op.operand).childType(mod); const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index); func.finishAir(inst, result, &.{ty_op.operand}); @@ -3609,19 +3717,20 @@ fn structFieldPtr( struct_ty: Type, index: u32, ) InnerError!WValue { - const result_ty = func.air.typeOfIndex(inst); - const offset = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const result_ty = func.typeOfIndex(inst); + const offset = switch (struct_ty.containerLayout(mod)) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { - if (result_ty.ptrInfo().data.host_size != 0) { + if (result_ty.ptrInfo(mod).host_size != 0) { break :offset @as(u32, 0); } - break :offset struct_ty.packedStructFieldByteOffset(index, func.target); + break :offset struct_ty.packedStructFieldByteOffset(index, mod); }, .Union => 0, else => unreachable, }, - else => struct_ty.structFieldOffset(index, func.target), + else => struct_ty.structFieldOffset(index, mod), }; // save a load and store when we can simply reuse the operand if (offset == 0) { @@ -3636,22 +3745,23 @@ fn structFieldPtr( } fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = func.air.typeOf(struct_field.struct_operand); + const struct_ty = 
func.typeOf(struct_field.struct_operand); const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); + const field_ty = struct_ty.structFieldType(field_index, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); - const result = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + const result = switch (struct_ty.containerLayout(mod)) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => result: { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - const offset = struct_obj.packedFieldBitOffset(func.target, field_index); + const struct_obj = mod.typeToStruct(struct_ty).?; + const offset = struct_obj.packedFieldBitOffset(mod, field_index); const backing_ty = struct_obj.backing_int_ty; - const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse { + const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse { return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); }; const const_wvalue = if (wasm_bits == 32) @@ -3667,25 +3777,17 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else try func.binOp(operand, const_wvalue, backing_ty, .shr); - if (field_ty.zigTypeTag() == .Float) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime() and struct_obj.fields.count() == 1) { + } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) { // In this case we do not have to perform any transformations, // we can simply reuse the operand. 
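// For example, a packed struct with a single pointer field has a backing integer of exactly the pointer's width, so the operand's bits already equal the field value and no shift or truncation is needed.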
break :result func.reuseOperand(struct_field.struct_operand, operand); - } else if (field_ty.isPtrAtRuntime()) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); break :result try truncated.toLocal(func, field_ty); } @@ -3693,8 +3795,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try truncated.toLocal(func, field_ty); }, .Union => result: { - if (isByRef(struct_ty, func.target)) { - if (!isByRef(field_ty, func.target)) { + if (isByRef(struct_ty, mod)) { + if (!isByRef(field_ty, mod)) { const val = try func.load(operand, field_ty, 0); break :result try val.toLocal(func, field_ty); } else { @@ -3704,26 +3806,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, struct_ty.bitSize(func.target)), - }; - const union_int_type = Type.initPayload(&payload.base); - if (field_ty.zigTypeTag() == .Float) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod))); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime()) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); break :result try truncated.toLocal(func, field_ty); } @@ -3733,11 +3823,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }, else => result: { - const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)}); + const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse { + return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)}); }; - if (isByRef(field_ty, func.target)) { + if (isByRef(field_ty, mod)) { switch (operand) { .stack_offset => |stack_offset| { break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } }; @@ -3754,11 +3843,12 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; // result type is always 'noreturn' const blocktype = wasm.block_empty; const pl_op = 
func.air.instructions.items(.data)[inst].pl_op; const target = try func.resolveInst(pl_op.operand); - const target_ty = func.air.typeOf(pl_op.operand); + const target_ty = func.typeOf(pl_op.operand); const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload); const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1); defer func.gpa.free(liveness.deaths); @@ -3787,7 +3877,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { errdefer func.gpa.free(values); for (items, 0..) |ref, i| { - const item_val = func.air.value(ref).?; + const item_val = (try func.air.value(ref, mod)).?; const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; @@ -3810,7 +3900,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When the target is an integer size larger than u32, we have no way to use the value // as an index, therefore we also use an if/else-chain for those cases. // TODO: Benchmark this to find a proper value; LLVM seems to draw the line at '40~45'. - const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32; + const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32; const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len]; const has_else_body = else_body.len != 0; @@ -3855,7 +3945,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // for errors that are not present in any branch. This is fine as this default // case will never be hit for those cases, but we do save runtime cost and size // by using a jump table for this instead of if-else chains. - break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable; + break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable; }; func.mir_extra.appendAssumeCapacity(idx); } else if (has_else_body) { @@ -3866,10 +3956,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const signedness: std.builtin.Signedness = blk: { // by default we assume the operand type is unsigned (i.e. bools and enum values) - if (target_ty.zigTypeTag() != .Int) break :blk .unsigned; + if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned; // in case of an actual integer, we emit the correct signedness - break :blk target_ty.intInfo(func.target).signedness; + break :blk target_ty.intInfo(mod).signedness; }; try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body)); @@ -3882,7 +3972,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(case.values[0].value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, func.target), + .valtype1 = typeToValtype(target_ty, mod), .op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
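// For a case such as `42 => ...`, for example, this compares the operand against the constant 42 with `ne`, so the conditional branch that follows skips the case body whenever the two differ.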
.signedness = signedness, }); @@ -3896,7 +3986,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(value.value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, func.target), + .valtype1 = typeToValtype(target_ty, mod), .op = .eq, .signedness = signedness, }); @@ -3933,13 +4023,14 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const err_union_ty = func.air.typeOf(un_op); - const pl_ty = err_union_ty.errorUnionPayload(); + const err_union_ty = func.typeOf(un_op); + const pl_ty = err_union_ty.errorUnionPayload(mod); const result = result: { - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { switch (opcode) { .i32_ne => break :result WValue{ .imm32 = 0 }, .i32_eq => break :result WValue{ .imm32 = 1 }, @@ -3948,10 +4039,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } try func.emitWValue(operand); - if (pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)), - .alignment = Type.anyerror.abiAlignment(func.target), + .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)), + .alignment = Type.anyerror.abiAlignment(mod), }); } @@ -3967,23 +4058,24 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; - const payload_ty = err_ty.errorUnionPayload(); + const op_ty = func.typeOf(ty_op.operand); + const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; + const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (op_is_ptr) { break :result func.reuseOperand(ty_op.operand, operand); } break :result WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)); - if (op_is_ptr or isByRef(payload_ty, func.target)) { + const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + if (op_is_ptr or isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -3994,48 +4086,50 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo } fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; - const payload_ty = err_ty.errorUnionPayload(); + const op_ty = 
func.typeOf(ty_op.operand); + const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; + const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (err_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { break :result WValue{ .imm32 = 0 }; } - if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target))); + const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod))); break :result try error_val.toLocal(func, Type.anyerror); }; func.finishAir(inst, result, &.{ty_op.operand}); } fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const err_ty = func.air.typeOfIndex(inst); + const err_ty = func.typeOfIndex(inst); - const pl_ty = func.air.typeOf(ty_op.operand); + const pl_ty = func.typeOf(ty_op.operand); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); try func.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. 
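// The error tag is stored as 16 bits (hence the `i32_store16`), and the error value zero is reserved to mean "no error", so zeroing it marks the union as holding a valid payload.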
try func.emitWValue(err_union); try func.addImm32(0); - const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target)); + const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); break :result err_union; }; @@ -4043,24 +4137,25 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void } fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const err_ty = func.air.getRefType(ty_op.ty); - const pl_ty = err_ty.errorUnionPayload(); + const pl_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target))); + try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target)); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); + const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod)); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -4073,16 +4168,17 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.getRefType(ty_op.ty); const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.air.typeOf(ty_op.operand); - if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) { + const operand_ty = func.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) { return func.fail("todo Wasm intcast for vectors", .{}); } - if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) { + if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(func.target))).?; - const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?; + const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else @@ -4096,8 +4192,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Asserts type's bitsize <= 128 /// NOTE: May leave the result on the top of the stack. 
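/// For example, narrowing an i64 to an i32 emits `i32.wrap_i64`, widening an i32 to an i64 emits `i64.extend_i32_s` or `i64.extend_i32_u` depending on the wanted signedness, and a 128-bit result is written word by word into a stack slot instead of a local.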
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const given_bitsize = @intCast(u16, given.bitSize(func.target)); - const wanted_bitsize = @intCast(u16, wanted.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const given_bitsize = @intCast(u16, given.bitSize(mod)); + const wanted_bitsize = @intCast(u16, wanted.bitSize(mod)); assert(given_bitsize <= 128); assert(wanted_bitsize <= 128); @@ -4110,7 +4207,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro try func.addTag(.i32_wrap_i64); } else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) { try func.emitWValue(operand); - try func.addTag(if (wanted.isSignedInt()) .i64_extend_i32_s else .i64_extend_i32_u); + try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u); } else if (wanted_bits == 128) { // for 128bit integers we store the integer in the virtual stack, rather than a local const stack_ptr = try func.allocStack(wanted); @@ -4119,14 +4216,14 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro // for 32 bit integers, we first coerce the value into a 64 bit integer before storing it // meaning fewer store operations are required. const lhs = if (op_bits == 32) blk: { - break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64); + break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64); } else operand; // store msb first try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset()); // For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value - if (wanted.isSignedInt()) { + if (wanted.isSignedInt(mod)) { try func.emitWValue(stack_ptr); const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr); try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset()); @@ -4141,11 +4238,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const op_ty = func.air.typeOf(un_op); - const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty; + const op_ty = func.typeOf(un_op); + const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty; const is_null = try func.isNull(operand, optional_ty, opcode); const result = try is_null.toLocal(func, optional_ty); func.finishAir(inst, result, &.{un_op}); @@ -4154,20 +4252,19 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: /// For a given type and operand, checks if it's considered `null`.
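/// When the optional's representation is the payload itself (e.g. `?*T`), null is the zero value; otherwise the non-null flag byte stored directly behind the payload is loaded and compared.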
/// NOTE: Leaves the result on the stack fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; try func.emitWValue(operand); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); - if (!optional_ty.optionalReprIsPayload()) { + const payload_ty = optional_ty.optionalChild(mod); + if (!optional_ty.optionalReprIsPayload(mod)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)}); + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)}); }; try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } - } else if (payload_ty.isSlice()) { + } else if (payload_ty.isSlice(mod)) { switch (func.arch()) { .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }), .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }), @@ -4183,18 +4280,19 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod } fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const opt_ty = func.air.typeOf(ty_op.operand); - const payload_ty = func.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const opt_ty = func.typeOf(ty_op.operand); + const payload_ty = func.typeOfIndex(inst); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.finishAir(inst, .none, &.{ty_op.operand}); } const result = result: { const operand = try func.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand); + if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand); - if (isByRef(payload_ty, func.target)) { + if (isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, 0, .new); } @@ -4205,14 +4303,14 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.air.typeOf(ty_op.operand).childType(); + const opt_ty = func.typeOf(ty_op.operand).childType(mod); const result = result: { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) { + const payload_ty = opt_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4222,22 +4320,21 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtrSet(func: *CodeGen, 
inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.air.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const opt_ty = func.typeOf(ty_op.operand).childType(mod); + const payload_ty = opt_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { return func.finishAir(inst, operand, &.{ty_op.operand}); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)}); }; try func.emitWValue(operand); @@ -4250,11 +4347,12 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const payload_ty = func.air.typeOf(ty_op.operand); + const payload_ty = func.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - const non_null_bit = try func.allocStack(Type.initTag(.u1)); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const non_null_bit = try func.allocStack(Type.u1); try func.emitWValue(non_null_bit); try func.addImm32(1); try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 }); @@ -4262,13 +4360,12 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOfIndex(inst); - if (op_ty.optionalReprIsPayload()) { + const op_ty = func.typeOfIndex(inst); + if (op_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)}); }; // Create optional type, set the non-null bit, and store the operand inside the optional type @@ -4291,7 +4388,7 @@ fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const slice_ty = func.air.typeOfIndex(inst); + const slice_ty = func.typeOfIndex(inst); const slice = try func.allocStack(slice_ty); try func.store(slice, lhs, Type.usize, 0); @@ -4308,13 +4405,14 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = 
func.air.instructions.items(.data)[inst].bin_op; - const slice_ty = func.air.typeOf(bin_op.lhs); + const slice_ty = func.typeOf(bin_op.lhs); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const elem_ty = slice_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); // load pointer onto stack _ = try func.load(slice, Type.usize, 0); @@ -4328,7 +4426,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result_ptr.local.value); - const result = if (!isByRef(elem_ty, func.target)) result: { + const result = if (!isByRef(elem_ty, mod)) result: { const elem_val = try func.load(result_ptr, elem_ty, 0); break :result try elem_val.toLocal(func, elem_ty); } else result_ptr; @@ -4337,11 +4435,12 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod); + const elem_size = elem_ty.abiSize(mod); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); @@ -4380,7 +4479,7 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const wanted_ty = func.air.getRefType(ty_op.ty); - const op_ty = func.air.typeOf(ty_op.operand); + const op_ty = func.typeOf(ty_op.operand); const result = try func.trunc(operand, wanted_ty, op_ty); func.finishAir(inst, try result.toLocal(func, wanted_ty), &.{ty_op.operand}); @@ -4389,13 +4488,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Truncates a given operand to a given type, discarding any overflowed bits. /// NOTE: Resulting value is left on the stack.
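/// For example, truncating a u32 operand to a u5 first narrows via `intcast` and then masks the result down to 5 bits with `wrapOperand`, since wasm only provides native 32- and 64-bit integers.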
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue { - const given_bits = @intCast(u16, given_ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const given_bits = @intCast(u16, given_ty.bitSize(mod)); if (toWasmBits(given_bits) == null) { return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits}); } var result = try func.intcast(operand, given_ty, wanted_ty); - const wanted_bits = @intCast(u16, wanted_ty.bitSize(func.target)); + const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod)); const wasm_bits = toWasmBits(wanted_bits).?; if (wasm_bits != wanted_bits) { result = try func.wrapOperand(result, wanted_ty); @@ -4412,32 +4512,34 @@ fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const array_ty = func.air.typeOf(ty_op.operand).childType(); + const array_ty = func.typeOf(ty_op.operand).childType(mod); const slice_ty = func.air.getRefType(ty_op.ty); // create a slice on the stack const slice_local = try func.allocStack(slice_ty); // store the array ptr in the slice - if (array_ty.hasRuntimeBitsIgnoreComptime()) { + if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.store(slice_local, operand, Type.usize, 0); } // store the length of the array in the slice - const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) }; + const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) }; try func.store(slice_local, len, Type.usize, func.ptrSize()); func.finishAir(inst, slice_local, &.{ty_op.operand}); } fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ptr_ty = func.air.typeOf(un_op); - const result = if (ptr_ty.isSlice()) + const ptr_ty = func.typeOf(un_op); + const result = if (ptr_ty.isSlice(mod)) try func.slicePtr(operand) else switch (operand) { // for stack offset, return a pointer to this offset. 
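// On wasm, a pointer is simply an integer offset into linear memory, so no conversion instruction is needed; a slice operand first drops its length half via `slicePtr`.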
@@ -4448,16 +4550,17 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = ptr_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const elem_ty = ptr_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { _ = try func.load(ptr, Type.usize, 0); } else { try func.lowerToStack(ptr); @@ -4472,7 +4575,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_result = val: { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { break :val result; } defer result.free(func); // only free if it's not returned like above @@ -4484,18 +4587,19 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = func.air.typeOf(bin_op.lhs); - const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const ptr_ty = func.typeOf(bin_op.lhs); + const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod); + const elem_size = elem_ty.abiSize(mod); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); // load pointer onto the stack - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { _ = try func.load(ptr, Type.usize, 0); } else { try func.lowerToStack(ptr); @@ -4513,24 +4617,25 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try func.resolveInst(bin_op.lhs); const offset = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); - const pointee_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const ptr_ty = func.typeOf(bin_op.lhs); + const pointee_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; - const valtype = typeToValtype(Type.usize, func.target); + const valtype = typeToValtype(Type.usize, mod); const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul }); const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op }); try func.lowerToStack(ptr); try func.emitWValue(offset); - try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target)))); + try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod)))); try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); @@ -4540,6 
+4645,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -4548,18 +4654,18 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ptr = try func.resolveInst(bin_op.lhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const value = try func.resolveInst(bin_op.rhs); - const len = switch (ptr_ty.ptrSize()) { + const len = switch (ptr_ty.ptrSize(mod)) { .Slice => try func.sliceLen(ptr), - .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }), + .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }), .C, .Many => unreachable, }; - const elem_ty = if (ptr_ty.ptrSize() == .One) - ptr_ty.childType().childType() + const elem_ty = if (ptr_ty.ptrSize(mod) == .One) + ptr_ty.childType(mod).childType(mod) else - ptr_ty.childType(); + ptr_ty.childType(mod); const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty); try func.memset(elem_ty, dst_ptr, len, value); @@ -4572,7 +4678,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void /// this to wasm's memset instruction. When the feature is not present, /// we implement it manually. fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { - const abi_size = @intCast(u32, elem_ty.abiSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const abi_size = @intCast(u32, elem_ty.abiSize(mod)); // When bulk_memory is enabled, we lower it to wasm's memset instruction. // If not, we lower it ourselves. 
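As the comment above says, `memset` takes two paths: with the bulk-memory feature it lowers to wasm's native memset instruction; without it, to a manual store loop. A runnable sketch of the fallback (helper name illustrative; the real loop advances a pointer by `abi_size` bytes per element rather than indexing a slice):

```zig
const std = @import("std");

// Manual memset fallback: store the value one element at a time, as the
// backend does when the bulk-memory feature is unavailable.
fn memsetManual(dest: []u8, value: u8) void {
    var index: usize = 0;
    while (index < dest.len) : (index += 1) {
        dest[index] = value;
    }
}

test "manual memset" {
    var buf = [_]u8{ 1, 2, 3, 4 };
    memsetManual(&buf, 0xaa);
    try std.testing.expectEqual([_]u8{ 0xaa, 0xaa, 0xaa, 0xaa }, buf);
}
```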
@@ -4660,30 +4767,31 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue } fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const array_ty = func.air.typeOf(bin_op.lhs); + const array_ty = func.typeOf(bin_op.lhs); const array = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = array_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const elem_ty = array_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); - if (isByRef(array_ty, func.target)) { + if (isByRef(array_ty, mod)) { try func.lowerToStack(array); try func.emitWValue(index); try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); try func.addTag(.i32_mul); try func.addTag(.i32_add); } else { - std.debug.assert(array_ty.zigTypeTag() == .Vector); + std.debug.assert(array_ty.zigTypeTag(mod) == .Vector); switch (index) { inline .imm32, .imm64 => |lane| { - const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(func.target)) { - 8 => if (elem_ty.isSignedInt()) .i8x16_extract_lane_s else .i8x16_extract_lane_u, - 16 => if (elem_ty.isSignedInt()) .i16x8_extract_lane_s else .i16x8_extract_lane_u, - 32 => if (elem_ty.isInt()) .i32x4_extract_lane else .f32x4_extract_lane, - 64 => if (elem_ty.isInt()) .i64x2_extract_lane else .f64x2_extract_lane, + const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) { + 8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u, + 16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u, + 32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane, + 64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane, else => unreachable, }; @@ -4715,7 +4823,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { break :val result; } defer result.free(func); // only free if no longer needed and not returned like above @@ -4728,22 +4836,23 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const dest_ty = func.air.typeOfIndex(inst); - const op_ty = func.air.typeOf(ty_op.operand); + const dest_ty = func.typeOfIndex(inst); + const op_ty = func.typeOf(ty_op.operand); - if (op_ty.abiSize(func.target) > 8) { + if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{}); } try func.emitWValue(operand); const op = buildOpcode(.{ .op = .trunc, - .valtype1 = typeToValtype(dest_ty, func.target), - .valtype2 = typeToValtype(op_ty, func.target), - .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(dest_ty, mod), + .valtype2 = typeToValtype(op_ty, mod), + .signedness = if (dest_ty.isSignedInt(mod)) .signed else .unsigned, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty); @@ -4752,22 +4861,23 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } 
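The `airFloatToInt` hunk just above emits a wasm `trunc` opcode (round toward zero) from the source float type and then calls `wrapOperand`, since wasm has no scalar types narrower than 32 bits. A behavioral sketch using this era's cast builtins (helper name illustrative):

```zig
const std = @import("std");

// Float→int as lowered above: truncate toward zero into the containing
// wasm integer, then wrap to the destination's actual bit width.
fn floatToU8(v: f32) u8 {
    const wide = @floatToInt(u32, v); // wasm's f32→i32 trunc
    return @truncate(u8, wide); // wrapOperand's role
}

test "float to int with wrap" {
    try std.testing.expectEqual(@as(u8, 44), floatToU8(300.7));
}
```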
fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const dest_ty = func.air.typeOfIndex(inst); - const op_ty = func.air.typeOf(ty_op.operand); + const dest_ty = func.typeOfIndex(inst); + const op_ty = func.typeOf(ty_op.operand); - if (op_ty.abiSize(func.target) > 8) { + if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{}); } try func.emitWValue(operand); const op = buildOpcode(.{ .op = .convert, - .valtype1 = typeToValtype(dest_ty, func.target), - .valtype2 = typeToValtype(op_ty, func.target), - .signedness = if (op_ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(dest_ty, mod), + .valtype2 = typeToValtype(op_ty, mod), + .signedness = if (op_ty.isSignedInt(mod)) .signed else .unsigned, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); @@ -4777,18 +4887,19 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const ty = func.air.typeOfIndex(inst); - const elem_ty = ty.childType(); + const ty = func.typeOfIndex(inst); + const elem_ty = ty.childType(mod); - if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: { + if (determineSimdStoreStrategy(ty, mod) == .direct) blk: { switch (operand) { // when the operand lives in the linear memory section, we can directly // load and splat the value at once. Meaning we do not first have to load // the scalar value onto the stack. 
.stack_offset, .memory, .memory_offset => { - const opcode = switch (elem_ty.bitSize(func.target)) { + const opcode = switch (elem_ty.bitSize(mod)) { 8 => std.wasm.simdOpcode(.v128_load8_splat), 16 => std.wasm.simdOpcode(.v128_load16_splat), 32 => std.wasm.simdOpcode(.v128_load32_splat), @@ -4803,18 +4914,18 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.mir_extra.appendSlice(func.gpa, &[_]u32{ opcode, operand.offset(), - elem_ty.abiAlignment(func.target), + elem_ty.abiAlignment(mod), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); try func.addLabel(.local_set, result.local.value); return func.finishAir(inst, result, &.{ty_op.operand}); }, .local => { - const opcode = switch (elem_ty.bitSize(func.target)) { + const opcode = switch (elem_ty.bitSize(mod)) { 8 => std.wasm.simdOpcode(.i8x16_splat), 16 => std.wasm.simdOpcode(.i16x8_splat), - 32 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), - 64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat), + 32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), + 64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat), else => break :blk, // Cannot make use of simd-instructions }; const result = try func.allocLocal(ty); @@ -4828,14 +4939,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, } } - const elem_size = elem_ty.bitSize(func.target); - const vector_len = @intCast(usize, ty.vectorLen()); + const elem_size = elem_ty.bitSize(mod); + const vector_len = @intCast(usize, ty.vectorLen(mod)); if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) { return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); } const result = try func.allocStack(ty); - const elem_byte_size = @intCast(u32, elem_ty.abiSize(func.target)); + const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod)); var index: usize = 0; var offset: u32 = 0; while (index < vector_len) : (index += 1) { @@ -4855,26 +4966,25 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const inst_ty = func.air.typeOfIndex(inst); + const mod = func.bin_file.base.options.module.?; + const inst_ty = func.typeOfIndex(inst); const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try func.resolveInst(extra.a); const b = try func.resolveInst(extra.b); - const mask = func.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; - const child_ty = inst_ty.childType(); - const elem_size = child_ty.abiSize(func.target); + const child_ty = inst_ty.childType(mod); + const elem_size = child_ty.abiSize(mod); - const module = func.bin_file.base.options.module.?; // TODO: One of them could be by ref; handle in loop - if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) { + if (isByRef(func.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { - var buf: Value.ElemValueBuffer = undefined; - const value = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target); + const value = (try mask.elemValue(mod, index)).toSignedInt(mod); try 
func.emitWValue(result); @@ -4894,8 +5004,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lanes = std.mem.asBytes(operands[1..]); for (0..@intCast(usize, mask_len)) |index| { - var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target); + const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); const base_index = if (mask_elem >= 0) @intCast(u8, @intCast(i64, elem_size) * mask_elem) else @@ -4926,25 +5035,26 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; - const result_ty = func.air.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const result_ty = func.typeOfIndex(inst); + const len = @intCast(usize, result_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]); const result: WValue = result_value: { - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Array => { const result = try func.allocStack(result_ty); - const elem_ty = result_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(func.target)); - const sentinel = if (result_ty.sentinel()) |sent| blk: { + const elem_ty = result_ty.childType(mod); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); + const sentinel = if (result_ty.sentinel(mod)) |sent| blk: { break :blk try func.lowerConstant(sent, elem_ty); } else null; // When the element type is by reference, we must copy the entire // value. It is therefore safer to move the offset pointer and store // each value individually, instead of using store offsets. - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { // copy stack pointer into a temporary local, which is // moved for each element to store each value in the right position. const offset = try func.buildPointerOffset(result, 0, .new); @@ -4972,18 +5082,18 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } break :result_value result; }, - .Struct => switch (result_ty.containerLayout()) { + .Struct => switch (result_ty.containerLayout(mod)) { .Packed => { - if (isByRef(result_ty, func.target)) { + if (isByRef(result_ty, mod)) { return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{}); } - const struct_obj = result_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(result_ty).?; const fields = struct_obj.fields.values(); const backing_type = struct_obj.backing_int_ty; // ensure the result is zero'd const result = try func.allocLocal(backing_type); - if (struct_obj.backing_int_ty.bitSize(func.target) <= 32) + if (struct_obj.backing_int_ty.bitSize(mod) <= 32) try func.addImm32(0) else try func.addImm64(0); @@ -4992,20 +5102,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var current_bit: u16 = 0; for (elements, 0..) 
|elem, elem_index| { const field = fields[elem_index]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const shift_val = if (struct_obj.backing_int_ty.bitSize(func.target) <= 32) + const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32) WValue{ .imm32 = current_bit } else WValue{ .imm64 = current_bit }; const value = try func.resolveInst(elem); - const value_bit_size = @intCast(u16, field.ty.bitSize(func.target)); - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = value_bit_size, - }; - const int_ty = Type.initPayload(&int_ty_payload.base); + const value_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const int_ty = try mod.intType(.unsigned, value_bit_size); // load our current result on stack so we can perform all transformations // using only stack values. Saving the cost of loads and stores. @@ -5027,10 +5133,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(result_ty); const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset for (elements, 0..) |elem, elem_index| { - if (result_ty.structFieldValueComptime(elem_index) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_index); - const elem_size = @intCast(u32, elem_ty.abiSize(func.target)); + const elem_ty = result_ty.structFieldType(elem_index, mod); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const value = try func.resolveInst(elem); try func.store(offset, value, elem_ty, 0); @@ -5058,39 +5164,36 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data; const result = result: { - const union_ty = func.air.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(func.target); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_ty = func.typeOfIndex(inst); + const layout = union_ty.unionGetLayout(mod); + const union_obj = mod.typeToUnion(union_ty).?; const field = union_obj.fields.values()[extra.field_index]; const field_name = union_obj.fields.keys()[extra.field_index]; const tag_int = blk: { - const tag_ty = union_ty.unionTagTypeHypothetical(); - const enum_field_index = tag_ty.enumFieldIndex(field_name).?; - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, enum_field_index), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); + const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?; + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); break :blk try func.lowerConstant(tag_val, tag_ty); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { break :result WValue{ .none = {} }; } - assert(!isByRef(union_ty, func.target)); + assert(!isByRef(union_ty, mod)); break :result tag_int; } - if (isByRef(union_ty, func.target)) { + if (isByRef(union_ty, mod)) { const result_ptr = try func.allocStack(union_ty); const payload = try func.resolveInst(extra.init); if (layout.tag_align >= layout.payload_align) { - if (isByRef(field.ty, func.target)) { + if 
(isByRef(field.ty, mod)) { const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); try func.store(payload_ptr, payload, field.ty, 0); } else { @@ -5114,26 +5217,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result result_ptr; } else { const operand = try func.resolveInst(extra.init); - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, union_ty.bitSize(func.target)), - }; - const union_int_type = Type.initPayload(&payload.base); - if (field.ty.zigTypeTag() == .Float) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field.ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod))); + if (field.ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod))); const bitcasted = try func.bitcast(field.ty, int_type, operand); const casted = try func.trunc(bitcasted, int_type, union_int_type); break :result try casted.toLocal(func, field.ty); - } else if (field.ty.isPtrAtRuntime()) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field.ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + } else if (field.ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod))); const casted = try func.intcast(operand, int_type, union_int_type); break :result try casted.toLocal(func, field.ty); } @@ -5153,7 +5244,7 @@ fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pl_op = func.air.instructions.items(.data)[inst].pl_op; - const result = try func.allocLocal(func.air.typeOfIndex(inst)); + const result = try func.allocLocal(func.typeOfIndex(inst)); try func.addLabel(.memory_size, pl_op.payload); try func.addLabel(.local_set, result.local.value); func.finishAir(inst, result, &.{pl_op.operand}); @@ -5163,7 +5254,7 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void { const pl_op = func.air.instructions.items(.data)[inst].pl_op; const operand = try func.resolveInst(pl_op.operand); - const result = try func.allocLocal(func.air.typeOfIndex(inst)); + const result = try func.allocLocal(func.typeOfIndex(inst)); try func.emitWValue(operand); try func.addLabel(.memory_grow, pl_op.payload); try func.addLabel(.local_set, result.local.value); @@ -5171,14 +5262,14 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void { } fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - assert(operand_ty.hasRuntimeBitsIgnoreComptime()); + const mod = func.bin_file.base.options.module.?; + assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod)); assert(op == .eq or op == .neq); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = operand_ty.optionalChild(&buf); + const payload_ty = operand_ty.optionalChild(mod); // We store the final result in here that will be validated // if the optional is truly equal. 
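The comparison `cmpOptionals` assembles here checks the null tags and only loads and compares the payloads when both operands are non-null; behaviorally it matches this plain-Zig sketch (helper name illustrative):

```zig
const std = @import("std");

// Optional equality as the lowering structures it: null tags first,
// payloads only when both operands are non-null.
fn optionalEql(lhs: ?u32, rhs: ?u32) bool {
    const lhs_null = lhs == null;
    const rhs_null = rhs == null;
    if (lhs_null != rhs_null) return false; // one set, one empty
    if (lhs_null) return true; // both empty
    return lhs.? == rhs.?;
}

test "optional equality" {
    try std.testing.expect(optionalEql(null, null));
    try std.testing.expect(!optionalEql(5, null));
    try std.testing.expect(optionalEql(5, 5));
    try std.testing.expect(!optionalEql(5, 6));
}
```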
- var result = try func.ensureAllocLocal(Type.initTag(.i32)); + var result = try func.ensureAllocLocal(Type.i32); defer result.free(func); try func.startBlock(.block, wasm.block_empty); @@ -5189,7 +5280,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: _ = try func.load(lhs, payload_ty, 0); _ = try func.load(rhs, payload_ty, 0); - const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) }); + const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); try func.addLabel(.br_if, 0); @@ -5207,10 +5298,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: /// NOTE: Leaves the result of the comparison on top of the stack. /// TODO: Lower this to compiler_rt call when bitsize > 128 fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - assert(operand_ty.abiSize(func.target) >= 16); + const mod = func.bin_file.base.options.module.?; + assert(operand_ty.abiSize(mod) >= 16); assert(!(lhs != .stack and rhs == .stack)); - if (operand_ty.bitSize(func.target) > 128) { - return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(func.target)}); + if (operand_ty.bitSize(mod) > 128) { + return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)}); } var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); @@ -5233,7 +5325,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std } }, else => { - const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64; + const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64; // leave those value on top of the stack for '.select' const lhs_low_bit = try func.load(lhs, Type.u64, 8); const rhs_low_bit = try func.load(rhs, Type.u64, 8); @@ -5248,10 +5340,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std } fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const un_ty = func.air.typeOf(bin_op.lhs).childType(); - const tag_ty = func.air.typeOf(bin_op.rhs); - const layout = un_ty.unionGetLayout(func.target); + const un_ty = func.typeOf(bin_op.lhs).childType(mod); + const tag_ty = func.typeOf(bin_op.rhs); + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); const union_ptr = try func.resolveInst(bin_op.lhs); @@ -5271,11 +5364,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const un_ty = func.air.typeOf(ty_op.operand); - const tag_ty = func.air.typeOfIndex(inst); - const layout = un_ty.unionGetLayout(func.target); + const un_ty = func.typeOf(ty_op.operand); + const tag_ty = func.typeOfIndex(inst); + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand}); const operand = try func.resolveInst(ty_op.operand); @@ -5292,9 +5386,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airFpext(func: *CodeGen, inst: Air.Inst.Index) 
InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const dest_ty = func.air.typeOfIndex(inst); + const dest_ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - const extended = try func.fpext(operand, func.air.typeOf(ty_op.operand), dest_ty); + const extended = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty); const result = try extended.toLocal(func, dest_ty); func.finishAir(inst, result, &.{ty_op.operand}); } @@ -5313,7 +5407,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! // call __extendhfsf2(f16) f32 const f32_result = try func.callIntrinsic( "__extendhfsf2", - &.{Type.f16}, + &.{.f16_type}, Type.f32, &.{operand}, ); @@ -5331,15 +5425,15 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! target_util.compilerRtFloatAbbrev(wanted_bits), }) catch unreachable; - return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand}); + return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand}); } fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const dest_ty = func.air.typeOfIndex(inst); + const dest_ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - const truncated = try func.fptrunc(operand, func.air.typeOf(ty_op.operand), dest_ty); + const truncated = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty); const result = try truncated.toLocal(func, dest_ty); func.finishAir(inst, result, &.{ty_op.operand}); } @@ -5362,7 +5456,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } else operand; // call __truncsfhf2(f32) f16 - return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op}); + return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op}); } var fn_name_buf: [12]u8 = undefined; @@ -5371,14 +5465,15 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro target_util.compilerRtFloatAbbrev(wanted_bits), }) catch unreachable; - return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand}); + return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand}); } fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const err_set_ty = func.air.typeOf(ty_op.operand).childType(); - const payload_ty = err_set_ty.errorUnionPayload(); + const err_set_ty = func.typeOf(ty_op.operand).childType(mod); + const payload_ty = err_set_ty.errorUnionPayload(mod); const operand = try func.resolveInst(ty_op.operand); // set error-tag to '0' to annotate error union is non-error @@ -5386,26 +5481,27 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi operand, .{ .imm32 = 0 }, Type.anyerror, - @intCast(u32, errUnionErrorOffset(payload_ty, func.target)), + @intCast(u32, errUnionErrorOffset(payload_ty, mod)), ); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new); + break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new); }; 
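`airErrUnionPayloadPtrSet` above writes a zero `anyerror` tag (marking the union as non-error) and then returns a pointer offset to the payload. The struct below only illustrates the idea: the real offsets come from `errUnionErrorOffset`/`errUnionPayloadOffset` and depend on the payload type's alignment, so this field order is an assumption:

```zig
const std = @import("std");

// Illustrative error-union layout; real offsets are computed per-type by
// errUnionErrorOffset/errUnionPayloadOffset and need not match this order.
const ErrorUnion = extern struct {
    payload: u32,
    err: u16, // zero encodes "no error", like the Type.anyerror store above
};

fn payloadPtrSet(eu: *ErrorUnion) *u32 {
    eu.err = 0; // mark as non-error before handing out the payload pointer
    return &eu.payload;
}

test "error union payload pointer" {
    var eu = ErrorUnion{ .payload = 0, .err = 7 };
    payloadPtrSet(&eu).* = 42;
    try std.testing.expectEqual(@as(u16, 0), eu.err);
    try std.testing.expectEqual(@as(u32, 42), eu.payload);
}
```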
func.finishAir(inst, result, &.{ty_op.operand}); } fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const field_ptr = try func.resolveInst(extra.field_ptr); - const parent_ty = func.air.getRefType(ty_pl.ty).childType(); - const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target); + const parent_ty = func.air.getRefType(ty_pl.ty).childType(mod); + const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); const result = if (field_offset != 0) result: { const base = try func.buildPointerOffset(field_ptr, 0, .new); @@ -5420,7 +5516,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue { - if (ptr_ty.isSlice()) { + const mod = func.bin_file.base.options.module.?; + if (ptr_ty.isSlice(mod)) { return func.slicePtr(ptr); } else { return ptr; @@ -5428,25 +5525,26 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue } fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const dst = try func.resolveInst(bin_op.lhs); - const dst_ty = func.air.typeOf(bin_op.lhs); - const ptr_elem_ty = dst_ty.childType(); + const dst_ty = func.typeOf(bin_op.lhs); + const ptr_elem_ty = dst_ty.childType(mod); const src = try func.resolveInst(bin_op.rhs); - const src_ty = func.air.typeOf(bin_op.rhs); - const len = switch (dst_ty.ptrSize()) { + const src_ty = func.typeOf(bin_op.rhs); + const len = switch (dst_ty.ptrSize(mod)) { .Slice => blk: { const slice_len = try func.sliceLen(dst); - if (ptr_elem_ty.abiSize(func.target) != 1) { + if (ptr_elem_ty.abiSize(mod) != 1) { try func.emitWValue(slice_len); - try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(func.target)) }); + try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) }); try func.addTag(.i32_mul); try func.addLabel(.local_set, slice_len.local.value); } break :blk slice_len; }, .One => @as(WValue, .{ - .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(func.target)), + .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)), }), .C, .Many => unreachable, }; @@ -5467,17 +5565,18 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); + const op_ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); - if (op_ty.zigTypeTag() == .Vector) { + if (op_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement @popCount for vectors", .{}); } - const int_info = op_ty.intInfo(func.target); + const int_info = op_ty.intInfo(mod); const bits = int_info.bits; const wasm_bits = toWasmBits(bits) orelse { return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits}); @@ -5526,8 +5625,9 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // As the 
names are global and the slice elements are constant, we do not have // to make a copy of the ptr+value but can point towards them directly. const error_table_symbol = try func.bin_file.getErrorTableSymbol(); - const name_ty = Type.initTag(.const_slice_u8_sentinel_0); - const abi_size = name_ty.abiSize(func.target); + const name_ty = Type.slice_const_u8_sentinel_0; + const mod = func.bin_file.base.options.module.?; + const abi_size = name_ty.abiSize(mod); const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation try func.emitWValue(error_name_value); @@ -5565,20 +5665,21 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const lhs_op = try func.resolveInst(extra.lhs); const rhs_op = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); + const lhs_ty = func.typeOf(extra.lhs); + const mod = func.bin_file.base.options.module.?; - if (lhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); } - const int_info = lhs_ty.intInfo(func.target); + const int_info = lhs_ty.intInfo(mod); const is_signed = int_info.signedness == .signed; const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits}); }; if (wasm_bits == 128) { - const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.air.typeOfIndex(inst), op); + const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.typeOfIndex(inst), op); return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); } @@ -5628,17 +5729,18 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro var overflow_local = try overflow_bit.toLocal(func, Type.u32); defer overflow_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(func.target)); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); + const offset = @intCast(u32, lhs_ty.abiSize(mod)); + try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; assert(op == .add or op == .sub); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits != 128) { return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits}); @@ -5689,31 +5791,32 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, break :blk WValue{ .stack = {} }; }; - var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1)); + var overflow_local = try overflow_bit.toLocal(func, Type.u1); defer overflow_local.free(func); const result_ptr = try func.allocStack(result_ty); try func.store(result_ptr, high_op_res, Type.u64, 0); try func.store(result_ptr, tmp_op, Type.u64, 8); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), 16); + try func.store(result_ptr, overflow_local, Type.u1, 16); return result_ptr; } fn airShlWithOverflow(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); - const rhs_ty = func.air.typeOf(extra.rhs); + const lhs_ty = func.typeOf(extra.lhs); + const rhs_ty = func.typeOf(extra.rhs); - if (lhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); } - const int_info = lhs_ty.intInfo(func.target); + const int_info = lhs_ty.intInfo(mod); const is_signed = int_info.signedness == .signed; const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits}); @@ -5721,7 +5824,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // Ensure rhs is coerced to lhs as they must have the same WebAssembly types // before we can perform any binary operation. - const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(func.target).bits).?; + const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?; const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: { const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try rhs_casted.toLocal(func, lhs_ty); @@ -5745,13 +5848,13 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const shr = try func.binOp(result, rhs_final, lhs_ty, .shr); break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq); }; - var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1)); + var overflow_local = try overflow_bit.toLocal(func, Type.u1); defer overflow_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(func.target)); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); + const offset = @intCast(u32, lhs_ty.abiSize(mod)); + try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } @@ -5762,18 +5865,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); + const lhs_ty = func.typeOf(extra.lhs); + const mod = func.bin_file.base.options.module.?; - if (lhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); } // We store the bit if it's overflowed or not in this. As it's zero-initialized // we only need to update it if an overflow (or underflow) occurred. 
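Each of these `*_with_overflow` lowerings ends the same way: the wrapped result and a `u1` overflow flag are stored into a stack tuple, with the flag at offset `abiSize(lhs_ty)`. For unsigned addition the flag itself reduces to one compare on the wrapped sum (a standard formulation; the exact compare sequence is elided from this excerpt):

```zig
const std = @import("std");

// Unsigned add-with-overflow via a compare on the wrapped result: if the
// wrapped sum is smaller than an operand, the addition overflowed.
// (Standard technique; shown as a stand-in for the emitted wasm.)
fn addWithOverflow32(lhs: u32, rhs: u32) struct { res: u32, bit: u1 } {
    const res = lhs +% rhs;
    return .{ .res = res, .bit = @boolToInt(res < lhs) };
}

test "unsigned add overflow" {
    const r = addWithOverflow32(0xffff_ffff, 2);
    try std.testing.expectEqual(@as(u32, 1), r.res);
    try std.testing.expectEqual(@as(u1, 1), r.bit);
}
```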
- var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1)); + var overflow_bit = try func.ensureAllocLocal(Type.u1); defer overflow_bit.free(func); - const int_info = lhs_ty.intInfo(func.target); + const int_info = lhs_ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits}); }; @@ -5827,7 +5931,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addLabel(.local_set, overflow_bit.local.value); break :blk try func.wrapOperand(bin_op, lhs_ty); } else if (int_info.bits == 64 and int_info.signedness == .unsigned) blk: { - const new_ty = Type.initTag(.u128); + const new_ty = Type.u128; var lhs_upcast = try (try func.intcast(lhs, lhs_ty, new_ty)).toLocal(func, lhs_ty); defer lhs_upcast.free(func); var rhs_upcast = try (try func.intcast(rhs, lhs_ty, new_ty)).toLocal(func, lhs_ty); @@ -5847,8 +5951,8 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, - Type.initTag(.i128), + &[_]InternPool.Index{.i64_type} ** 4, + Type.i128, &.{ lhs, lhs_shifted, rhs, rhs_shifted }, ); const res = try func.allocLocal(lhs_ty); @@ -5871,20 +5975,20 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mul1 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, - Type.initTag(.i128), + &[_]InternPool.Index{.i64_type} ** 4, + Type.i128, &.{ lhs_lsb, zero, rhs_msb, zero }, ); const mul2 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, - Type.initTag(.i128), + &[_]InternPool.Index{.i64_type} ** 4, + Type.i128, &.{ rhs_lsb, zero, lhs_msb, zero }, ); const mul3 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, - Type.initTag(.i128), + &[_]InternPool.Index{.i64_type} ** 4, + Type.i128, &.{ lhs_msb, zero, rhs_msb, zero }, ); @@ -5912,7 +6016,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { _ = try func.binOp(lsb_or, mul_add_lt, Type.bool, .@"or"); try func.addLabel(.local_set, overflow_bit.local.value); - const tmp_result = try func.allocStack(Type.initTag(.u128)); + const tmp_result = try func.allocStack(Type.u128); try func.emitWValue(tmp_result); const mul3_msb = try func.load(mul3, Type.u64, 0); try func.store(.stack, mul3_msb, Type.u64, tmp_result.offset()); @@ -5922,23 +6026,24 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var bin_op_local = try bin_op.toLocal(func, lhs_ty); defer bin_op_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, bin_op_local, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(func.target)); - try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset); + const offset = @intCast(u32, lhs_ty.abiSize(mod)); + try func.store(result_ptr, overflow_bit, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); - if (ty.zigTypeTag() == .Vector) { + const ty = func.typeOfIndex(inst); + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{}); 
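Stepping back to the 64-bit unsigned case of `airMulWithOverflow` above: the operands are widened to `u128` and multiplied through compiler-rt's `__multi3` (wasm has no native 128-bit multiply), and overflow is signalled when the product does not fit back into 64 bits. The same check expressed with Zig's native `u128` standing in for the intrinsic (helper name illustrative):

```zig
const std = @import("std");

// 64-bit multiply-with-overflow by widening: the codegen obtains the
// 128-bit product via __multi3; native u128 arithmetic models it here.
fn mulWithOverflow64(lhs: u64, rhs: u64) struct { res: u64, bit: u1 } {
    const wide = @as(u128, lhs) * @as(u128, rhs);
    return .{
        .res = @truncate(u64, wide),
        .bit = @boolToInt((wide >> 64) != 0),
    };
}

test "u64 multiply overflow" {
    const r = mulWithOverflow64(1 << 63, 2);
    try std.testing.expectEqual(@as(u64, 0), r.res);
    try std.testing.expectEqual(@as(u1, 1), r.bit);
}
```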
} - if (ty.abiSize(func.target) > 16) { + if (ty.abiSize(mod) > 16) { return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{}); } @@ -5954,18 +6059,19 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE try func.addTag(.select); // store result in local - const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty; + const result_ty = if (isByRef(ty, mod)) Type.u32 else ty; const result = try func.allocLocal(result_ty); try func.addLabel(.local_set, result.local.value); func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const pl_op = func.air.instructions.items(.data)[inst].pl_op; const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data; - const ty = func.air.typeOfIndex(inst); - if (ty.zigTypeTag() == .Vector) { + const ty = func.typeOfIndex(inst); + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@mulAdd` for vectors", .{}); } @@ -5980,7 +6086,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // call to compiler-rt `fn fmaf(f32, f32, f32) f32` var result = try func.callIntrinsic( "fmaf", - &.{ Type.f32, Type.f32, Type.f32 }, + &.{ .f32_type, .f32_type, .f32_type }, Type.f32, &.{ rhs_ext, lhs_ext, addend_ext }, ); @@ -5994,16 +6100,17 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); - if (ty.zigTypeTag() == .Vector) { + const ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@clz` for vectors", .{}); } const operand = try func.resolveInst(ty_op.operand); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits}); }; @@ -6046,17 +6153,18 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); + const ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@ctz` for vectors", .{}); } const operand = try func.resolveInst(ty_op.operand); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits}); }; @@ -6113,7 +6221,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void { if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{}); const pl_op = func.air.instructions.items(.data)[inst].pl_op; - const ty = func.air.typeOf(pl_op.operand); + const ty = func.typeOf(pl_op.operand); const operand = try func.resolveInst(pl_op.operand); log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand }); @@ -6151,17 +6259,18 @@ fn 
airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const err_union = try func.resolveInst(pl_op.operand); const extra = func.air.extraData(Air.Try, pl_op.payload); const body = func.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = func.air.typeOf(pl_op.operand); + const err_union_ty = func.typeOf(pl_op.operand); const result = try lowerTry(func, inst, err_union, body, err_union_ty, false); func.finishAir(inst, result, &.{pl_op.operand}); } fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try func.resolveInst(extra.data.ptr); const body = func.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = func.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = func.typeOf(extra.data.ptr).childType(mod); const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true); func.finishAir(inst, result, &.{extra.data.ptr}); } @@ -6174,24 +6283,25 @@ fn lowerTry( err_union_ty: Type, operand_is_ptr: bool, ) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; if (operand_is_ptr) { return func.fail("TODO: lowerTry for pointers", .{}); } - const pl_ty = err_union_ty.errorUnionPayload(); - const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(); + const pl_ty = err_union_ty.errorUnionPayload(mod); + const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { // Block we can jump out of when error is not set try func.startBlock(.block, wasm.block_empty); // check if the error tag is set for the error union. 
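`lowerTry` reads the 16-bit error tag (`i32_load16_u` at `errUnionErrorOffset`, see below) and branches past the error body when it is zero. The block structure it emits corresponds to this plain-Zig shape (helper name illustrative):

```zig
const std = @import("std");

// What the emitted blocks amount to: test the error tag, run the Try
// instruction's body when it is set, otherwise continue with the payload.
fn lowerTryShape(eu: anyerror!u32) anyerror!u32 {
    const payload = eu catch |err| {
        // `body` runs here; it never falls through to the payload path
        return err;
    };
    return payload; // loaded from errUnionPayloadOffset
}

test "try lowering shape" {
    try std.testing.expectEqual(@as(u32, 7), try lowerTryShape(7));
    try std.testing.expectError(error.Oops, lowerTryShape(error.Oops));
}
```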
try func.emitWValue(err_union); if (pl_has_bits) { - const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target)); + const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); try func.addMemArg(.i32_load16_u, .{ .offset = err_union.offset() + err_offset, - .alignment = Type.anyerror.abiAlignment(func.target), + .alignment = Type.anyerror.abiAlignment(mod), }); } try func.addTag(.i32_eqz); @@ -6213,8 +6323,8 @@ fn lowerTry( return WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)); - if (isByRef(pl_ty, func.target)) { + const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod)); + if (isByRef(pl_ty, mod)) { return buildPointerOffset(func, err_union, pl_offset, .new); } const payload = try func.load(err_union, pl_ty, pl_offset); @@ -6222,15 +6332,16 @@ fn lowerTry( } fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: @byteSwap for vectors", .{}); } - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); // bytes are no-op if (int_info.bits == 8) { @@ -6292,13 +6403,14 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const result = if (ty.isSignedInt()) + const result = if (ty.isSignedInt(mod)) try func.divSigned(lhs, rhs, ty) else try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); @@ -6306,13 +6418,14 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const div_result = if (ty.isSignedInt()) + const div_result = if (ty.isSignedInt(mod)) try func.divSigned(lhs, rhs, ty) else try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); @@ -6328,15 +6441,16 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const mod = func.bin_file.base.options.module.?; + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - if (ty.isUnsignedInt()) { + if (ty.isUnsignedInt(mod)) { const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); - } else if (ty.isSignedInt()) { - const int_bits = ty.intInfo(func.target).bits; + } else if (ty.isSignedInt(mod)) { + const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { return 
func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits}); }; @@ -6414,7 +6528,8 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue { - const int_bits = ty.intInfo(func.target).bits; + const mod = func.bin_file.base.options.module.?; + const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits}); }; @@ -6441,7 +6556,8 @@ fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WVal /// Retrieves the absolute value of a signed integer /// NOTE: Leaves the result value on the stack. fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - const int_bits = ty.intInfo(func.target).bits; + const mod = func.bin_file.base.options.module.?; + const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits}); }; @@ -6476,11 +6592,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { assert(op == .add or op == .sub); const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const mod = func.bin_file.base.options.module.?; + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { @@ -6523,7 +6640,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue { - const int_info = ty.intInfo(func.target); + const mod = func.bin_file.base.options.module.?; + const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits).?; const is_wasm_bits = wasm_bits == int_info.bits; @@ -6588,8 +6706,9 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); - const int_info = ty.intInfo(func.target); + const mod = func.bin_file.base.options.module.?; + const ty = func.typeOfIndex(inst); + const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits}); @@ -6697,7 +6816,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn callIntrinsic( func: *CodeGen, name: []const u8, - param_types: []const Type, + param_types: []const InternPool.Index, return_type: Type, args: []const WValue, ) InnerError!WValue { @@ -6707,12 +6826,13 @@ fn callIntrinsic( }; // Always pass over C-ABI - var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target); + const mod = func.bin_file.base.options.module.?; + var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod); defer func_type.deinit(func.gpa); const func_type_index = try func.bin_file.putOrGetFuncType(func_type); try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index); - const want_sret_param = 
firstParamSRet(.C, return_type, func.target); + const want_sret_param = firstParamSRet(.C, return_type, mod); // if we want return as first param, we allocate a pointer to stack, // and emit it as our first argument const sret = if (want_sret_param) blk: { @@ -6724,16 +6844,16 @@ fn callIntrinsic( // Lower all arguments to the stack before we call our function for (args, 0..) |arg, arg_i| { assert(!(want_sret_param and arg == .stack)); - assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime()); - try func.lowerArg(.C, param_types[arg_i], arg); + assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod)); + try func.lowerArg(.C, param_types[arg_i].toType(), arg); } // Actually call our intrinsic try func.addLabel(.call, symbol_index); - if (!return_type.hasRuntimeBitsIgnoreComptime()) { + if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { return WValue.none; - } else if (return_type.isNoReturn()) { + } else if (return_type.isNoReturn(mod)) { try func.addTag(.@"unreachable"); return WValue.none; } else if (want_sret_param) { @@ -6746,11 +6866,11 @@ fn callIntrinsic( fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const enum_ty = func.air.typeOf(un_op); + const enum_ty = func.typeOf(un_op); const func_sym_index = try func.getTagNameFunction(enum_ty); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.lowerToStack(result_ptr); try func.emitWValue(operand); try func.addLabel(.call, func_sym_index); @@ -6759,15 +6879,14 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { - const enum_decl_index = enum_ty.getOwnerDecl(); - const module = func.bin_file.base.options.module.?; + const mod = func.bin_file.base.options.module.?; + const enum_decl_index = enum_ty.getOwnerDecl(mod); var arena_allocator = std.heap.ArenaAllocator.init(func.gpa); defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try module.declPtr(enum_decl_index).getFullyQualifiedName(module); - defer module.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod)); const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); // check if we already generated code for this. @@ -6775,10 +6894,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { return loc.index; } - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer); + const int_tag_ty = enum_ty.intTagType(mod); - if (int_tag_ty.bitSize(func.target) > 64) { + if (int_tag_ty.bitSize(mod) > 64) { return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{}); } @@ -6798,36 +6916,22 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse. // generate an if-else chain for each tag value as well as constant. - for (enum_ty.enumFields().keys(), 0..) |tag_name, field_index| { + for (enum_ty.enumFields(mod), 0..) 
|tag_name_ip, field_index_usize| { + const field_index = @intCast(u32, field_index_usize); + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); // for each tag name, create an unnamed const, // and then get a pointer to its value. - var name_ty_payload: Type.Payload.Len = .{ - .base = .{ .tag = .array_u8_sentinel_0 }, - .data = @intCast(u64, tag_name.len), - }; - const name_ty = Type.initPayload(&name_ty_payload.base); - const string_bytes = &module.string_literal_bytes; - try string_bytes.ensureUnusedCapacity(module.gpa, tag_name.len); - const gop = try module.string_literal_table.getOrPutContextAdapted(module.gpa, tag_name, Module.StringLiteralAdapter{ - .bytes = string_bytes, - }, Module.StringLiteralContext{ - .bytes = string_bytes, + const name_ty = try mod.arrayType(.{ + .len = tag_name.len, + .child = .u8_type, + .sentinel = .zero_u8, }); - if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, tag_name.len), - }; - string_bytes.appendSliceAssumeCapacity(tag_name); - gop.value_ptr.* = .none; - } - var name_val_payload: Value.Payload.StrLit = .{ - .base = .{ .tag = .str_lit }, - .data = gop.key_ptr.*, - }; - const name_val = Value.initPayload(&name_val_payload.base); + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = tag_name }, + } }); const tag_sym_index = try func.bin_file.lowerUnnamedConst( - .{ .ty = name_ty, .val = name_val }, + .{ .ty = name_ty, .val = name_val.toValue() }, enum_decl_index, ); @@ -6839,11 +6943,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.local_get)); try leb.writeULEB128(writer, @as(u32, 1)); - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - const tag_value = try func.lowerConstant(Value.initPayload(&tag_val_payload.base), enum_ty); + const tag_val = try mod.enumValueFieldIndex(enum_ty, field_index); + const tag_value = try func.lowerConstant(tag_val, enum_ty); switch (tag_value) { .imm32 => |value| { @@ -6928,27 +7029,27 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // finish function body try writer.writeByte(std.wasm.opcode(.end)); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, func.target); + const slice_ty = Type.slice_const_u8_sentinel_0; + const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const error_set_ty = func.air.getRefType(ty_op.ty); const result = try func.allocLocal(Type.bool); - const names = error_set_ty.errorSetNames(); + const names = error_set_ty.errorSetNames(mod); var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len); defer values.deinit(); - const module = func.bin_file.base.options.module.?; var lowest: ?u32 = null; var highest: ?u32 = null; for (names) |name| { - const err_int = module.global_error_set.get(name).?; + const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; @@ 
-7019,12 +7120,13 @@ inline fn useAtomicFeature(func: *const CodeGen) bool { } fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = func.air.typeOf(extra.ptr); - const ty = ptr_ty.childType(); - const result_ty = func.air.typeOfIndex(inst); + const ptr_ty = func.typeOf(extra.ptr); + const ty = ptr_ty.childType(mod); + const result_ty = func.typeOfIndex(inst); const ptr_operand = try func.resolveInst(extra.ptr); const expected_val = try func.resolveInst(extra.expected_value); @@ -7037,7 +7139,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr_operand); try func.lowerToStack(expected_val); try func.lowerToStack(new_val); - try func.addAtomicMemArg(switch (ty.abiSize(func.target)) { + try func.addAtomicMemArg(switch (ty.abiSize(mod)) { 1 => .i32_atomic_rmw8_cmpxchg_u, 2 => .i32_atomic_rmw16_cmpxchg_u, 4 => .i32_atomic_rmw_cmpxchg, @@ -7045,14 +7147,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}), }, .{ .offset = ptr_operand.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); try func.addLabel(.local_tee, val_local.local.value); _ = try func.cmp(.stack, expected_val, ty, .eq); try func.addLabel(.local_set, cmp_result.local.value); break :val val_local; } else val: { - if (ty.abiSize(func.target) > 8) { + if (ty.abiSize(mod) > 8) { return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{}); } const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty); @@ -7068,7 +7170,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :val ptr_val; }; - const result_ptr = if (isByRef(result_ty, func.target)) val: { + const result_ptr = if (isByRef(result_ty, mod)) val: { try func.emitWValue(cmp_result); try func.addImm32(-1); try func.addTag(.i32_xor); @@ -7076,7 +7178,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_and); const and_result = try WValue.toLocal(.stack, func, Type.bool); const result_ptr = try func.allocStack(result_ty); - try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(func.target))); + try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod))); try func.store(result_ptr, ptr_val, ty, 0); break :val result_ptr; } else val: { @@ -7087,16 +7189,17 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :val try WValue.toLocal(.stack, func, result_ty); }; - return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.new_value, extra.expected_value }); + return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.expected_value, extra.new_value }); } fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const atomic_load = func.air.instructions.items(.data)[inst].atomic_load; const ptr = try func.resolveInst(atomic_load.ptr); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); if (func.useAtomicFeature()) { - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => .i32_atomic_load8_u, 2 => 
.i32_atomic_load16_u, 4 => .i32_atomic_load, @@ -7106,7 +7209,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr); try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); } else { _ = try func.load(ptr, ty, 0); @@ -7117,12 +7220,13 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const pl_op = func.air.instructions.items(.data)[inst].pl_op; const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try func.resolveInst(pl_op.operand); const operand = try func.resolveInst(extra.operand); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const op: std.builtin.AtomicRmwOp = extra.op(); if (func.useAtomicFeature()) { @@ -7140,7 +7244,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr); try func.emitWValue(value); if (op == .Nand) { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; const and_res = try func.binOp(value, operand, ty, .@"and"); if (wasm_bits == 32) @@ -7157,7 +7261,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.select); } try func.addAtomicMemArg( - switch (ty.abiSize(func.target)) { + switch (ty.abiSize(mod)) { 1 => .i32_atomic_rmw8_cmpxchg_u, 2 => .i32_atomic_rmw16_cmpxchg_u, 4 => .i32_atomic_rmw_cmpxchg, @@ -7166,7 +7270,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }, ); const select_res = try func.allocLocal(ty); @@ -7185,7 +7289,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => { try func.emitWValue(ptr); try func.emitWValue(operand); - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => switch (op) { .Xchg => .i32_atomic_rmw8_xchg_u, .Add => .i32_atomic_rmw8_add_u, @@ -7226,7 +7330,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); const result = try WValue.toLocal(.stack, func, ty); return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand }); @@ -7255,7 +7359,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .Xor => .xor, else => unreachable, }); - if (ty.isInt() and (op == .Add or op == .Sub)) { + if (ty.isInt(mod) and (op == .Add or op == .Sub)) { _ = try func.wrapOperand(.stack, ty); } try func.store(.stack, .stack, ty, ptr.offset()); @@ -7271,7 +7375,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.store(.stack, .stack, ty, ptr.offset()); }, .Nand => { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; try func.emitWValue(ptr); const and_res = try func.binOp(result, operand, ty, .@"and"); @@ -7302,15 +7406,16 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = 
func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ptr = try func.resolveInst(bin_op.lhs); const operand = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); - const ty = ptr_ty.childType(); + const ptr_ty = func.typeOf(bin_op.lhs); + const ty = ptr_ty.childType(mod); if (func.useAtomicFeature()) { - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => .i32_atomic_store8, 2 => .i32_atomic_store16, 4 => .i32_atomic_store, @@ -7321,7 +7426,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.lowerToStack(operand); try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); } else { try func.store(ptr, operand, ty, 0); @@ -7338,3 +7443,13 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try WValue.toLocal(.stack, func, Type.usize); return func.finishAir(inst, result, &.{}); } + +fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type { + const mod = func.bin_file.base.options.module.?; + return func.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type { + const mod = func.bin_file.base.options.module.?; + return func.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index bfa5324dc6..45ad1d7eb3 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -254,7 +254,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { @setCold(true); std.debug.assert(emit.error_msg == null); const mod = emit.bin_file.base.options.module.?; - emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args); + emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(mod), format, args); return error.EmitFail; } diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 4692f65dd1..92b0f4dc40 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -5,9 +5,11 @@ //! Note: The above-mentioned document is not an official specification; it is therefore referred to as a convention. const std = @import("std"); -const Type = @import("../../type.zig").Type; const Target = std.Target; +const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); + /// Defines how to pass a type as part of a function signature, /// for both parameters and return values. pub const Class = enum { direct, indirect, none }; @@ -19,27 +21,28 @@ const direct: [2]Class = .{ .direct, .none }; /// Classifies a given Zig type to determine how it must be passed /// or returned as a value within a wasm function. /// When all elements result in `.none`, no value must be passed in or returned.
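// Aside (illustrative sketch, not part of the patch): assuming the `Class`,
// `none`, `memory`, and `direct` declarations above, a caller of the function
// that follows can reduce the two-element classification to a simple
// by-reference check. The helper name `passByReference` is hypothetical.
fn passByReference(ty: Type, mod: *Module) bool {
    const classes = classifyType(ty, mod);
    // The `memory` classification is `.{ .indirect, .none }`: the value is
    // stored in linear memory and a pointer is passed in its place.
    return classes[0] == .indirect;
}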
-pub fn classifyType(ty: Type, target: Target) [2]Class { - if (!ty.hasRuntimeBitsIgnoreComptime()) return none; - switch (ty.zigTypeTag()) { +pub fn classifyType(ty: Type, mod: *Module) [2]Class { + const target = mod.getTarget(); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; + switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.containerLayout() == .Packed) { - if (ty.bitSize(target) <= 64) return direct; + if (ty.containerLayout(mod) == .Packed) { + if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } // When the struct type is non-scalar - if (ty.structFieldCount() > 1) return memory; + if (ty.structFieldCount(mod) > 1) return memory; // When the struct's alignment is non-natural - const field = ty.structFields().values()[0]; + const field = ty.structFields(mod).values()[0]; if (field.abi_align != 0) { - if (field.abi_align > field.ty.abiAlignment(target)) { + if (field.abi_align > field.ty.abiAlignment(mod)) { return memory; } } - return classifyType(field.ty, target); + return classifyType(field.ty, mod); }, .Int, .Enum, .ErrorSet, .Vector => { - const int_bits = ty.intInfo(target).bits; + const int_bits = ty.intInfo(mod).bits; if (int_bits <= 64) return direct; if (int_bits <= 128) return .{ .direct, .direct }; return memory; @@ -53,22 +56,22 @@ pub fn classifyType(ty: Type, target: Target) [2]Class { .Bool => return direct, .Array => return memory, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return direct; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return direct; }, .Union => { - if (ty.containerLayout() == .Packed) { - if (ty.bitSize(target) <= 64) return direct; + if (ty.containerLayout(mod) == .Packed) { + if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); std.debug.assert(layout.tag_size == 0); - if (ty.unionFields().count() > 1) return memory; - return classifyType(ty.unionFields().values()[0].ty, target); + if (ty.unionFields(mod).count() > 1) return memory; + return classifyType(ty.unionFields(mod).values()[0].ty, mod); }, .ErrorUnion, .Frame, @@ -90,29 +93,29 @@ pub fn classifyType(ty: Type, target: Target) [2]Class { /// Returns the scalar type a given type can represent. /// Asserts given type can be represented as scalar, such as /// a struct with a single scalar field. 
-pub fn scalarType(ty: Type, target: std.Target) Type { - switch (ty.zigTypeTag()) { +pub fn scalarType(ty: Type, mod: *Module) Type { + switch (ty.zigTypeTag(mod)) { .Struct => { - switch (ty.containerLayout()) { + switch (ty.containerLayout(mod)) { .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; - return scalarType(struct_obj.backing_int_ty, target); + const struct_obj = mod.typeToStruct(ty).?; + return scalarType(struct_obj.backing_int_ty, mod); }, else => { - std.debug.assert(ty.structFieldCount() == 1); - return scalarType(ty.structFieldType(0), target); + std.debug.assert(ty.structFieldCount(mod) == 1); + return scalarType(ty.structFieldType(0, mod), mod); }, } }, .Union => { - if (ty.containerLayout() != .Packed) { - const layout = ty.unionGetLayout(target); + if (ty.containerLayout(mod) != .Packed) { + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0 and layout.tag_size != 0) { - return scalarType(ty.unionTagTypeSafety().?, target); + return scalarType(ty.unionTagTypeSafety(mod).?, mod); } - std.debug.assert(ty.unionFields().count() == 1); + std.debug.assert(ty.unionFields(mod).count() == 1); } - return scalarType(ty.unionFields().values()[0].ty, target); + return scalarType(ty.unionFields(mod).values()[0].ty, mod); }, else => return ty, } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b614200e41..a1b57516ee 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -26,6 +26,7 @@ const Liveness = @import("../../Liveness.zig"); const Lower = @import("Lower.zig"); const Mir = @import("Mir.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Target = std.Target; const Type = @import("../../type.zig").Type; const TypedValue = @import("../../TypedValue.zig"); @@ -112,10 +113,10 @@ const Owner = union(enum) { mod_fn: *const Module.Fn, lazy_sym: link.File.LazySymbol, - fn getDecl(owner: Owner) Module.Decl.Index { + fn getDecl(owner: Owner, mod: *Module) Module.Decl.Index { return switch (owner) { .mod_fn => |mod_fn| mod_fn.owner_decl, - .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(), + .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(mod), }; } @@ -447,7 +448,7 @@ const InstTracking = struct { else => unreachable, } tracking_log.debug("spill %{d} from {} to {}", .{ inst, self.short, self.long }); - try function.genCopy(function.air.typeOfIndex(inst), self.long, self.short); + try function.genCopy(function.typeOfIndex(inst), self.long, self.short); } fn reuseFrame(self: *InstTracking) void { @@ -537,7 +538,7 @@ const InstTracking = struct { inst: Air.Inst.Index, target: InstTracking, ) !void { - const ty = function.air.typeOfIndex(inst); + const ty = function.typeOfIndex(inst); if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame) try function.genCopy(ty, target.long, self.short); try function.genCopy(ty, target.short, self.short); @@ -605,14 +606,14 @@ const FrameAlloc = struct { .ref_count = 0, }; } - fn initType(ty: Type, target: Target) FrameAlloc { - return init(.{ .size = ty.abiSize(target), .alignment = ty.abiAlignment(target) }); + fn initType(ty: Type, mod: *Module) FrameAlloc { + return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) }); } }; const StackAllocation = struct { inst: ?Air.Inst.Index, - /// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*) + /// TODO do we need size? 
should be determined by inst.ty.abiSize(mod) size: u32, }; @@ -631,7 +632,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -642,6 +643,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -686,7 +688,7 @@ pub fn generate( @enumToInt(FrameIndex.stack_frame), FrameAlloc.init(.{ .size = 0, - .alignment = if (mod.align_stack_fns.get(module_fn)) |set_align_stack| + .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack| set_align_stack.alignment else 1, @@ -697,7 +699,8 @@ pub fn generate( FrameAlloc.init(.{ .size = 0, .alignment = 1 }), ); - var call_info = function.resolveCallingConventionValues(fn_type, &.{}, .args_frame) catch |err| switch (err) { + const fn_info = mod.typeToFunc(fn_type).?; + var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create( @@ -714,12 +717,12 @@ pub fn generate( function.args = call_info.args; function.ret_mcv = call_info.return_value; function.frame_allocs.set(@enumToInt(FrameIndex.ret_addr), FrameAlloc.init(.{ - .size = Type.usize.abiSize(function.target.*), - .alignment = @min(Type.usize.abiAlignment(function.target.*), call_info.stack_align), + .size = Type.usize.abiSize(mod), + .alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align), })); function.frame_allocs.set(@enumToInt(FrameIndex.base_ptr), FrameAlloc.init(.{ - .size = Type.usize.abiSize(function.target.*), - .alignment = @min(Type.usize.abiAlignment(function.target.*) * 2, call_info.stack_align), + .size = Type.usize.abiSize(mod), + .alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align), })); function.frame_allocs.set( @enumToInt(FrameIndex.args_frame), @@ -1565,7 +1568,8 @@ fn asmMemoryRegisterImmediate( } fn gen(self: *Self) InnerError!void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { try self.asmRegister(.{ ._, .push }, .rbp); const backpatch_push_callee_preserved_regs = try self.asmPlaceholder(); @@ -1582,7 +1586,7 @@ fn gen(self: *Self) InnerError!void { // register which the callee is free to clobber. Therefore, we purposely // spill it to stack immediately. 
const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(Type.usize, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod)); try self.genSetMem( .{ .frame = frame_index }, 0, @@ -1724,6 +1728,8 @@ fn gen(self: *Self) InnerError!void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { @@ -1732,7 +1738,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; wip_mir_log.debug("{}", .{self.fmtAir(inst)}); verbose_tracking_log.debug("{}", .{self.fmtTracking()}); @@ -1916,8 +1922,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -1999,7 +2004,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { - switch (lazy_sym.ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lazy_sym.ty.zigTypeTag(mod)) { .Enum => { const enum_ty = lazy_sym.ty; wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)}); @@ -2011,7 +2017,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { const ret_reg = param_regs[0]; const enum_mcv = MCValue{ .register = param_regs[1] }; - var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount()); + var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount(mod)); defer self.gpa.free(exitlude_jump_relocs); const data_reg = try self.register_manager.allocReg(null, gp); @@ -2020,16 +2026,10 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty }); var data_off: i32 = 0; - for ( - exitlude_jump_relocs, - enum_ty.enumFields().keys(), - 0.., - ) |*exitlude_jump_reloc, tag_name, index| { - var tag_pl = Value.Payload.U32{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, index_usize| { + const index = @intCast(u32, index_usize); + const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]); + const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv); const skip_reloc = try self.asmJccReloc(undefined, .ne); @@ -2092,10 +2092,8 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void { /// Asserts there is already capacity to insert into top branch inst_table. 
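// Aside (illustrative, not part of the patch): the genLazy hunk above follows
// the migration pattern applied throughout this change: stack-allocated
// legacy `Value.Payload` objects are replaced with values interned in the
// module's InternPool. Shape of the change, the "before" form being the
// deleted code:
//
//   // before: the Value borrows stack memory and dies with the scope
//   var tag_pl = Value.Payload.U32{
//       .base = .{ .tag = .enum_field_index },
//       .data = @intCast(u32, index),
//   };
//   const tag_val = Value.initPayload(&tag_pl.base);
//
//   // after: the Value is interned and owned by the InternPool
//   const tag_val = try mod.enumValueFieldIndex(enum_ty, index);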
fn processDeath(self: *Self, inst: Air.Inst.Index) void { - switch (self.air.instructions.items(.tag)[inst]) { - .constant, .const_ty => unreachable, - else => self.inst_tracking.getPtr(inst).?.die(self, inst), - } + assert(self.air.instructions.items(.tag)[inst] != .interned); + self.inst_tracking.getPtr(inst).?.die(self, inst); } /// Called when there are no operands, and the instruction is always unreferenced. @@ -2126,10 +2124,7 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live const dies = @truncate(u1, tomb_bits) != 0; tomb_bits >>= 1; if (!dies) continue; - const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); - self.processDeath(op_index); + self.processDeath(Air.refToIndexAllowNone(op) orelse continue); } self.finishAirResult(inst, result); } @@ -2252,19 +2247,19 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { - const ptr_ty = self.air.typeOfIndex(inst); - const val_ty = ptr_ty.childType(); + const mod = self.bin_file.options.module.?; + const ptr_ty = self.typeOfIndex(inst); + const val_ty = ptr_ty.childType(mod); return self.allocFrameIndex(FrameAlloc.init(.{ - .size = math.cast(u32, val_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + .size = math.cast(u32, val_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)}); }, - .alignment = @max(ptr_ty.ptrAlignment(self.target.*), 1), + .alignment = @max(ptr_ty.ptrAlignment(mod), 1), })); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - return self.allocRegOrMemAdvanced(self.air.typeOfIndex(inst), inst, reg_ok); + return self.allocRegOrMemAdvanced(self.typeOfIndex(inst), inst, reg_ok); } fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { @@ -2272,20 +2267,20 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { } fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; if (reg_ok) need_mem: { - if (abi_size <= @as(u32, switch (ty.zigTypeTag()) { + if (abi_size <= @as(u32, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 16, 32, 64, 128 => 16, 80 => break :need_mem, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { 16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16, 80 => break :need_mem, else => unreachable, @@ -2294,18 +2289,18 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b }, else => 8, })) { - if (self.register_manager.tryAllocReg(inst, regClassForType(ty))) |reg| { + if (self.register_manager.tryAllocReg(inst, regClassForType(ty, mod))) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } } - const frame_index = try 
self.allocFrameIndex(FrameAlloc.initType(ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, mod)); return .{ .load_frame = .{ .index = frame_index } }; } -fn regClassForType(ty: Type) RegisterManager.RegisterBitSet { - return switch (ty.zigTypeTag()) { +fn regClassForType(ty: Type, mod: *Module) RegisterManager.RegisterBitSet { + return switch (ty.zigTypeTag(mod)) { .Float, .Vector => sse, else => gp, }; @@ -2449,7 +2444,8 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, regClassForType(ty)); + const mod = self.bin_file.options.module.?; + const reg = try self.register_manager.allocReg(null, regClassForType(ty, mod)); try self.genSetReg(reg, ty, mcv); return reg; } @@ -2464,7 +2460,8 @@ fn copyToRegisterWithInstTracking( ty: Type, mcv: MCValue, ) !MCValue { - const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty)); + const mod = self.bin_file.options.module.?; + const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty, mod)); try self.genSetReg(reg, ty, mcv); return MCValue{ .register = reg }; } @@ -2481,7 +2478,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .load_frame => .{ .register_offset = .{ .reg = (try self.copyToRegisterWithInstTracking( inst, - self.air.typeOfIndex(inst), + self.typeOfIndex(inst), self.ret_mcv.long, )).register, .off = self.ret_mcv.short.indirect.off, @@ -2492,9 +2489,9 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2558,9 +2555,9 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { fn airFpext(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2618,14 +2615,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const src_ty = self.air.typeOf(ty_op.operand); - const src_int_info = src_ty.intInfo(self.target.*); + const src_ty = self.typeOf(ty_op.operand); + const src_int_info = src_ty.intInfo(mod); - const dst_ty = self.air.typeOfIndex(inst); - const dst_int_info = dst_ty.intInfo(self.target.*); - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_ty = self.typeOfIndex(inst); + const dst_int_info = dst_ty.intInfo(mod); + const abi_size = @intCast(u32, 
dst_ty.abiSize(mod)); const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; const extend = switch (src_int_info.signedness) { @@ -2670,14 +2668,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const high_bits = src_int_info.bits % 64; if (high_bits > 0) { - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (extend) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = high_bits, - }; - const high_ty = Type.initPayload(&high_pl.base); + const high_ty = try mod.intType(extend, high_bits); try self.truncateRegister(high_ty, high_reg); try self.genCopy(Type.usize, high_mcv, .{ .register = high_reg }); } @@ -2706,12 +2697,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { } fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); - const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const dst_ty = self.typeOfIndex(inst); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const src_ty = self.typeOf(ty_op.operand); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const result = result: { const src_mcv = try self.resolveInst(ty_op.operand); @@ -2724,13 +2716,13 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); - if (dst_ty.zigTypeTag() == .Vector) { - assert(src_ty.zigTypeTag() == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); - const dst_info = dst_ty.childType().intInfo(self.target.*); - const src_info = src_ty.childType().intInfo(self.target.*); + if (dst_ty.zigTypeTag(mod) == .Vector) { + assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod)); + const dst_info = dst_ty.childType(mod).intInfo(mod); + const src_info = src_ty.childType(mod).intInfo(mod); const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) { 8 => switch (src_info.bits) { - 16 => switch (dst_ty.vectorLen()) { + 16 => switch (dst_ty.vectorLen(mod)) { 1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw }, 9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null, else => null, @@ -2738,7 +2730,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else => null, }, 16 => switch (src_info.bits) { - 32 => switch (dst_ty.vectorLen()) { + 32 => switch (dst_ty.vectorLen(mod)) { 1...4 => if (self.hasFeature(.avx)) .{ .vp_w, .ackusd } else if (self.hasFeature(.sse4_1)) @@ -2755,29 +2747,21 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { dst_ty.fmt(self.bin_file.options.module.?), }); - var mask_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits), - }; - const mask_val = Value.initPayload(&mask_pl.base); + const elem_ty = src_ty.childType(mod); + const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); - var splat_pl = Value.Payload.SubValue{ - .base = .{ .tag = .repeated }, - .data = mask_val, - }; - const splat_val = Value.initPayload(&splat_pl.base); + const splat_ty = try mod.vectorType(.{ + .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), + .child = elem_ty.ip_index, + }); + const splat_abi_size = 
@intCast(u32, splat_ty.abiSize(mod)); - var full_pl = Type.Payload.Array{ - .base = .{ .tag = .vector }, - .data = .{ - .len = @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits), - .elem_type = src_ty.childType(), - }, - }; - const full_ty = Type.initPayload(&full_pl.base); - const full_abi_size = @intCast(u32, full_ty.abiSize(self.target.*)); + const splat_val = try mod.intern(.{ .aggregate = .{ + .ty = splat_ty.ip_index, + .storage = .{ .repeated_elem = mask_val.ip_index }, + } }); - const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); + const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = splat_val.toValue() }); const splat_addr_mcv: MCValue = switch (splat_mcv) { .memory, .indirect, .load_frame => splat_mcv.address(), else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) }, @@ -2789,14 +2773,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { .{ .vp_, .@"and" }, dst_reg, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg); } else { try self.asmRegisterMemory( .{ .p_, .@"and" }, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegister(mir_tag, dst_reg, dst_reg); } @@ -2819,7 +2803,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const operand = try self.resolveInst(un_op); const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand)) @@ -2831,20 +2815,21 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const slice_ty = self.air.typeOfIndex(inst); + const slice_ty = self.typeOfIndex(inst); const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(self.target.*)), + @intCast(i32, ptr_ty.abiSize(mod)), len_ty, len, ); @@ -2873,23 +2858,24 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void } fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { + const mod = self.bin_file.options.module.?; const air_tag = self.air.instructions.items(.tag); const air_data = self.air.instructions.items(.data); - const dst_ty = self.air.typeOf(dst_air); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_ty = self.typeOf(dst_air); + const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { - .constant => { - const src_val = 
self.air.values[air_data[inst].ty_pl.payload]; + .interned => { + const src_val = air_data[inst].interned.toValue(); var space: Value.BigIntSpace = undefined; - const src_int = src_val.toBigInt(&space, self.target.*); + const src_int = src_val.toBigInt(&space, mod); return @intCast(u16, src_int.bitCountTwosComp()) + @boolToInt(src_int.positive and dst_info.signedness == .signed); }, .intcast => { - const src_ty = self.air.typeOf(air_data[inst].ty_op.operand); - const src_info = src_ty.intInfo(self.target.*); + const src_ty = self.typeOf(air_data[inst].ty_op.operand); + const src_info = src_ty.intInfo(mod); return @min(switch (src_info.signedness) { .signed => switch (dst_info.signedness) { .signed => src_info.bits, @@ -2908,20 +2894,18 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { } fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result = result: { const tag = self.air.instructions.items(.tag)[inst]; - const dst_ty = self.air.typeOfIndex(inst); - switch (dst_ty.zigTypeTag()) { + const dst_ty = self.typeOfIndex(inst); + switch (dst_ty.zigTypeTag(mod)) { .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs), else => {}, } - const dst_info = dst_ty.intInfo(self.target.*); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, .data = switch (tag) { + const dst_info = dst_ty.intInfo(mod); + const src_ty = try mod.intType(dst_info.signedness, switch (tag) { else => unreachable, .mul, .mulwrap => math.max3( self.activeIntBits(bin_op.lhs), @@ -2929,8 +2913,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { dst_info.bits / 2, ), .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits, - } }; - const src_ty = Type.initPayload(&src_pl.base); + }); try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); @@ -2942,8 +2925,9 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) @@ -2968,7 +2952,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -2994,7 +2978,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(self.target.*)), + .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)), }); try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); @@ -3005,14 +2989,14 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( 
registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3020,8 +3004,9 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { } fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) @@ -3046,7 +3031,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3076,14 +3061,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3091,8 +3076,9 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { } fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); try self.spillRegisters(&.{ .rax, .rdx }); const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); @@ -3118,7 +3104,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(limit_lock); const reg_bits = self.regBitSize(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { try self.genSetReg(limit_reg, ty, lhs_mcv); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv); try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); @@ -3134,7 +3120,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { }; const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv); - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_mcv.register, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3145,12 +3131,13 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = result: { const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(bin_op.lhs); - switch (ty.zigTypeTag()) { + const ty = self.typeOf(bin_op.lhs); + switch 
(ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}), .Int => { try self.spillEflagsIfOccupied(); @@ -3160,13 +3147,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .sub_with_overflow => .sub, else => unreachable, }, bin_op.lhs, bin_op.rhs); - const int_info = ty.intInfo(self.target.*); + const int_info = ty.intInfo(mod); const cc: Condition = switch (int_info.signedness) { .unsigned => .c, .signed => .o, }; - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { switch (partial_mcv) { .register => |reg| { @@ -3177,16 +3164,16 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), Type.u1, .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), ty, partial_mcv, ); @@ -3194,7 +3181,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, @@ -3205,12 +3192,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = result: { - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); - switch (lhs_ty.zigTypeTag()) { + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}), .Int => { try self.spillEflagsIfOccupied(); @@ -3219,7 +3207,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty); const partial_lock = switch (partial_mcv) { @@ -3238,7 +3226,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, tmp_mcv, lhs); const cc = Condition.ne; - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { switch (partial_mcv) { .register => |reg| { @@ -3249,24 +3237,24 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, 
tuple_ty.structFieldOffset(1, self.target.*)), - tuple_ty.structFieldType(1), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + tuple_ty.structFieldType(1, mod), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), - tuple_ty.structFieldType(0), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + tuple_ty.structFieldType(0, mod), partial_mcv, ); break :result .{ .load_frame = .{ .index = frame_index } }; } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, @@ -3283,29 +3271,20 @@ fn genSetFrameTruncatedOverflowCompare( src_mcv: MCValue, overflow_cc: ?Condition, ) !void { + const mod = self.bin_file.options.module.?; const src_lock = switch (src_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, }; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const ty = tuple_ty.structFieldType(0); - const int_info = ty.intInfo(self.target.*); + const ty = tuple_ty.structFieldType(0, mod); + const int_info = ty.intInfo(mod); - var hi_limb_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (int_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = (int_info.bits - 1) % 64 + 1, - }; - const hi_limb_ty = Type.initPayload(&hi_limb_pl.base); + const hi_limb_bits = (int_info.bits - 1) % 64 + 1; + const hi_limb_ty = try mod.intType(int_info.signedness, hi_limb_bits); - var rest_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = int_info.bits - hi_limb_pl.data, - }; - const rest_ty = Type.initPayload(&rest_pl.base); + const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_limb_bits); const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp); const temp_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs); @@ -3335,7 +3314,7 @@ fn genSetFrameTruncatedOverflowCompare( ); } - const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)); + const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod)); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, @@ -3345,23 +3324,24 @@ fn genSetFrameTruncatedOverflowCompare( ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), - tuple_ty.structFieldType(1), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + tuple_ty.structFieldType(1, mod), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, ); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const dst_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = switch (dst_ty.zigTypeTag()) { + const dst_ty = self.typeOf(bin_op.lhs); + const result: MCValue = switch (dst_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), .Int => result: { try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_info = 
dst_ty.intInfo(mod); const cc: Condition = switch (dst_info.signedness) { .unsigned => .c, .signed => .o, @@ -3369,16 +3349,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_active_bits = self.activeIntBits(bin_op.lhs); const rhs_active_bits = self.activeIntBits(bin_op.rhs); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, .data = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2) }; - const src_ty = Type.initPayload(&src_pl.base); + const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); + const src_ty = try mod.intType(dst_info.signedness, src_bits); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const extra_bits = if (dst_info.bits <= 64) self.regExtraBits(dst_ty) else @@ -3391,27 +3368,27 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } }; } else { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, else => { // For now, this is the only supported multiply that doesn't fit in a register. - assert(dst_info.bits <= 128 and src_pl.data == 64); + assert(dst_info.bits <= 128 and src_bits == 64); const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), - tuple_ty.structFieldType(0), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + tuple_ty.structFieldType(0, mod), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), - tuple_ty.structFieldType(1), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + tuple_ty.structFieldType(1, mod), .{ .immediate = 0 }, // cc being set is impossible ); } else try self.genSetFrameTruncatedOverflowCompare( @@ -3433,7 +3410,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Clobbers .rax and .rdx registers. /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } @@ -3472,8 +3450,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Always returns a register. /// Clobbers .rax and .rdx registers. 
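// Background sketch (illustration only, not part of the patch):
// genInlineIntDivFloor below lowers floor division, which differs from x86's
// truncating `idiv` whenever the remainder is non-zero and the operand signs
// differ. The semantics the emitted sequence must match, in plain Zig
// (overflow edge cases such as minInt / -1 ignored):
fn divFloorRef(lhs: i64, rhs: i64) i64 {
    const q = @divTrunc(lhs, rhs); // truncated quotient, as idiv leaves in rax
    const r = @rem(lhs, rhs); // remainder with the sign of lhs, as in rdx
    // step toward negative infinity when r != 0 and the signs differ
    return if (r != 0 and (r ^ rhs) < 0) q - 1 else q;
}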
 fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-    const int_info = ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const int_info = ty.intInfo(mod);
     const dividend: Register = switch (lhs) {
         .register => |reg| reg,
         else => try self.copyToTmpRegister(ty, lhs),
     };
@@ -3531,8 +3510,8 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
     try self.register_manager.getReg(.rcx, null);
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);

     const result = try self.genShiftBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
@@ -3549,7 +3528,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
 fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const pl_ty = self.air.typeOfIndex(inst);
+        const pl_ty = self.typeOfIndex(inst);
         const opt_mcv = try self.resolveInst(ty_op.operand);

         if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
@@ -3574,7 +3553,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const opt_mcv = try self.resolveInst(ty_op.operand);

     const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
@@ -3585,14 +3564,15 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
-        const opt_ty = src_ty.childType();
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);
+        const opt_ty = src_ty.childType(mod);
         const src_mcv = try self.resolveInst(ty_op.operand);

-        if (opt_ty.optionalReprIsPayload()) {
+        if (opt_ty.optionalReprIsPayload(mod)) {
             break :result if (self.liveness.isUnused(inst))
                 .unreach
             else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
@@ -3609,8 +3589,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);

-        const pl_ty = dst_ty.childType();
-        const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*));
+        const pl_ty = dst_ty.childType(mod);
+        const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
         try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 });
         break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
     };
@@ -3618,22 +3598,23 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const err_union_ty = self.air.typeOf(ty_op.operand);
-    const err_ty = err_union_ty.errorUnionSet();
-    const payload_ty = err_union_ty.errorUnionPayload();
+    const err_union_ty = self.typeOf(ty_op.operand);
+    const err_ty = err_union_ty.errorUnionSet(mod);
+    const payload_ty = err_union_ty.errorUnionPayload(mod);
     const operand = try self.resolveInst(ty_op.operand);

     const result: MCValue = result: {
-        if (err_ty.errorSetIsEmpty()) {
+        if (err_ty.errorSetIsEmpty(mod)) {
             break :result MCValue{ .immediate = 0 };
         }

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             break :result operand;
         }

-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         switch (operand) {
             .register => |reg| {
                 // TODO reuse operand
@@ -3666,7 +3647,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {

 fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const err_union_ty = self.air.typeOf(ty_op.operand);
+    const err_union_ty = self.typeOf(ty_op.operand);
     const operand = try self.resolveInst(ty_op.operand);
     const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, operand);
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -3678,12 +3659,13 @@ fn genUnwrapErrorUnionPayloadMir(
     err_union_ty: Type,
     err_union: MCValue,
 ) !MCValue {
-    const payload_ty = err_union_ty.errorUnionPayload();
+    const mod = self.bin_file.options.module.?;
+    const payload_ty = err_union_ty.errorUnionPayload(mod);

     const result: MCValue = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;

-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
         switch (err_union) {
             .load_frame => |frame_addr| break :result .{ .load_frame = .{
                 .index = frame_addr.index,
@@ -3720,9 +3702,10 @@ fn genUnwrapErrorUnionPayloadMir(

 // *(E!T) -> E
 fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;

-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3736,11 +3719,11 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
     defer self.register_manager.unlockReg(dst_lock);

-    const eu_ty = src_ty.childType();
-    const pl_ty = eu_ty.errorUnionPayload();
-    const err_ty = eu_ty.errorUnionSet();
-    const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
-    const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
+    const eu_ty = src_ty.childType(mod);
+    const pl_ty = eu_ty.errorUnionPayload(mod);
+    const err_ty = eu_ty.errorUnionSet(mod);
+    const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+    const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
     try self.asmRegisterMemory(
         .{ ._, .mov },
         registerAlias(dst_reg, err_abi_size),
@@ -3755,9 +3738,10 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {

 // *(E!T) -> *T
 fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;

-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3766,7 +3750,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
     defer self.register_manager.unlockReg(src_lock);

-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
         src_reg
     else
@@ -3775,10 +3759,10 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

-    const eu_ty = src_ty.childType();
-    const pl_ty = eu_ty.errorUnionPayload();
-    const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const eu_ty = src_ty.childType(mod);
+    const pl_ty = eu_ty.errorUnionPayload(mod);
+    const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
     try self.asmRegisterMemory(
         .{ ._, .lea },
         registerAlias(dst_reg, dst_abi_size),
@@ -3789,9 +3773,10 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const src_ty = self.typeOf(ty_op.operand);
         const src_mcv = try self.resolveInst(ty_op.operand);
         const src_reg = switch (src_mcv) {
             .register => |reg| reg,
@@ -3800,11 +3785,11 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
         defer self.register_manager.unlockReg(src_lock);

-        const eu_ty = src_ty.childType();
-        const pl_ty = eu_ty.errorUnionPayload();
-        const err_ty = eu_ty.errorUnionSet();
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
-        const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
+        const eu_ty = src_ty.childType(mod);
+        const pl_ty = eu_ty.errorUnionPayload(mod);
+        const err_ty = eu_ty.errorUnionSet(mod);
+        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+        const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
         try self.asmMemoryImmediate(
             .{ ._, .mov },
             Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{
@@ -3816,7 +3801,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {

         if (self.liveness.isUnused(inst)) break :result .unreach;

-        const dst_ty = self.air.typeOfIndex(inst);
+        const dst_ty = self.typeOfIndex(inst);
         const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
             src_reg
         else
@@ -3824,8 +3809,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockReg(dst_reg);
         defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
-        const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+        const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
         try self.asmRegisterMemory(
             .{ ._, .lea },
             registerAlias(dst_reg, dst_abi_size),
@@ -3853,14 +3838,15 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const pl_ty = self.air.typeOf(ty_op.operand);
-        if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 };
+        const pl_ty = self.typeOf(ty_op.operand);
+        if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 };

-        const opt_ty = self.air.typeOfIndex(inst);
+        const opt_ty = self.typeOfIndex(inst);
         const pl_mcv = try self.resolveInst(ty_op.operand);
-        const same_repr = opt_ty.optionalReprIsPayload();
+        const same_repr = opt_ty.optionalReprIsPayload(mod);
         if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;

         const pl_lock: ?RegisterLock = switch (pl_mcv) {
@@ -3873,7 +3859,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         try self.genCopy(pl_ty, opt_mcv, pl_mcv);

         if (!same_repr) {
-            const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*));
+            const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
             switch (opt_mcv) {
                 else => unreachable,
@@ -3900,19 +3886,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {

 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const eu_ty = self.air.getRefType(ty_op.ty);
-    const pl_ty = eu_ty.errorUnionPayload();
-    const err_ty = eu_ty.errorUnionSet();
+    const pl_ty = eu_ty.errorUnionPayload(mod);
+    const err_ty = eu_ty.errorUnionSet(mod);
     const operand = try self.resolveInst(ty_op.operand);

     const result: MCValue = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result .{ .immediate = 0 };
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 };

-        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*));
-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
+        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
+        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
         try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand);
         try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 });
         break :result .{ .load_frame = .{ .index = frame_index } };
@@ -3922,18 +3909,19 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {

 /// E to E!T
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const eu_ty = self.air.getRefType(ty_op.ty);
-    const pl_ty = eu_ty.errorUnionPayload();
-    const err_ty = eu_ty.errorUnionSet();
+    const pl_ty = eu_ty.errorUnionPayload(mod);
+    const err_ty = eu_ty.errorUnionSet(mod);

     const result: MCValue = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result try self.resolveInst(ty_op.operand);
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand);

-        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*));
-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
+        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
+        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
         try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef);
         const operand = try self.resolveInst(ty_op.operand);
         try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand);
@@ -3949,7 +3937,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
         if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;

         const dst_mcv = try self.allocRegOrMem(inst, true);
-        const dst_ty = self.air.typeOfIndex(inst);
+        const dst_ty = self.typeOfIndex(inst);
         try self.genCopy(dst_ty, dst_mcv, src_mcv);
         break :result dst_mcv;
     };
@@ -3974,9 +3962,10 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;

-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3985,7 +3974,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
     defer self.register_manager.unlockReg(src_lock);

-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
         src_reg
     else
@@ -3994,7 +3983,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
     try self.asmRegisterMemory(
         .{ ._, .lea },
         registerAlias(dst_reg, dst_abi_size),
@@ -4010,7 +3999,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {

 fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const opt_mcv = try self.resolveInst(ty_op.operand);

     const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
@@ -4041,7 +4030,8 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi
 }

 fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
-    const slice_ty = self.air.typeOf(lhs);
+    const mod = self.bin_file.options.module.?;
+    const slice_ty = self.typeOf(lhs);
     const slice_mcv = try self.resolveInst(lhs);
     const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4049,12 +4039,11 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     };
     defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);

-    const elem_ty = slice_ty.childType();
-    const elem_size = elem_ty.abiSize(self.target.*);
-    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-    const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+    const elem_ty = slice_ty.childType(mod);
+    const elem_size = elem_ty.abiSize(mod);
+    const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);

-    const index_ty = self.air.typeOf(rhs);
+    const index_ty = self.typeOf(rhs);
     const index_mcv = try self.resolveInst(rhs);
     const index_mcv_lock: ?RegisterLock = switch (index_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4077,11 +4066,11 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
 }

 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const slice_ty = self.air.typeOf(bin_op.lhs);
+    const slice_ty = self.typeOf(bin_op.lhs);

-    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-    const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+    const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
     const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
     const dst_mcv = try self.allocRegOrMem(inst, false);
     try self.load(dst_mcv, slice_ptr_field_type, elem_ptr);
@@ -4097,9 +4086,10 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const array_ty = self.air.typeOf(bin_op.lhs);
+    const array_ty = self.typeOf(bin_op.lhs);
     const array = try self.resolveInst(bin_op.lhs);
     const array_lock: ?RegisterLock = switch (array) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4107,10 +4097,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (array_lock) |lock| self.register_manager.unlockReg(lock);

-    const elem_ty = array_ty.childType();
-    const elem_abi_size = elem_ty.abiSize(self.target.*);
+    const elem_ty = array_ty.childType(mod);
+    const elem_abi_size = elem_ty.abiSize(mod);

-    const index_ty = self.air.typeOf(bin_op.rhs);
+    const index_ty = self.typeOf(bin_op.rhs);
     const index = try self.resolveInst(bin_op.rhs);
     const index_lock: ?RegisterLock = switch (index) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4125,7 +4115,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const addr_reg = try self.register_manager.allocReg(null, gp);
     switch (array) {
         .register => {
-            const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, self.target.*));
+            const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod));
             try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array);
             try self.asmRegisterMemory(
                 .{ ._, .lea },
@@ -4162,15 +4152,16 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);

     // this is identical to the `airPtrElemPtr` codegen expect here an
     // additional `mov` is needed at the end to get the actual value

-    const elem_ty = ptr_ty.elemType2();
-    const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
-    const index_ty = self.air.typeOf(bin_op.rhs);
+    const elem_ty = ptr_ty.elemType2(mod);
+    const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
+    const index_ty = self.typeOf(bin_op.rhs);
     const index_mcv = try self.resolveInst(bin_op.rhs);
     const index_lock = switch (index_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4207,10 +4198,11 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;

-    const ptr_ty = self.air.typeOf(extra.lhs);
+    const ptr_ty = self.typeOf(extra.lhs);
     const ptr = try self.resolveInst(extra.lhs);
     const ptr_lock: ?RegisterLock = switch (ptr) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4218,9 +4210,9 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);

-    const elem_ty = ptr_ty.elemType2();
-    const elem_abi_size = elem_ty.abiSize(self.target.*);
-    const index_ty = self.air.typeOf(extra.rhs);
+    const elem_ty = ptr_ty.elemType2(mod);
+    const elem_abi_size = elem_ty.abiSize(mod);
+    const index_ty = self.typeOf(extra.rhs);
     const index = try self.resolveInst(extra.rhs);
     const index_lock: ?RegisterLock = switch (index) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4239,11 +4231,12 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_union_ty = self.air.typeOf(bin_op.lhs);
-    const union_ty = ptr_union_ty.childType();
-    const tag_ty = self.air.typeOf(bin_op.rhs);
-    const layout = union_ty.unionGetLayout(self.target.*);
+    const ptr_union_ty = self.typeOf(bin_op.lhs);
+    const union_ty = ptr_union_ty.childType(mod);
+    const tag_ty = self.typeOf(bin_op.rhs);
+    const layout = union_ty.unionGetLayout(mod);

     if (layout.tag_size == 0) {
         return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -4275,20 +4268,19 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
         break :blk MCValue{ .register = reg };
     } else ptr;

-    var ptr_tag_pl = ptr_union_ty.ptrInfo();
-    ptr_tag_pl.data.pointee_type = tag_ty;
-    const ptr_tag_ty = Type.initPayload(&ptr_tag_pl.base);
+    const ptr_tag_ty = try mod.adjustPtrTypeChild(ptr_union_ty, tag_ty);
     try self.store(ptr_tag_ty, adjusted_ptr, tag);

     return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
 }

 fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;

-    const tag_ty = self.air.typeOfIndex(inst);
-    const union_ty = self.air.typeOf(ty_op.operand);
-    const layout = union_ty.unionGetLayout(self.target.*);
+    const tag_ty = self.typeOfIndex(inst);
+    const union_ty = self.typeOf(ty_op.operand);
+    const layout = union_ty.unionGetLayout(mod);

     if (layout.tag_size == 0) {
         return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
@@ -4302,7 +4294,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

-    const tag_abi_size = tag_ty.abiSize(self.target.*);
+    const tag_abi_size = tag_ty.abiSize(mod);
     const dst_mcv: MCValue = blk: {
         switch (operand) {
             .load_frame => |frame_addr| {
@@ -4337,10 +4329,11 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airClz(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);

         const src_mcv = try self.resolveInst(ty_op.operand);
         const mat_src_mcv = switch (src_mcv) {
@@ -4358,7 +4351,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
         defer self.register_manager.unlockReg(dst_lock);

-        const src_bits = src_ty.bitSize(self.target.*);
+        const src_bits = src_ty.bitSize(mod);
         if (self.hasFeature(.lzcnt)) {
             if (src_bits <= 8) {
                 const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv);
@@ -4405,7 +4398,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
         }

         if (src_bits > 64)
-            return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)});
+            return self.fail("TODO airClz of {}", .{src_ty.fmt(mod)});
         if (math.isPowerOfTwo(src_bits)) {
             const imm_reg = try self.copyToTmpRegister(dst_ty, .{
                 .immediate = src_bits ^ (src_bits - 1),
@@ -4422,7 +4415,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
                 try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg });
             } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv);

-            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
             try self.asmCmovccRegisterRegister(
                 registerAlias(dst_reg, cmov_abi_size),
                 registerAlias(imm_reg, cmov_abi_size),
@@ -4449,7 +4442,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
                 .{ .register = wide_reg },
             );

-            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
             try self.asmCmovccRegisterRegister(
                 registerAlias(imm_reg, cmov_abi_size),
                 registerAlias(dst_reg, cmov_abi_size),
@@ -4465,11 +4458,12 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
-        const src_bits = src_ty.bitSize(self.target.*);
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);
+        const src_bits = src_ty.bitSize(mod);

         const src_mcv = try self.resolveInst(ty_op.operand);
         const mat_src_mcv = switch (src_mcv) {
@@ -4548,7 +4542,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
             try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg });
         } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv);

-        const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+        const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
         try self.asmCmovccRegisterRegister(
             registerAlias(dst_reg, cmov_abi_size),
             registerAlias(width_reg, cmov_abi_size),
@@ -4560,10 +4554,11 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const src_ty = self.air.typeOf(ty_op.operand);
-        const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+        const src_ty = self.typeOf(ty_op.operand);
+        const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
         const src_mcv = try self.resolveInst(ty_op.operand);

         if (self.hasFeature(.popcnt)) {
@@ -4729,16 +4724,17 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m
 }

 fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);

     const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true);
     switch (self.regExtraBits(src_ty)) {
         0 => {},
         else => |extra| try self.genBinOpMir(
-            if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh },
+            if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh },
             src_ty,
             dst_mcv,
             .{ .immediate = extra },
@@ -4749,10 +4745,11 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const src_ty = self.air.typeOf(ty_op.operand);
-    const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+    const src_ty = self.typeOf(ty_op.operand);
+    const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
     const src_mcv = try self.resolveInst(ty_op.operand);

     const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
@@ -4847,7 +4844,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
     switch (self.regExtraBits(src_ty)) {
         0 => {},
         else => |extra| try self.genBinOpMir(
-            if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh },
+            if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh },
             src_ty,
             dst_mcv,
             .{ .immediate = extra },
@@ -4858,17 +4855,18 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const tag = self.air.instructions.items(.tag)[inst];
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
-    const abi_size: u32 = switch (ty.abiSize(self.target.*)) {
+    const ty = self.typeOf(un_op);
+    const abi_size: u32 = switch (ty.abiSize(mod)) {
         1...16 => 16,
         17...32 => 32,
         else => return self.fail("TODO implement airFloatSign for {}", .{
-            ty.fmt(self.bin_file.options.module.?),
+            ty.fmt(mod),
         }),
     };
-    const scalar_bits = ty.scalarType().floatBits(self.target.*);
+    const scalar_bits = ty.scalarType(mod).floatBits(self.target.*);

     const src_mcv = try self.resolveInst(un_op);
     const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
@@ -4884,42 +4882,14 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

-    var arena = std.heap.ArenaAllocator.init(self.gpa);
-    defer arena.deinit();
+    const vec_ty = try mod.vectorType(.{
+        .len = @divExact(abi_size * 8, scalar_bits),
+        .child = (try mod.intType(.signed, scalar_bits)).ip_index,
+    });

-    const ExpectedContents = struct {
-        scalar: union {
-            i64: Value.Payload.I64,
-            big: struct {
-                limbs: [
-                    @max(
-                        std.math.big.int.Managed.default_capacity,
-                        std.math.big.int.calcTwosCompLimbCount(128),
-                    )
-                ]std.math.big.Limb,
-                pl: Value.Payload.BigInt,
-            },
-        },
-        repeated: Value.Payload.SubValue,
-    };
-    var stack align(@alignOf(ExpectedContents)) =
-        std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
-    var int_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_signed },
-        .data = scalar_bits,
-    };
-    var vec_pl = Type.Payload.Array{
-        .base = .{ .tag = .vector },
-        .data = .{
-            .len = @divExact(abi_size * 8, scalar_bits),
-            .elem_type = Type.initPayload(&int_pl.base),
-        },
-    };
-    const vec_ty = Type.initPayload(&vec_pl.base);
     const sign_val = switch (tag) {
-        .neg => try vec_ty.minInt(stack.get(), self.target.*),
-        .fabs => try vec_ty.maxInt(stack.get(), self.target.*),
+        .neg => try vec_ty.minInt(mod, vec_ty),
+        .fabs => try vec_ty.maxInt(mod, vec_ty),
         else => unreachable,
     };
@@ -4993,7 +4963,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {

 fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
+    const ty = self.typeOf(un_op);

     const src_mcv = try self.resolveInst(un_op);
     const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
@@ -5008,25 +4978,26 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void {
 }

 fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4) !void {
+    const mod = self.bin_file.options.module.?;
     if (!self.hasFeature(.sse4_1))
         return self.fail("TODO implement genRound without sse4_1 feature", .{});

-    const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) {
+    const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
         .Float => switch (ty.floatBits(self.target.*)) {
             32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
             64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
             16, 80, 128 => null,
             else => unreachable,
         },
-        .Vector => switch (ty.childType().zigTypeTag()) {
-            .Float => switch (ty.childType().floatBits(self.target.*)) {
-                32 => switch (ty.vectorLen()) {
+        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+            .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+                32 => switch (ty.vectorLen(mod)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
                     2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
                     5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null,
                     else => null,
                 },
-                64 => switch (ty.vectorLen()) {
+                64 => switch (ty.vectorLen(mod)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
                     2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round },
                     3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null,
@@ -5041,7 +5012,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
     })) |tag| tag else return self.fail("TODO implement genRound for {}", .{
         ty.fmt(self.bin_file.options.module.?),
     });
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     const dst_alias = registerAlias(dst_reg, abi_size);
     switch (mir_tag[0]) {
         .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
@@ -5078,9 +5049,10 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
 }

 fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const ty = self.typeOf(un_op);
+    const abi_size = @intCast(u32, ty.abiSize(mod));

     const src_mcv = try self.resolveInst(un_op);
     const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
@@ -5092,7 +5064,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

     const result: MCValue = result: {
-        const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) {
+        const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
             .Float => switch (ty.floatBits(self.target.*)) {
                 16 => if (self.hasFeature(.f16c)) {
                     const mat_src_reg = if (src_mcv.isRegister())
@@ -5114,9 +5086,9 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
                 80, 128 => null,
                 else => unreachable,
             },
-            .Vector => switch (ty.childType().zigTypeTag()) {
-                .Float => switch (ty.childType().floatBits(self.target.*)) {
-                    16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) {
+            .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+                .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+                    16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(mod)) {
                         1 => {
                             try self.asmRegisterRegister(
                                 .{ .v_ps, .cvtph2 },
@@ -5167,13 +5139,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
                         },
                         else => null,
                     } else null,
-                    32 => switch (ty.vectorLen()) {
+                    32 => switch (ty.vectorLen(mod)) {
                         1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt },
                         2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt },
                         5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null,
                         else => null,
                     },
-                    64 => switch (ty.vectorLen()) {
+                    64 => switch (ty.vectorLen(mod)) {
                         1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
                         2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt },
                         3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null,
@@ -5186,7 +5158,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
             },
             else => unreachable,
         })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{
-            ty.fmt(self.bin_file.options.module.?),
+            ty.fmt(mod),
         });
         switch (mir_tag[0]) {
             .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
@@ -5274,10 +5246,11 @@ fn reuseOperandAdvanced(
 }

 fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
-    const ptr_info = ptr_ty.ptrInfo().data;
+    const mod = self.bin_file.options.module.?;
+    const ptr_info = ptr_ty.ptrInfo(mod);
     const val_ty = ptr_info.pointee_type;

-    const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+    const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
     const limb_abi_size: u32 = @min(val_abi_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
     const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
@@ -5347,7 +5320,8 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
 }

 fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
-    const dst_ty = ptr_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const dst_ty = ptr_ty.childType(mod);
     switch (ptr_mcv) {
         .none,
         .unreach,
@@ -5382,20 +5356,21 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro
 }

 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
+    const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;

         try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
         const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
         defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);

-        const ptr_ty = self.air.typeOf(ty_op.operand);
-        const elem_size = elem_ty.abiSize(self.target.*);
+        const ptr_ty = self.typeOf(ty_op.operand);
+        const elem_size = elem_ty.abiSize(mod);

-        const elem_rc = regClassForType(elem_ty);
-        const ptr_rc = regClassForType(ptr_ty);
+        const elem_rc = regClassForType(elem_ty, mod);
+        const ptr_rc = regClassForType(ptr_ty, mod);

         const ptr_mcv = try self.resolveInst(ty_op.operand);
         const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and
@@ -5405,7 +5380,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.allocRegOrMem(inst, true);

-        if (ptr_ty.ptrInfo().data.host_size > 0) {
+        if (ptr_ty.ptrInfo(mod).host_size > 0) {
             try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
         } else {
             try self.load(dst_mcv, ptr_ty, ptr_mcv);
@@ -5416,13 +5391,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
-    const ptr_info = ptr_ty.ptrInfo().data;
-    const src_ty = ptr_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const ptr_info = ptr_ty.ptrInfo(mod);
+    const src_ty = ptr_ty.childType(mod);

     const limb_abi_size: u16 = @min(ptr_info.host_size, 8);
     const limb_abi_bits = limb_abi_size * 8;

-    const src_bit_size = src_ty.bitSize(self.target.*);
+    const src_bit_size = src_ty.bitSize(mod);
     const src_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
     const src_bit_off = ptr_info.bit_offset % limb_abi_bits;
@@ -5489,7 +5465,8 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
 }

 fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
-    const src_ty = ptr_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const src_ty = ptr_ty.childType(mod);
     switch (ptr_mcv) {
         .none,
         .unreach,
@@ -5524,6 +5501,7 @@ fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerErr
 }

 fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
+    const mod = self.bin_file.options.module.?;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
@@ -5531,9 +5509,9 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     }
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr_mcv = try self.resolveInst(bin_op.lhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
     const src_mcv = try self.resolveInst(bin_op.rhs);
-    if (ptr_ty.ptrInfo().data.host_size > 0) {
+    if (ptr_ty.ptrInfo(mod).host_size > 0) {
         try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
     } else {
         try self.store(ptr_ty, ptr_mcv, src_mcv);
@@ -5555,14 +5533,15 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 }

 fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
-    const ptr_field_ty = self.air.typeOfIndex(inst);
-    const ptr_container_ty = self.air.typeOf(operand);
-    const container_ty = ptr_container_ty.childType();
-    const field_offset = @intCast(i32, switch (container_ty.containerLayout()) {
-        .Auto, .Extern => container_ty.structFieldOffset(index, self.target.*),
-        .Packed => if (container_ty.zigTypeTag() == .Struct and
-            ptr_field_ty.ptrInfo().data.host_size == 0)
-            container_ty.packedStructFieldByteOffset(index, self.target.*)
+    const mod = self.bin_file.options.module.?;
+    const ptr_field_ty = self.typeOfIndex(inst);
+    const ptr_container_ty = self.typeOf(operand);
+    const container_ty = ptr_container_ty.childType(mod);
+    const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) {
+        .Auto, .Extern => container_ty.structFieldOffset(index, mod),
+        .Packed => if (container_ty.zigTypeTag(mod) == .Struct and
+            ptr_field_ty.ptrInfo(mod).host_size == 0)
+            container_ty.packedStructFieldByteOffset(index, mod)
         else
             0,
     });
@@ -5577,24 +5556,25 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
 }

 fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
     const result: MCValue = result: {
         const operand = extra.struct_operand;
         const index = extra.field_index;

-        const container_ty = self.air.typeOf(operand);
-        const container_rc = regClassForType(container_ty);
-        const field_ty = container_ty.structFieldType(index);
-        if (!field_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
-        const field_rc = regClassForType(field_ty);
+        const container_ty = self.typeOf(operand);
+        const container_rc = regClassForType(container_ty, mod);
+        const field_ty = container_ty.structFieldType(index, mod);
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+        const field_rc = regClassForType(field_ty, mod);
         const field_is_gp = field_rc.supersetOf(gp);

         const src_mcv = try self.resolveInst(operand);
-        const field_off = switch (container_ty.containerLayout()) {
-            .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*) * 8),
-            .Packed => if (container_ty.castTag(.@"struct")) |struct_obj|
-                struct_obj.data.packedFieldBitOffset(self.target.*, index)
+        const field_off = switch (container_ty.containerLayout(mod)) {
+            .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8),
+            .Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
+                struct_obj.packedFieldBitOffset(mod, index)
             else
                 0,
         };
@@ -5611,7 +5591,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
             break :result dst_mcv;
         }

-        const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*));
+        const field_abi_size = @intCast(u32, field_ty.abiSize(mod));
         const limb_abi_size: u32 = @min(field_abi_size, 8);
         const limb_abi_bits = limb_abi_size * 8;
         const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size);
@@ -5733,12 +5713,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;

-    const inst_ty = self.air.typeOfIndex(inst);
-    const parent_ty = inst_ty.childType();
-    const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, self.target.*));
+    const inst_ty = self.typeOfIndex(inst);
+    const parent_ty = inst_ty.childType(mod);
+    const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod));

     const src_mcv = try self.resolveInst(extra.field_ptr);
     const dst_mcv = if (src_mcv.isRegisterOffset() and
@@ -5751,9 +5732,10 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
-    const src_ty = self.air.typeOf(src_air);
+    const mod = self.bin_file.options.module.?;
+    const src_ty = self.typeOf(src_air);
     const src_mcv = try self.resolveInst(src_air);
-    if (src_ty.zigTypeTag() == .Vector) {
+    if (src_ty.zigTypeTag(mod) == .Vector) {
         return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)});
     }
@@ -5786,28 +5768,22 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:

     switch (tag) {
         .not => {
-            const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(self.target.*), 8));
-            const int_info = if (src_ty.tag() == .bool)
+            const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8));
+            const int_info = if (src_ty.ip_index == .bool_type)
                 std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 }
             else
-                src_ty.intInfo(self.target.*);
+                src_ty.intInfo(mod);
             var byte_off: i32 = 0;
             while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) {
-                var limb_pl = Type.Payload.Bits{
-                    .base = .{ .tag = switch (int_info.signedness) {
-                        .signed => .int_signed,
-                        .unsigned => .int_unsigned,
-                    } },
-                    .data = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)),
-                };
-                const limb_ty = Type.initPayload(&limb_pl.base);
+                const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8));
+                const limb_ty = try mod.intType(int_info.signedness, limb_bits);
                 const limb_mcv = switch (byte_off) {
                     0 => dst_mcv,
                     else => dst_mcv.address().offset(byte_off).deref(),
                 };

-                if (limb_pl.base.tag == .int_unsigned and self.regExtraBits(limb_ty) > 0) {
-                    const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_pl.data);
+                if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) {
+                    const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits);
                     try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask });
                 } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv);
             }
@@ -5819,7 +5795,8 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
 }

 fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
-    const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, dst_ty.abiSize(mod));
     if (abi_size > 8) return self.fail("TODO implement {} for {}", .{
         mir_tag,
         dst_ty.fmt(self.bin_file.options.module.?),
@@ -5866,6 +5843,7 @@ fn genShiftBinOpMir(
     lhs_mcv: MCValue,
     shift_mcv: MCValue,
 ) !void {
+    const mod = self.bin_file.options.module.?;
     const rhs_mcv: MCValue = rhs: {
         switch (shift_mcv) {
             .immediate => |imm| switch (imm) {
@@ -5880,7 +5858,7 @@ fn genShiftBinOpMir(
         break :rhs .{ .register = .rcx };
     };

-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     if (abi_size <= 8) {
         switch (lhs_mcv) {
             .register => |lhs_reg| switch (rhs_mcv) {
@@ -6099,13 +6077,14 @@ fn genShiftBinOp(
     lhs_ty: Type,
     rhs_ty: Type,
 ) !MCValue {
-    if (lhs_ty.zigTypeTag() == .Vector) {
+    const mod = self.bin_file.options.module.?;
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
     }

-    assert(rhs_ty.abiSize(self.target.*) == 1);
+    assert(rhs_ty.abiSize(mod) == 1);

-    const lhs_abi_size = lhs_ty.abiSize(self.target.*);
+    const lhs_abi_size = lhs_ty.abiSize(mod);
     if (lhs_abi_size > 16) {
         return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
     }
@@ -6136,7 +6115,7 @@ fn genShiftBinOp(
         break :dst dst_mcv;
     };

-    const signedness = lhs_ty.intInfo(self.target.*).signedness;
+    const signedness = lhs_ty.intInfo(mod).signedness;
     try self.genShiftBinOpMir(switch (air_tag) {
         .shl, .shl_exact => switch (signedness) {
             .signed => .{ ._l, .sa },
@@ -6163,11 +6142,12 @@ fn genMulDivBinOp(
     lhs: MCValue,
     rhs: MCValue,
 ) !MCValue {
-    if (dst_ty.zigTypeTag() == .Vector or dst_ty.zigTypeTag() == .Float) {
+    const mod = self.bin_file.options.module.?;
+    if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) {
         return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()});
     }
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
-    const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+    const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
     if (switch (tag) {
         else => unreachable,
         .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
@@ -6184,7 +6164,7 @@ fn genMulDivBinOp(
     const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
     defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);

-    const signedness = ty.intInfo(self.target.*).signedness;
+    const signedness = ty.intInfo(mod).signedness;
     switch (tag) {
         .mul,
         .mulwrap,
@@ -6338,13 +6318,14 @@ fn genBinOp(
     lhs_air: Air.Inst.Ref,
     rhs_air: Air.Inst.Ref,
 ) !MCValue {
-    const lhs_ty = self.air.typeOf(lhs_air);
-    const rhs_ty = self.air.typeOf(rhs_air);
-    const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const lhs_ty = self.typeOf(lhs_air);
+    const rhs_ty = self.typeOf(rhs_air);
+    const abi_size = @intCast(u32, lhs_ty.abiSize(mod));

     const maybe_mask_reg = switch (air_tag) {
         else => null,
-        .max, .min => if (lhs_ty.scalarType().isRuntimeFloat()) registerAlias(
+        .max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias(
             if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: {
                 try self.register_manager.getReg(.xmm0, null);
                 break :mask .xmm0;
@@ -6384,7 +6365,7 @@ fn genBinOp(
         else => false,
     };

-    const vec_op = switch (lhs_ty.zigTypeTag()) {
+    const vec_op = switch (lhs_ty.zigTypeTag(mod)) {
         else => false,
         .Float, .Vector => true,
     };
@@ -6456,7 +6437,7 @@ fn genBinOp(
             const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_lock);

-            const elem_size = lhs_ty.elemType2().abiSize(self.target.*);
+            const elem_size = lhs_ty.elemType2(mod).abiSize(mod);
             try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size });
             try self.genBinOpMir(
                 switch (air_tag) {
@@ -6506,7 +6487,7 @@ fn genBinOp(

                 try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv);

-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 const cc: Condition = switch (int_info.signedness) {
                     .unsigned => switch (air_tag) {
                         .min => .a,
@@ -6520,7 +6501,7 @@ fn genBinOp(
                    },
                 };

-                const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(self.target.*)), 2);
+                const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2);
                 const tmp_reg = switch (dst_mcv) {
                     .register => |reg| reg,
                     else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
@@ -6581,7 +6562,7 @@ fn genBinOp(
     }

     const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
-    const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+    const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
         else => unreachable,
         .Float => switch (lhs_ty.floatBits(self.target.*)) {
             16 => if (self.hasFeature(.f16c)) {
@@ -6657,10 +6638,10 @@ fn genBinOp(
             80, 128 => null,
             else => unreachable,
         },
-        .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+        .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
             else => null,
-            .Int => switch (lhs_ty.childType().intInfo(self.target.*).bits) {
-                8 => switch (lhs_ty.vectorLen()) {
+            .Int => switch (lhs_ty.childType(mod).intInfo(mod).bits) {
+                8 => switch (lhs_ty.vectorLen(mod)) {
                     1...16 => switch (air_tag) {
                         .add,
                         .addwrap,
@@ -6671,7 +6652,7 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_b, .mins }
                             else if (self.hasFeature(.sse4_1))
@@ -6685,7 +6666,7 @@ fn genBinOp(
                             else
                                 null,
                         },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_b, .maxs }
                             else if (self.hasFeature(.sse4_1))
@@ -6711,11 +6692,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null,
                         },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
                         },
@@ -6723,7 +6704,7 @@ fn genBinOp(
                     },
                     else => null,
                 },
-                16 => switch (lhs_ty.vectorLen()) {
+                16 => switch (lhs_ty.vectorLen(mod)) {
                     1...8 => switch (air_tag) {
                         .add,
                         .addwrap,
@@ -6737,7 +6718,7 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_w, .mins }
                             else
@@ -6747,7 +6728,7 @@ fn genBinOp(
                             else
                                 .{ .p_w, .minu },
                         },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx)) .{ .vp_w, .maxs } else
@@ -6772,11 +6753,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null,
                         },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
                         },
@@ -6784,7 +6765,7 @@ fn genBinOp(
                     },
                     else => null,
                 },
-                32 => switch (lhs_ty.vectorLen()) {
+                32 => switch (lhs_ty.vectorLen(mod)) {
                     1...4 => switch (air_tag) {
                         .add,
                         .addwrap,
@@ -6803,7 +6784,7 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_d, .mins }
                             else if (self.hasFeature(.sse4_1))
@@ -6817,7 +6798,7 @@ fn genBinOp(
                             else
                                 null,
                         },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_d, .maxs }
                             else if (self.hasFeature(.sse4_1))
@@ -6846,11 +6827,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null,
                         },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
                         },
@@ -6858,7 +6839,7 @@ fn genBinOp(
                     },
                     else => null,
                 },
-                64 => switch (lhs_ty.vectorLen()) {
+                64 => switch (lhs_ty.vectorLen(mod)) {
                     1...2 => switch (air_tag) {
                         .add,
                         .addwrap,
@@ -6887,8 +6868,8 @@ fn genBinOp(
                     },
                     else => null,
                 },
-            .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
-                16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen()) {
+            .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+                16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen(mod)) {
                     1 => {
                         const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128();
                         const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
@@ -7063,7 +7044,7 @@ fn genBinOp(
                     },
                     else => null,
                 } else null,
-                32 => switch (lhs_ty.vectorLen()) {
+                32 => switch (lhs_ty.vectorLen(mod)) {
                     1 => switch (air_tag) {
                         .add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
                         .sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
@@ -7101,7 +7082,7 @@ fn genBinOp(
                     } else null,
                     else => null,
                 },
-                64 => switch (lhs_ty.vectorLen()) {
+                64 => switch (lhs_ty.vectorLen(mod)) {
                     1 => switch (air_tag) {
                         .add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
                         .sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
@@ -7206,21 +7187,21 @@ fn genBinOp(
            const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size);

            try self.asmRegisterRegisterRegisterImmediate(
-                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
                    .Float => switch (lhs_ty.floatBits(self.target.*)) {
                        32 => .{ .v_ss, .cmp },
                        64 => .{ .v_sd, .cmp },
                        16, 80, 128 => null,
                        else => unreachable,
                    },
-                    .Vector => switch (lhs_ty.childType().zigTypeTag()) {
-                        .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen()) {
+                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(mod)) {
                                1 => .{ .v_ss, .cmp },
                                2...8 => .{ .v_ps, .cmp },
                                else => null,
                            },
-                            64 => switch (lhs_ty.vectorLen()) {
+                            64 => switch (lhs_ty.vectorLen(mod)) {
                                1 => .{ .v_sd, .cmp },
                                2...4 => .{ .v_pd, .cmp },
                                else => null,
@@ -7240,20 +7221,20 @@ fn genBinOp(
                Immediate.u(3), // unord
            );
            try self.asmRegisterRegisterRegisterRegister(
-                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
                    .Float => switch (lhs_ty.floatBits(self.target.*)) {
                        32 => .{ .v_ps, .blendv },
                        64 => .{ .v_pd, .blendv },
                        16, 80, 128 => null,
                        else => unreachable,
                    },
-                    .Vector => switch (lhs_ty.childType().zigTypeTag()) {
-                        .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen()) {
+                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(mod)) {
                                1...8 => .{ .v_ps, .blendv },
                                else => null,
                            },
-                            64 => switch (lhs_ty.vectorLen()) {
+                            64 => switch (lhs_ty.vectorLen(mod)) {
                                1...4 => .{ .v_pd, .blendv },
                                else => null,
                            },
@@ -7274,21 +7255,21 @@ fn genBinOp(
        } else {
            const has_blend = self.hasFeature(.sse4_1);
            try self.asmRegisterRegisterImmediate(
-                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
                    .Float => switch (lhs_ty.floatBits(self.target.*)) {
                        32 => .{ ._ss, .cmp },
                        64 => .{ ._sd, .cmp },
                        16, 80, 128 => null,
                        else => unreachable,
                    },
-                    .Vector => switch (lhs_ty.childType().zigTypeTag()) {
-                        .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen()) {
+                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(mod)) {
                                1 => .{ ._ss, .cmp },
                                2...4 => .{ ._ps, .cmp },
                                else => null,
                            },
-                            64 => switch (lhs_ty.vectorLen()) {
+                            64 => switch (lhs_ty.vectorLen(mod)) {
                                1 => .{ ._sd, .cmp },
                                2 => .{ ._pd, .cmp },
                                else => null,
@@ -7307,20 +7288,20 @@ fn genBinOp(
                Immediate.u(if (has_blend) 3 else 7), // unord, ord
            );
            if (has_blend) try self.asmRegisterRegisterRegister(
-                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
                    .Float => switch (lhs_ty.floatBits(self.target.*)) {
                        32 => .{ ._ps, .blendv },
                        64 => .{ ._pd, .blendv },
                        16, 80, 128 => null,
                        else => unreachable,
                    },
- .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .blendv }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .blendv }, else => null, }, @@ -7338,20 +7319,20 @@ fn genBinOp( mask_reg, ) else { try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"and" }, 64 => .{ ._pd, .@"and" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"and" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"and" }, else => null, }, @@ -7368,20 +7349,20 @@ fn genBinOp( mask_reg, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .andn }, 64 => .{ ._pd, .andn }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .andn }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .andn }, else => null, }, @@ -7398,20 +7379,20 @@ fn genBinOp( lhs_copy_reg.?, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"or" }, 64 => .{ ._pd, .@"or" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"or" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"or" }, else => null, }, @@ -7442,7 +7423,8 @@ fn genBinOpMir( dst_mcv: MCValue, src_mcv: MCValue, ) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -7562,11 +7544,7 @@ fn genBinOpMir( .load_got, .load_tlv, => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = ty, - }; - const ptr_ty = 
Type.initPayload(&ptr_pl.base);
+            const ptr_ty = try mod.singleConstPtrType(ty);
             const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address());
             return self.genBinOpMir(mir_tag, ty, dst_mcv, .{
                 .indirect = .{ .reg = addr_reg },
@@ -7640,7 +7618,7 @@ fn genBinOpMir(
             defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);
             const ty_signedness =
-                if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
+                if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned;
             const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) {
                 .signed => Type.isize,
                 .unsigned => Type.usize,
             };
@@ -7796,7 +7774,7 @@ fn genBinOpMir(
 /// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv.
 /// Does not support byte-size operands.
 fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, dst_ty.abiSize(mod));
     switch (dst_mcv) {
         .none,
         .unreach,
@@ -7896,6 +7875,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
 }

 fn airArg(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     // skip zero-bit arguments as they don't have a corresponding arg instruction
     var arg_index = self.arg_index;
     while (self.args[arg_index] == .none) arg_index += 1;
@@ -7909,9 +7889,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
             else => return self.fail("TODO implement arg for {}", .{dst_mcv}),
         }

-        const ty = self.air.typeOfIndex(inst);
+        const ty = self.typeOfIndex(inst);
         const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
-        const name = self.owner.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
+        const name = self.owner.mod_fn.getParamName(mod, src_index);
         try self.genArgDbgInfo(ty, name, dst_mcv);

         break :result dst_mcv;
@@ -7920,6 +7900,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
+    const mod = self.bin_file.options.module.?;
     switch (self.debug_output) {
         .dwarf => |dw| {
             const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) {
@@ -7938,7 +7919,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
             // TODO: this might need adjusting like the linkers do.
             // Instead of flattening the owner and passing Decl.Index here we may
             // want to special case LazySymbol in DWARF linker too.
-            try dw.genArgDbgInfo(name, ty, self.owner.getDecl(), loc);
+            try dw.genArgDbgInfo(name, ty, self.owner.getDecl(mod), loc);
         },
         .plan9 => {},
         .none => {},
@@ -7952,6 +7933,7 @@ fn genVarDbgInfo(
     mcv: MCValue,
     name: [:0]const u8,
 ) !void {
+    const mod = self.bin_file.options.module.?;
     const is_ptr = switch (tag) {
         .dbg_var_ptr => true,
         .dbg_var_val => false,
@@ -7982,7 +7964,7 @@ fn genVarDbgInfo(
             // TODO: this might need adjusting like the linkers do.
             // Instead of flattening the owner and passing Decl.Index here we may
             // want to special case LazySymbol in DWARF linker too.
-            try dw.genVarDbgInfo(name, ty, self.owner.getDecl(), is_ptr, loc);
+            try dw.genVarDbgInfo(name, ty, self.owner.getDecl(mod), is_ptr, loc);
         },
         .plan9 => {},
         .none => {},
@@ -8022,20 +8004,23 @@ fn airFence(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
+    const mod = self.bin_file.options.module.?;
     if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{});

     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-    const ty = self.air.typeOf(callee);
+    const ty = self.typeOf(callee);

-    const fn_ty = switch (ty.zigTypeTag()) {
+    const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };

-    var info = try self.resolveCallingConventionValues(fn_ty, args[fn_ty.fnParamLen()..], .call_frame);
+    const fn_info = mod.typeToFunc(fn_ty).?;
+
+    var info = try self.resolveCallingConventionValues(fn_info, args[fn_info.param_types.len..], .call_frame);
     defer info.deinit(self);

     // We need a properly aligned and sized call frame to be able to call this function.
@@ -8062,7 +8047,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
         else => unreachable,
     }
     for (args, info.args) |arg, mc_arg| {
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = self.typeOf(arg);
         const arg_mcv = try self.resolveInst(arg);
         switch (mc_arg) {
             .none => {},
@@ -8076,8 +8061,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const ret_lock = switch (info.return_value.long) {
         .none, .unreach => null,
         .indirect => |reg_off| lock: {
-            const ret_ty = fn_ty.fnReturnType();
-            const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, self.target.*));
+            const ret_ty = fn_info.return_type.toType();
+            const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod));
             try self.genSetReg(reg_off.reg, Type.usize, .{
                 .lea_frame = .{ .index = frame_index, .off = -reg_off.off },
             });
@@ -8089,7 +8074,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     defer if (ret_lock) |lock| self.register_manager.unlockReg(lock);

     for (args, info.args) |arg, mc_arg| {
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = self.typeOf(arg);
         const arg_mcv = try self.resolveInst(arg);
         switch (mc_arg) {
             .none, .load_frame => {},
@@ -8100,15 +8085,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     // Due to incremental compilation, how function calls are generated depends
     // on linking.
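// Illustrative sketch of the recurring rewrite in this change set (editorial
// example, simplified; names follow the surrounding code): target-keyed Type
// queries become Module-keyed queries, and stack-allocated Type payloads are
// replaced by types interned through the Module.
//
//     // before: every type query is keyed on the compilation target
//     const abi_size = @intCast(u32, ty.abiSize(self.target.*));
//
//     // after: fetch the Module once per function, key queries on it
//     const mod = self.bin_file.options.module.?;
//     const abi_size = @intCast(u32, ty.abiSize(mod));
//
//     // before: a pointer type built from a stack-allocated payload
//     var ptr_pl = Type.Payload.ElemType{
//         .base = .{ .tag = .single_const_pointer },
//         .data = ty,
//     };
//     const ptr_ty = Type.initPayload(&ptr_pl.base);
//
//     // after: the pointer type is interned via the Module
//     const ptr_ty = try mod.singleConstPtrType(ty);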
- const mod = self.bin_file.options.module.?; - if (self.air.value(callee)) |func_value| { - if (if (func_value.castTag(.function)) |func_payload| - func_payload.data.owner_decl - else if (func_value.castTag(.decl_ref)) |decl_ref_payload| - decl_ref_payload.data - else - null) |owner_decl| - { + if (try self.air.value(callee, mod)) |func_value| { + const func_key = mod.intern_pool.indexToKey(func_value.ip_index); + if (switch (func_key) { + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => null, + }, + else => null, + }) |owner_decl| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(owner_decl); const atom = elf_file.getAtom(atom_index); @@ -8141,10 +8127,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .disp = @intCast(i32, fn_got_addr), })); } else unreachable; - } else if (func_value.castTag(.extern_fn)) |func_payload| { - const extern_fn = func_payload.data; - const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); - const lib_name = mem.sliceTo(extern_fn.lib_name, 0); + } else if (func_value.getExternFunc(mod)) |extern_func| { + const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name); + const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try self.owner.getSymbolIndex(self); const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); @@ -8178,7 +8163,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(.rax, Type.usize, mcv); try self.asmRegister(.{ ._, .call }, .rax); @@ -8193,9 +8178,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv.short) { .none => {}, .register => try self.genCopy(ret_ty, self.ret_mcv.short, operand), @@ -8219,7 +8205,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); switch (self.ret_mcv.short) { .none => {}, .register => try self.load(self.ret_mcv.short, ptr_ty, ptr), @@ -8234,8 +8220,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { } fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); try self.spillEflagsIfOccupied(); self.eflags_inst = inst; @@ -8255,9 +8242,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const result = MCValue{ - .eflags = switch (ty.zigTypeTag()) { + 
.eflags = switch (ty.zigTypeTag(mod)) { else => result: { - const abi_size = @intCast(u16, ty.abiSize(self.target.*)); + const abi_size = @intCast(u16, ty.abiSize(mod)); const may_flip: enum { may_flip, must_flip, @@ -8290,7 +8277,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (src_lock) |lock| self.register_manager.unlockReg(lock); break :result Condition.fromCompareOperator( - if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned, + if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned, result_op: { const flipped_op = if (flipped) op.reverse() else op; if (abi_size > 8) switch (flipped_op) { @@ -8404,7 +8391,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg); try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv); } else return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), 32 => try self.genBinOpMir( .{ ._ss, .ucomi }, @@ -8419,7 +8406,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { src_mcv, ), else => return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), } @@ -8453,8 +8440,8 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - const op_ty = self.air.typeOf(un_op); - const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*)); + const op_ty = self.typeOf(un_op); + const op_abi_size = @intCast(u32, op_ty.abiSize(mod)); const op_mcv = try self.resolveInst(un_op); const dst_reg = switch (op_mcv) { .register => |reg| reg, @@ -8473,16 +8460,17 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); + const err_union_ty = self.typeOf(pl_op.operand); const result = try self.genTry(inst, pl_op.operand, body, err_union_ty, false); return self.finishAir(inst, result, .{ .none, .none, .none }); } fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(mod); const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8546,8 +8534,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; + const mod = self.bin_file.options.module.?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .unreach, .{ .none, .none, .none }); @@ -8561,7 +8550,7 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = 
self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -8573,7 +8562,8 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { } fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .eflags => |cc| { // Here we map the opposites since the jump is to the false branch. @@ -8602,7 +8592,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); - const cond_ty = self.air.typeOf(pl_op.operand); + const cond_ty = self.typeOf(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; @@ -8646,6 +8636,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { } fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; switch (opt_mcv) { .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() }, else => {}, @@ -8654,14 +8645,12 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - var pl_buf: Type.Payload.ElemType = undefined; - const pl_ty = opt_ty.optionalChild(&pl_buf); + const pl_ty = opt_ty.optionalChild(mod); - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload()) - .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool }; + .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; switch (opt_mcv) { .none, @@ -8681,14 +8670,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register => |opt_reg| { if (some_info.off == 0) { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); const alias_reg = registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } - assert(some_info.ty.tag() == .bool); - const opt_abi_size = @intCast(u32, opt_ty.abiSize(self.target.*)); + assert(some_info.ty.ip_index == .bool_type); + const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod)); try self.asmRegisterImmediate( .{ ._, .bt }, registerAlias(opt_reg, opt_abi_size), @@ -8707,7 +8696,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, opt_mcv.address()); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, 
some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8720,7 +8709,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC }, .indirect, .load_frame => { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { @@ -8742,18 +8731,17 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC } fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - const opt_ty = ptr_ty.childType(); - var pl_buf: Type.Payload.ElemType = undefined; - const pl_ty = opt_ty.optionalChild(&pl_buf); + const opt_ty = ptr_ty.childType(mod); + const pl_ty = opt_ty.optionalChild(mod); - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload()) - .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool }; + .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; const ptr_reg = switch (ptr_mcv) { .register => |reg| reg, @@ -8762,7 +8750,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8775,9 +8763,10 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) } fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { - const err_type = ty.errorUnionSet(); + const mod = self.bin_file.options.module.?; + const err_type = ty.errorUnionSet(mod); - if (err_type.errorSetIsEmpty()) { + if (err_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } @@ -8786,7 +8775,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! 
self.eflags_inst = inst; } - const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*); + const err_off = errUnionErrorOffset(ty.errorUnionPayload(mod), mod); switch (operand) { .register => |reg| { const eu_lock = self.register_manager.lockReg(reg); @@ -8844,7 +8833,7 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCVa fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNull(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8852,7 +8841,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNullPtr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8860,7 +8849,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = switch (try self.isNull(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, @@ -8871,7 +8860,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = switch (try self.isNullPtr(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, @@ -8882,12 +8871,13 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isErr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_ptr = try self.resolveInst(un_op); @@ -8905,10 +8895,10 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); - const result = try self.isErr(inst, ptr_ty.childType(), operand); + const result = try self.isErr(inst, ptr_ty.childType(mod), operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8916,12 +8906,13 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNonErr(inst, ty, operand); return self.finishAir(inst, 
result, .{ un_op, .none, .none }); } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_ptr = try self.resolveInst(un_op); @@ -8939,10 +8930,10 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); - const result = try self.isNonErr(inst, ptr_ty.childType(), operand); + const result = try self.isNonErr(inst, ptr_ty.childType(mod), operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -9005,7 +8996,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const condition = try self.resolveInst(pl_op.operand); - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); var extra_index: usize = switch_br.end; var case_i: u32 = 0; @@ -9088,12 +9079,13 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void { } fn airBr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const br = self.air.instructions.items(.data)[inst].br; const src_mcv = try self.resolveInst(br.operand); - const block_ty = self.air.typeOfIndex(br.block_inst); + const block_ty = self.typeOfIndex(br.block_inst); const block_unused = - !block_ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(br.block_inst); + !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; const block_data = self.blocks.getPtr(br.block_inst).?; const first_br = block_data.relocs.items.len == 0; @@ -9216,7 +9208,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(reg, self.air.typeOf(input), arg_mcv); + try self.genSetReg(reg, self.typeOf(input), arg_mcv); } { @@ -9402,7 +9394,8 @@ const MoveStrategy = union(enum) { }; }; fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { - switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (ty.zigTypeTag(mod)) { else => return .{ .move = .{ ._, .mov } }, .Float => switch (ty.floatBits(self.target.*)) { 16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ @@ -9419,9 +9412,9 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, else => {}, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Int => switch (ty.childType().intInfo(self.target.*).bits) { - 8 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Int => switch (ty.childType(mod).intInfo(mod).bits) { + 8 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ .insert = .{ .vp_b, .insr }, .extract = .{ .vp_b, .extr }, @@ -9451,7 +9444,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 16 => switch (ty.vectorLen()) { + 16 => switch (ty.vectorLen(mod)) { 1 => return if (self.hasFeature(.avx)) .{ 
.vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, @@ -9474,7 +9467,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_d, .mov } else @@ -9490,7 +9483,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_q, .mov } else @@ -9502,7 +9495,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 128 => switch (ty.vectorLen()) { + 128 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, @@ -9510,15 +9503,15 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 256 => switch (ty.vectorLen()) { + 256 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, else => {}, }, - .Float => switch (ty.childType().floatBits(self.target.*)) { - 16 => switch (ty.vectorLen()) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 16 => switch (ty.vectorLen(mod)) { 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, @@ -9541,7 +9534,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_ss, .mov } else @@ -9557,7 +9550,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, else => {}, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_sd, .mov } else @@ -9569,7 +9562,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu } }, else => {}, }, - 128 => switch (ty.vectorLen()) { + 128 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, @@ -9647,7 +9640,8 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError } fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size * 8 > dst_reg.bitSize()) return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { @@ -9730,7 +9724,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .{ .register = try 
self.copyToTmpRegister(ty, src_mcv) }, ), .sse => try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType().zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) { else => switch (abi_size) { 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9738,7 +9732,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr 17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null, else => null, }, - .Float => switch (ty.scalarType().floatBits(self.target.*)) { + .Float => switch (ty.scalarType(mod).floatBits(self.target.*)) { 16, 128 => switch (abi_size) { 2...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9789,7 +9783,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .indirect => try self.moveStrategy(ty, false), .load_frame => |frame_addr| try self.moveStrategy( ty, - self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*), + self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod), ), .lea_frame => .{ .move = .{ ._, .lea } }, else => unreachable, @@ -9821,7 +9815,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr switch (try self.moveStrategy(ty, mem.isAlignedGeneric( u32, @bitCast(u32, small_addr), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ))) { .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), .insert_extract => |ie| try self.asmRegisterMemoryImmediate( @@ -9839,7 +9833,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ), } }, - .load_direct => |sym_index| switch (ty.zigTypeTag()) { + .load_direct => |sym_index| switch (ty.zigTypeTag(mod)) { else => { const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ @@ -9933,7 +9927,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr } fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); const dst_ptr_mcv: MCValue = switch (base) { .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, @@ -9945,7 +9940,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal try self.genInlineMemset(dst_ptr_mcv, .{ .immediate = 0xaa }, .{ .immediate = abi_size }), .immediate => |imm| switch (abi_size) { 1, 2, 4 => { - const immediate = if (ty.isSignedInt()) + const immediate = if (ty.isSignedInt(mod)) Immediate.s(@truncate(i32, @bitCast(i64, imm))) else Immediate.u(@intCast(u32, imm)); @@ -9967,7 +9962,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), - if (ty.isSignedInt()) + if (ty.isSignedInt(mod)) Immediate.s(@truncate( i32, @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63), @@ -9991,19 +9986,19 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .none => mem.isAlignedGeneric( u32, @bitCast(u32, disp), - 
ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ), .reg => |reg| switch (reg) { .es, .cs, .ss, .ds => mem.isAlignedGeneric( u32, @bitCast(u32, disp), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ), else => false, }, .frame => |frame_index| self.getFrameAddrAlignment( .{ .index = frame_index, .off = disp }, - ) >= ty.abiAlignment(self.target.*), + ) >= ty.abiAlignment(mod), })) { .move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias), .insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate( @@ -10017,14 +10012,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .register_overflow => |ro| { try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(0, self.target.*)), - ty.structFieldType(0), + disp + @intCast(i32, ty.structFieldOffset(0, mod)), + ty.structFieldType(0, mod), .{ .register = ro.reg }, ); try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(1, self.target.*)), - ty.structFieldType(1), + disp + @intCast(i32, ty.structFieldOffset(1, mod)), + ty.structFieldType(1, mod), .{ .eflags = ro.eflags }, ); }, @@ -10138,7 +10133,7 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); try self.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; @@ -10146,13 +10141,14 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const result = result: { - const dst_rc = regClassForType(dst_ty); - const src_rc = regClassForType(src_ty); + const dst_rc = regClassForType(dst_ty, mod); + const src_rc = regClassForType(src_ty, mod); const src_mcv = try self.resolveInst(ty_op.operand); const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; @@ -10172,13 +10168,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; const dst_signedness = - if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const src_signedness = - if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; + if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; if (dst_signedness == src_signedness) break :result dst_mcv; - const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*)); - const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*)); + const abi_size = @intCast(u16, dst_ty.abiSize(mod)); + const bit_size = @intCast(u16, dst_ty.bitSize(mod)); if (abi_size * 8 <= bit_size) break :result dst_mcv; const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable; @@ -10192,14 +10188,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const high_lock = self.register_manager.lockReg(high_reg); defer if (high_lock) |lock| self.register_manager.unlockReg(lock); - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (dst_signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = bit_size % 64, - }; - const 
high_ty = Type.initPayload(&high_pl.base); + const high_ty = try mod.intType(dst_signedness, bit_size % 64); try self.truncateRegister(high_ty, high_reg); if (!dst_mcv.isRegister()) try self.genCopy( @@ -10213,19 +10202,20 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const slice_ty = self.air.typeOfIndex(inst); - const ptr_ty = self.air.typeOf(ty_op.operand); + const slice_ty = self.typeOfIndex(inst); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = array_ty.arrayLen(); + const array_ty = ptr_ty.childType(mod); + const array_len = array_ty.arrayLen(mod); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(self.target.*)), + @intCast(i32, ptr_ty.abiSize(mod)), Type.usize, .{ .immediate = array_len }, ); @@ -10235,20 +10225,21 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); - const src_bits = @intCast(u32, src_ty.bitSize(self.target.*)); + const src_ty = self.typeOf(ty_op.operand); + const src_bits = @intCast(u32, src_ty.bitSize(mod)); const src_signedness = - if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; - const dst_ty = self.air.typeOfIndex(inst); + if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; + const dst_ty = self.typeOfIndex(inst); const src_size = math.divCeil(u32, @max(switch (src_signedness) { .signed => src_bits, .unsigned => src_bits + 1, }, 32), 8) catch unreachable; if (src_size > 8) return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(mod), dst_ty.fmt(mod), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -10261,12 +10252,12 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg); - const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) { .Float => switch (dst_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 }, @@ -10275,7 +10266,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { }, else => null, })) |tag| tag else return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), 
dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(mod), dst_ty.fmt(mod), }); const dst_alias = dst_reg.to128(); const src_alias = registerAlias(src_reg, src_size); @@ -10288,13 +10279,14 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); - const dst_ty = self.air.typeOfIndex(inst); - const dst_bits = @intCast(u32, dst_ty.bitSize(self.target.*)); + const src_ty = self.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); const dst_signedness = - if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const dst_size = math.divCeil(u32, @max(switch (dst_signedness) { .signed => dst_bits, @@ -10312,13 +10304,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) { .Float => switch (src_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si }, 64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, .cvttsd2si }, @@ -10339,12 +10331,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(extra.ptr); - const val_ty = self.air.typeOf(extra.expected_value); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const ptr_ty = self.typeOf(extra.ptr); + const val_ty = self.typeOf(extra.expected_value); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); @@ -10433,6 +10426,7 @@ fn atomicOp( rmw_op: ?std.builtin.AtomicRmwOp, order: std.builtin.AtomicOrder, ) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -10445,7 +10439,7 @@ fn atomicOp( }; defer if (val_lock) |lock| self.register_manager.unlockReg(lock); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), @@ -10539,8 +10533,8 @@ fn atomicOp( .Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv), .Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, 
tmp_mcv, val_mcv), .Min, .Max => { - const cc: Condition = switch (if (val_ty.isAbiInt()) - val_ty.intInfo(self.target.*).signedness + const cc: Condition = switch (if (val_ty.isAbiInt(mod)) + val_ty.intInfo(mod).signedness else .unsigned) { .unsigned => switch (op) { @@ -10682,10 +10676,10 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { const unused = self.liveness.isUnused(inst); - const ptr_ty = self.air.typeOf(pl_op.operand); + const ptr_ty = self.typeOf(pl_op.operand); const ptr_mcv = try self.resolveInst(pl_op.operand); - const val_ty = self.air.typeOf(extra.operand); + const val_ty = self.typeOf(extra.operand); const val_mcv = try self.resolveInst(extra.operand); const result = @@ -10696,7 +10690,7 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; - const ptr_ty = self.air.typeOf(atomic_load.ptr); + const ptr_ty = self.typeOf(atomic_load.ptr); const ptr_mcv = try self.resolveInst(atomic_load.ptr); const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -10717,10 +10711,10 @@ fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr_mcv = try self.resolveInst(bin_op.lhs); - const val_ty = self.air.typeOf(bin_op.rhs); + const val_ty = self.typeOf(bin_op.rhs); const val_mcv = try self.resolveInst(bin_op.rhs); const result = try self.atomicOp(ptr_mcv, val_mcv, ptr_ty, val_ty, true, null, order); @@ -10728,6 +10722,7 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr } fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { + const mod = self.bin_file.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -10737,7 +10732,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); - const dst_ptr_ty = self.air.typeOf(bin_op.lhs); + const dst_ptr_ty = self.typeOf(bin_op.lhs); const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10745,26 +10740,26 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock); const src_val = try self.resolveInst(bin_op.rhs); - const elem_ty = self.air.typeOf(bin_op.rhs); + const elem_ty = self.typeOf(bin_op.rhs); const src_val_lock: ?RegisterLock = switch (src_val) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, }; defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); - const elem_abi_size = @intCast(u31, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); if (elem_abi_size == 1) { - const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) { + const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { // TODO: this only handles slices stored in the stack .Slice => dst_ptr, .One => dst_ptr, .C, .Many => unreachable, }; - const len: MCValue = switch (dst_ptr_ty.ptrSize()) { + const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { // 
TODO: this only handles slices stored in the stack
         .Slice => dst_ptr.address().offset(8).deref(),
-        .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() },
+        .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
         .C, .Many => unreachable,
     };
     const len_lock: ?RegisterLock = switch (len) {
@@ -10780,10 +10775,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     // Store the first element, and then rely on memcpy copying forwards.
     // Length zero requires a runtime check - so we handle arrays specially
     // here to elide it.
-    switch (dst_ptr_ty.ptrSize()) {
+    switch (dst_ptr_ty.ptrSize(mod)) {
         .Slice => {
-            var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-            const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf);
+            const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(mod);

             // TODO: this only handles slices stored in the stack
             const ptr = dst_ptr;
@@ -10823,13 +10817,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
             try self.performReloc(skip_reloc);
         },
         .One => {
-            var elem_ptr_pl = Type.Payload.ElemType{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = elem_ty,
-            };
-            const elem_ptr_ty = Type.initPayload(&elem_ptr_pl.base);
+            const elem_ptr_ty = try mod.singleMutPtrType(elem_ty);

-            const len = dst_ptr_ty.childType().arrayLen();
+            const len = dst_ptr_ty.childType(mod).arrayLen(mod);
             assert(len != 0); // prevented by Sema

             try self.store(elem_ptr_ty, dst_ptr, src_val);
@@ -10854,10 +10844,11 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 }

 fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;

     const dst_ptr = try self.resolveInst(bin_op.lhs);
-    const dst_ptr_ty = self.air.typeOf(bin_op.lhs);
+    const dst_ptr_ty = self.typeOf(bin_op.lhs);
     const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
@@ -10871,9 +10862,9 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);

-    const len: MCValue = switch (dst_ptr_ty.ptrSize()) {
+    const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
         .Slice => dst_ptr.address().offset(8).deref(),
-        .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() },
+        .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
         .C, .Many => unreachable,
     };
     const len_lock: ?RegisterLock = switch (len) {
@@ -10891,14 +10882,14 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const inst_ty = self.air.typeOfIndex(inst);
-    const enum_ty = self.air.typeOf(un_op);
+    const inst_ty = self.typeOfIndex(inst);
+    const enum_ty = self.typeOf(un_op);

     // We need a properly aligned and sized call frame to be able to call this function.
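// Illustrative recap of airMemset's length handling above (editorial sketch in
// pseudo-code; `dst` points at `len` elements of `elem_size` bytes):
//
//     dst[0] = elem;                       // write the element pattern once
//     inlineMemcpy(dst + elem_size, dst,   // overlapping, forwards-copying
//         (len - 1) * elem_size);          // copy propagates the pattern
//
// A slice's length is only known at runtime, so the .Slice arm emits a
// zero-length check that skips the store and copy; an array's length is
// comptime-known and nonzero (prevented by Sema, as asserted above), so the
// .One arm elides the check.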
{ const needed_call_frame = FrameAlloc.init(.{ - .size = inst_ty.abiSize(self.target.*), - .alignment = inst_ty.abiAlignment(self.target.*), + .size = inst_ty.abiSize(mod), + .alignment = inst_ty.abiAlignment(mod), }); const frame_allocs_slice = self.frame_allocs.slice(); const stack_frame_size = @@ -10923,7 +10914,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { try self.genLazySymbolRef( .call, .rax, - link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod), + link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(mod), mod), ); return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); @@ -10933,7 +10924,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const err_ty = self.air.typeOf(un_op); + const err_ty = self.typeOf(un_op); const err_mcv = try self.resolveInst(un_op); const err_reg = try self.copyToTmpRegister(err_ty, err_mcv); const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); @@ -11013,17 +11004,18 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { } fn airSplat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const vector_ty = self.air.typeOfIndex(inst); - const dst_rc = regClassForType(vector_ty); - const scalar_ty = vector_ty.scalarType(); + const vector_ty = self.typeOfIndex(inst); + const dst_rc = regClassForType(vector_ty, mod); + const scalar_ty = vector_ty.scalarType(mod); const src_mcv = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - switch (scalar_ty.zigTypeTag()) { + switch (scalar_ty.zigTypeTag(mod)) { else => {}, .Float => switch (scalar_ty.floatBits(self.target.*)) { - 32 => switch (vector_ty.vectorLen()) { + 32 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11103,7 +11095,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { }, else => {}, }, - 64 => switch (vector_ty.vectorLen()) { + 64 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11169,7 +11161,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { }, else => {}, }, - 128 => switch (vector_ty.vectorLen()) { + 128 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11233,36 +11225,37 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const result_ty = self.air.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const mod = self.bin_file.options.module.?; + const result_ty = self.typeOfIndex(inst); + const len = @intCast(usize, result_ty.arrayLen(mod)); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Struct => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); - if (result_ty.containerLayout() == .Packed) { - const 
struct_obj = result_ty.castTag(.@"struct").?.data; + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); + if (result_ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(result_ty).?; try self.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, - .{ .immediate = result_ty.abiSize(self.target.*) }, + .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i); - const elem_bit_size = @intCast(u32, elem_ty.bitSize(self.target.*)); + const elem_ty = result_ty.structFieldType(elem_i, mod); + const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); if (elem_bit_size > 64) { return self.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); const elem_abi_bits = elem_abi_size * 8; - const elem_off = struct_obj.packedFieldBitOffset(self.target.*, elem_i); + const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i); const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); @@ -11322,10 +11315,10 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i); - const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, self.target.*)); + const elem_ty = result_ty.structFieldType(elem_i, mod); + const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -11337,9 +11330,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { }, .Array => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); - const elem_ty = result_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); + const elem_ty = result_ty.childType(mod); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); for (elements, 0..) 
|elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -11350,7 +11343,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_off = @intCast(i32, elem_size * elem_i); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } - if (result_ty.sentinel()) |sentinel| try self.genSetMem( + if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, elem_size * elements.len), elem_ty, @@ -11374,13 +11367,14 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { - const union_ty = self.air.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(self.target.*); + const union_ty = self.typeOfIndex(inst); + const layout = union_ty.unionGetLayout(mod); - const src_ty = self.air.typeOf(extra.init); + const src_ty = self.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); if (layout.tag_size == 0) { if (self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv; @@ -11392,15 +11386,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv = try self.allocRegOrMem(inst, false); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_name = union_obj.fields.keys()[extra.field_index]; - const tag_ty = union_ty.unionTagTypeSafety().?; - const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); - var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; - const tag_val = Value.initPayload(&tag_pl.base); - var tag_int_pl: Value.Payload.U64 = undefined; - const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl); - const tag_int = tag_int_val.toUnsignedInt(self.target.*); + const tag_ty = union_obj.tag_ty; + const field_index = tag_ty.enumFieldIndex(field_name, mod).?; + const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); + const tag_int_val = try tag_val.enumToInt(tag_ty, mod); + const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) @intCast(i32, layout.payload_size) else @@ -11424,9 +11416,10 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { } fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); if (!self.hasFeature(.fma)) return self.fail("TODO implement airMulAdd for {}", .{ ty.fmt(self.bin_file.options.module.?), @@ -11466,21 +11459,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mir_tag = if (@as( ?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd132 }, 64 => .{ .v_sd, .fmadd132 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float 
=> switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd132 }, 2...8 => .{ .v_ps, .fmadd132 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd132 }, 2...4 => .{ .v_pd, .fmadd132 }, else => null, @@ -11493,21 +11486,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd213 }, 64 => .{ .v_sd, .fmadd213 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd213 }, 2...8 => .{ .v_ps, .fmadd213 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd213 }, 2...4 => .{ .v_pd, .fmadd213 }, else => null, @@ -11520,21 +11513,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd231 }, 64 => .{ .v_sd, .fmadd231 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd231 }, 2...8 => .{ .v_ps, .fmadd231 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd231 }, 2...4 => .{ .v_pd, .fmadd231 }, else => null, @@ -11555,7 +11548,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -11573,22 +11566,22 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { - const ty = self.air.typeOf(ref); + const mod = self.bin_file.options.module.?; + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. 
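The comment above belongs to resolveInst's early return just below: values whose type has no codegen bits are never materialized. Which types are zero-bit can be checked from userspace with @sizeOf; a short illustration (the compiler's hasRuntimeBitsIgnoreComptime additionally distinguishes comptime-only types, which @sizeOf alone does not):

const std = @import("std");

test "zero-bit types occupy no storage" {
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(void));
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(u0));
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(struct {}));
    try std.testing.expectEqual(@as(usize, 0), @sizeOf([0]u32));
}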
- if (!ty.hasRuntimeBitsIgnoreComptime()) return .none; + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (Air.refToIndex(ref)) |inst| { const mcv = switch (self.air.instructions.items(.tag)[inst]) { - .constant => tracking: { + .interned => tracking: { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref).?, + .val = self.air.instructions.items(.data)[inst].interned.toValue(), })); break :tracking gop.value_ptr; }, - .const_ty => unreachable, else => self.inst_tracking.getPtr(inst).?, }.short; switch (mcv) { @@ -11597,13 +11590,12 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref).? }); + return self.genTypedValue(.{ .ty = ty, .val = (try self.air.value(ref, mod)).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { const tracking = switch (self.air.instructions.items(.tag)[inst]) { - .constant => &self.const_tracking, - .const_ty => unreachable, + .interned => &self.const_tracking, else => &self.inst_tracking, }.getPtr(inst).?; return switch (tracking.short) { @@ -11634,7 +11626,8 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV } fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { - return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl())) { + const mod = self.bin_file.options.module.?; + return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl(mod))) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, @@ -11666,17 +11659,23 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues( self: *Self, - fn_ty: Type, + fn_info: InternPool.Key.FuncType, var_args: []const Air.Inst.Ref, stack_frame_base: FrameIndex, ) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_len = fn_ty.fnParamLen(); - const param_types = try self.gpa.alloc(Type, param_len + var_args.len); + const mod = self.bin_file.options.module.?; + const cc = fn_info.cc; + const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + + for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| { + dest.* = src.toType(); + } // TODO: promote var arg types - for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.air.typeOf(arg); + for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg| { + param_ty.* = self.typeOf(arg); + } + var result: CallMCValues = .{ .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. @@ -11686,7 +11685,7 @@ fn resolveCallingConventionValues( }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_info.return_type.toType(); switch (cc) { .Naked => { @@ -11702,21 +11701,21 @@ fn resolveCallingConventionValues( switch (self.target.os.tag) { .windows => { // Align the stack to 16bytes before allocating shadow stack space (if any). 
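On Windows x64 the caller reserves a 32-byte "shadow space" (one 8-byte home slot per register parameter) on top of the 16-byte stack alignment the comment above mentions, which is what the following `result.stack_byte_count` adjustment accounts for. A toy sketch of that bookkeeping using the same std.mem.alignForwardGeneric helper this diff already relies on (win64StackBytes is a hypothetical name; real codegen interleaves this with parameter assignment):

const std = @import("std");

fn win64StackBytes(arg_bytes: u31) u31 {
    // Align to 16 bytes first, then reserve the four 8-byte home slots.
    const aligned = std.mem.alignForwardGeneric(u31, arg_bytes, 16);
    return aligned + 4 * @sizeOf(u64);
}

test "shadow space sits on top of the aligned argument area" {
    try std.testing.expectEqual(@as(u31, 32), win64StackBytes(0));
    try std.testing.expectEqual(@as(u31, 48), win64StackBytes(8));
}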
- result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(self.target.*)); + result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod)); }, else => {}, } // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { // TODO: is this even possible for C calling convention? result.return_value = InstTracking.init(.none); } else { const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ret_ty, self.target.*, .ret), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11725,7 +11724,7 @@ fn resolveCallingConventionValues( result.return_value = switch (classes[0]) { .integer => InstTracking.init(.{ .register = registerAlias( ret_reg, - @intCast(u32, ret_ty.abiSize(self.target.*)), + @intCast(u32, ret_ty.abiSize(mod)), ) }), .float, .sse => InstTracking.init(.{ .register = .xmm0 }), .memory => ret: { @@ -11744,11 +11743,11 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - assert(ty.hasRuntimeBitsIgnoreComptime()); + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ty, mod, .arg), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11783,8 +11782,8 @@ fn resolveCallingConventionValues( }), } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = @intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11798,13 +11797,13 @@ fn resolveCallingConventionValues( result.stack_align = 16; // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @intCast(u31, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod)); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; @@ -11819,12 +11818,12 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { arg.* = .none; continue; } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = 
@intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11908,9 +11907,10 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { /// Truncates the value in the register in place. /// Clobbers any remaining bits. fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { - const int_info = if (ty.isAbiInt()) ty.intInfo(self.target.*) else std.builtin.Type.Int{ + const mod = self.bin_file.options.module.?; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(self.target.*)), + .bits = @intCast(u16, ty.bitSize(mod)), }; const max_reg_bit_width = Register.rax.bitSize(); switch (int_info.signedness) { @@ -11953,8 +11953,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { } fn regBitSize(self: *Self, ty: Type) u64 { - const abi_size = ty.abiSize(self.target.*); - return switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); + return switch (ty.zigTypeTag(mod)) { else => switch (abi_size) { 1 => 8, 2 => 16, @@ -11971,7 +11972,8 @@ fn regBitSize(self: *Self, ty: Type) u64 { } fn regExtraBits(self: *Self, ty: Type) u64 { - return self.regBitSize(ty) - ty.bitSize(self.target.*); + const mod = self.bin_file.options.module.?; + return self.regBitSize(ty) - ty.bitSize(mod); } fn hasFeature(self: *Self, feature: Target.x86.Feature) bool { @@ -11983,3 +11985,13 @@ fn hasAnyFeatures(self: *Self, features: anytype) bool { fn hasAllFeatures(self: *Self, features: anytype) bool { return Target.x86.featureSetHasAll(self.target.cpu.features, features); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index e79424d6d8..69df5dbf4c 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -1,10 +1,3 @@ -const std = @import("std"); -const Type = @import("../../type.zig").Type; -const Target = std.Target; -const assert = std.debug.assert; -const Register = @import("bits.zig").Register; -const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; - pub const Class = enum { integer, sse, @@ -19,7 +12,7 @@ pub const Class = enum { float_combine, }; -pub fn classifyWindows(ty: Type, target: Target) Class { +pub fn classifyWindows(ty: Type, mod: *Module) Class { // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017 // "There's a strict one-to-one correspondence between a function call's arguments // and the registers used for those arguments. Any argument that doesn't fit in 8 @@ -28,7 +21,7 @@ pub fn classifyWindows(ty: Type, target: Target) Class { // "All floating point operations are done using the 16 XMM registers." // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed // as if they were integers of the same size." 
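The size rule quoted above is what classifyWindows encodes in the switch that follows: any 1-, 2-, 4-, or 8-byte value travels as an integer, and everything else goes to memory (with i128 and packed aggregates special-cased as win_i128). A toy version of just the size check (hypothetical names, not the compiler's API):

const std = @import("std");

const ToyClass = enum { integer, memory };

fn toyClassifyWindows(comptime T: type) ToyClass {
    return switch (@sizeOf(T)) {
        1, 2, 4, 8 => .integer,
        else => .memory,
    };
}

test "small aggregates are passed as integers" {
    try std.testing.expectEqual(ToyClass.integer, toyClassifyWindows(extern struct { a: u32, b: u32 }));
    try std.testing.expectEqual(ToyClass.memory, toyClassifyWindows(extern struct { a: u64, b: u64, c: u64 }));
}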
- switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Pointer, .Int, .Bool, @@ -43,12 +36,12 @@ pub fn classifyWindows(ty: Type, target: Target) Class { .ErrorUnion, .AnyFrame, .Frame, - => switch (ty.abiSize(target)) { + => switch (ty.abiSize(mod)) { 0 => unreachable, 1, 2, 4, 8 => return .integer, - else => switch (ty.zigTypeTag()) { + else => switch (ty.zigTypeTag(mod)) { .Int => return .win_i128, - .Struct, .Union => if (ty.containerLayout() == .Packed) { + .Struct, .Union => if (ty.containerLayout(mod) == .Packed) { return .win_i128; } else { return .memory; @@ -75,14 +68,15 @@ pub const Context = enum { ret, arg, other }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. -pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { +pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { + const target = mod.getTarget(); const memory_class = [_]Class{ .memory, .none, .none, .none, .none, .none, .none, .none, }; var result = [1]Class{.none} ** 8; - switch (ty.zigTypeTag()) { - .Pointer => switch (ty.ptrSize()) { + switch (ty.zigTypeTag(mod)) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice => { result[0] = .integer; result[1] = .integer; @@ -94,7 +88,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, }, .Int, .Enum, .ErrorSet => { - const bits = ty.intInfo(target).bits; + const bits = ty.intInfo(mod).bits; if (bits <= 64) { result[0] = .integer; return result; @@ -164,8 +158,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); - const bits = elem_ty.bitSize(target) * ty.arrayLen(); + const elem_ty = ty.childType(mod); + const bits = elem_ty.bitSize(mod) * ty.arrayLen(mod); if (bits <= 64) return .{ .sse, .none, .none, .none, .none, .none, .none, .none, @@ -204,7 +198,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return memory_class; }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { result[0] = .integer; return result; } @@ -215,8 +209,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); - if (ty.containerLayout() == .Packed) { + const ty_size = ty.abiSize(mod); + if (ty.containerLayout(mod) == .Packed) { assert(ty_size <= 128); result[0] = .integer; if (ty_size > 64) result[1] = .integer; @@ -227,15 +221,15 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { var result_i: usize = 0; // out of 8 var byte_i: usize = 0; // out of 8 - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } - const field_size = field.ty.abiSize(target); - const field_class_array = classifySystemV(field.ty, target, .other); + const field_size = field.ty.abiSize(mod); + const field_class_array = classifySystemV(field.ty, mod, .other); const field_class = std.mem.sliceTo(&field_class_array, .none); if (byte_i + field_size <= 8) { // Combine this field with the previous one. 
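Combining a field's class with the previous one follows the System V ABI's merge rules, applied eightbyte by eightbyte in both the struct and union paths of this classifier: equal classes keep their class, NONE yields the other operand, MEMORY is sticky, and INTEGER wins over SSE. A condensed sketch of that lattice (the real classifier also handles the X87 classes):

const std = @import("std");

const ToyClass = enum { none, integer, sse, memory };

fn merge(a: ToyClass, b: ToyClass) ToyClass {
    if (a == b) return a;
    if (a == .none) return b;
    if (b == .none) return a;
    if (a == .memory or b == .memory) return .memory;
    if (a == .integer or b == .integer) return .integer;
    return .sse;
}

test "merge rules" {
    try std.testing.expectEqual(ToyClass.integer, merge(.integer, .sse));
    try std.testing.expectEqual(ToyClass.memory, merge(.sse, .memory));
    try std.testing.expectEqual(ToyClass.sse, merge(.none, .sse));
}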
@@ -334,8 +328,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); - if (ty.containerLayout() == .Packed) { + const ty_size = ty.abiSize(mod); + if (ty.containerLayout(mod) == .Packed) { assert(ty_size <= 128); result[0] = .integer; if (ty_size > 64) result[1] = .integer; @@ -344,15 +338,15 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { if (ty_size > 64) return memory_class; - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } // Combine this field with the previous one. - const field_class = classifySystemV(field.ty, target, .other); + const field_class = classifySystemV(field.ty, mod, .other); for (&result, 0..) |*result_item, i| { const field_item = field_class[i]; // "If both classes are equal, this is the resulting class." @@ -426,7 +420,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return result; }, .Array => { - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty_size <= 64) { result[0] = .integer; return result; @@ -527,10 +521,17 @@ pub const RegisterClass = struct { }; }; -const testing = std.testing; -const Module = @import("../../Module.zig"); -const Value = @import("../../value.zig").Value; const builtin = @import("builtin"); +const std = @import("std"); +const Target = std.Target; +const assert = std.debug.assert; +const testing = std.testing; + +const Module = @import("../../Module.zig"); +const Register = @import("bits.zig").Register; +const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; +const Type = @import("../../type.zig").Type; +const Value = @import("../../value.zig").Value; fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { return .{ @@ -541,34 +542,3 @@ fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { .is_comptime = false, }; } - -test "C_C_D" { - var fields = Module.Struct.Fields{}; - // const C_C_D = extern struct { v1: i8, v2: i8, v3: f64 }; - try fields.ensureTotalCapacity(testing.allocator, 3); - defer fields.deinit(testing.allocator); - fields.putAssumeCapacity("v1", _field(.i8, 0)); - fields.putAssumeCapacity("v2", _field(.i8, 1)); - fields.putAssumeCapacity("v3", _field(.f64, 4)); - - var C_C_D_struct = Module.Struct{ - .fields = fields, - .namespace = undefined, - .owner_decl = undefined, - .zir_index = undefined, - .layout = .Extern, - .status = .fully_resolved, - .known_non_opv = true, - .is_tuple = false, - }; - var C_C_D = Type.Payload.Struct{ .data = &C_C_D_struct }; - - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .ret), - ); - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .arg), - ); -} diff --git a/src/codegen.zig b/src/codegen.zig index adce183833..b39c3c5ec0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -14,6 +14,7 @@ const Air = @import("Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("Compilation.zig"); const 
ErrorMsg = Module.ErrorMsg; +const InternPool = @import("InternPool.zig"); const Liveness = @import("Liveness.zig"); const Module = @import("Module.zig"); const Target = std.Target; @@ -66,7 +67,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( bin_file: *link.File, src_loc: Module.SrcLoc, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -75,17 +76,17 @@ pub fn generateFunction( switch (bin_file.options.target.cpu.arch) { .arm, .armeb, - => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .aarch64, .aarch64_be, .aarch64_32, - => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .wasm32, .wasm64, - => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), else => unreachable, } } @@ -139,13 +140,14 @@ pub fn generateLazySymbol( return generateLazyFunction(bin_file, src_loc, lazy_sym, code, debug_output); } - if (lazy_sym.ty.isAnyError()) { + if (lazy_sym.ty.isAnyError(mod)) { alignment.* = 4; - const err_names = mod.error_name_list.items; + const err_names = mod.global_error_set.keys(); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); var offset = code.items.len; try code.resize((1 + err_names.len + 1) * 4); - for (err_names) |err_name| { + for (err_names) |err_name_nts| { + const err_name = mod.intern_pool.stringToSlice(err_name_nts); mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); offset += 4; try code.ensureUnusedCapacity(err_name.len + 1); @@ -154,9 +156,10 @@ pub fn generateLazySymbol( } mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); return Result.ok; - } else if (lazy_sym.ty.zigTypeTag() == .Enum) { + } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; - for (lazy_sym.ty.enumFields().keys()) |tag_name| { + for (lazy_sym.ty.enumFields(mod)) |tag_name_ip| { + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); try code.ensureUnusedCapacity(tag_name.len + 1); code.appendSliceAssumeCapacity(tag_name); code.appendAssumeCapacity(0); @@ -181,529 +184,110 @@ pub fn generateSymbol( const tracy = trace(@src()); defer tracy.end(); + const mod = bin_file.options.module.?; var 
typed_value = arg_tv; - if (arg_tv.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const target = bin_file.options.target; + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - const mod = bin_file.options.module.?; log.debug("generateSymbol: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), }); - if (typed_value.val.isUndefDeep()) { - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + if (typed_value.val.isUndefDeep(mod)) { + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); - return Result.ok; + return .ok; } - switch (typed_value.ty.zigTypeTag()) { - .Fn => { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol function pointers", - .{}, - ), - }; - }, - .Float => { - switch (typed_value.ty.floatBits(target)) { - 16 => writeFloat(f16, typed_value.val.toFloat(f16), target, endian, try code.addManyAsArray(2)), - 32 => writeFloat(f32, typed_value.val.toFloat(f32), target, endian, try code.addManyAsArray(4)), - 64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)), - 80 => { - writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try code.addManyAsArray(10)); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; - try code.appendNTimes(0, abi_size - 10); - }, - 128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)), + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try code.append(switch (simple_value) { + .false => 0, + .true => 1, else => unreachable, - } - return Result.ok; + }), }, - .Array => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel()); - // The bytes payload already includes the sentinel, if any - try code.ensureUnusedCapacity(len); - code.appendSliceAssumeCapacity(bytes[0..len]); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try code.ensureUnusedCapacity(bytes.len + 1); - code.appendSliceAssumeCapacity(bytes); - if (typed_value.ty.sentinel()) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(target)); - code.appendAssumeCapacity(byte); - } - return Result.ok; - }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.elemType(); - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel()); - for 
(elem_vals[0..len]) |elem_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - return Result.ok; - }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(); - const sentinel = typed_value.ty.sentinel(); - const len = typed_value.ty.arrayLen(); - - var index: u64 = 0; - while (index < len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - if (sentinel) |sentinel_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - return Result.ok; - }, - .empty_array_sentinel => { - const elem_ty = typed_value.ty.childType(); - const sentinel_val = typed_value.ty.sentinel().?; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - return Result.ok; - }, - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for array type value: {s}", - .{@tagName(typed_value.val.tag())}, - ), - }, + .variable, + .extern_func, + .func, + .enum_literal, + .empty_enum_value, + => unreachable, // non-runtime values + .int => { + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + var space: Value.BigIntSpace = undefined; + const val = typed_value.val.toBigInt(&space, mod); + val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); }, - .Pointer => switch (typed_value.val.tag()) { - .null_value => { - switch (target.ptrBitWidth()) { - 32 => { - mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); - if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 4); - }, - 64 => { - mem.writeInt(u64, try code.addManyAsArray(8), 0, endian); - if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 8); - }, - else => unreachable, - } - return Result.ok; - }, - .zero, .one, .int_u64, .int_big_positive => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(target); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(target); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => typed_value.val.castTag(.variable).?.data.owner_decl, - .decl_ref => typed_value.val.castTag(.decl_ref).?.data, - .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }, - code, - debug_output, - reloc_info, - ), - .slice => { - const slice = typed_value.val.castTag(.slice).?.data; - - // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = slice.ptr, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } 
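The new `.int` case earlier in this hunk lowers every integer, regardless of width, by writing its two's complement bytes straight into the output, replacing the per-width branches being deleted here. The same big-integer API is reachable from userspace; a small sketch assuming the (buffer, endian) signature used above:

const std = @import("std");

test "writeTwosComplement fills the buffer, sign-extending" {
    var big = try std.math.big.int.Managed.initSet(std.testing.allocator, -1);
    defer big.deinit();

    var buf: [4]u8 = undefined;
    big.toConst().writeTwosComplement(&buf, .Little);
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0xff, 0xff, 0xff, 0xff }, &buf);
}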
- - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.initTag(.usize), - .val = slice.len, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - }, - .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( - bin_file, - src_loc, - typed_value, - typed_value.val, - code, - debug_output, - reloc_info, - ), - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, - ), - }, + .err => |err| { + const int = try mod.getErrorValue(err.name); + try code.writer().writeInt(u16, @intCast(u16, int), endian); }, - .Int => { - const info = typed_value.ty.intInfo(target); - if (info.bits <= 8) { - const x: u8 = switch (info.signedness) { - .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)), - .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))), - }; - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - var bigint_buffer: Value.BigIntSpace = undefined; - const bigint = typed_value.val.toBigInt(&bigint_buffer, target); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; - const start = code.items.len; - try code.resize(start + abi_size); - bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); - return Result.ok; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, typed_value.val.toUnsignedInt(target)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, typed_value.val.toUnsignedInt(target)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toUnsignedInt(target); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, typed_value.val.toSignedInt(target)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, typed_value.val.toSignedInt(target)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toSignedInt(target); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Enum => { - var int_buffer: Value.Payload.U64 = undefined; - const int_val = typed_value.enumToInt(&int_buffer); + .error_union => |error_union| { + const payload_ty = typed_value.ty.errorUnionPayload(mod); + const err_val = switch (error_union.val) { + .err_name => |err_name| @intCast(u16, try mod.getErrorValue(err_name)), + .payload => @as(u16, 0), + }; - const info = typed_value.ty.intInfo(target); - if (info.bits <= 8) { - const x = @intCast(u8, int_val.toUnsignedInt(target)); - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for big int enums ('{}')", - .{typed_value.ty.fmt(mod)}, - ), - }; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, int_val.toUnsignedInt(target)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, int_val.toUnsignedInt(target)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = 
int_val.toUnsignedInt(target); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, int_val.toSignedInt(target)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, int_val.toSignedInt(target)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = int_val.toSignedInt(target); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Bool => { - const x: u8 = @boolToInt(typed_value.val.toBool()); - try code.append(x); - return Result.ok; - }, - .Struct => { - if (typed_value.ty.containerLayout() == .Packed) { - const struct_obj = typed_value.ty.castTag(.@"struct").?.data; - const fields = struct_obj.fields.values(); - const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; - const current_pos = code.items.len; - try code.resize(current_pos + abi_size); - var bits: u16 = 0; - - for (field_vals, 0..) |field_val, index| { - const field_ty = fields[index].ty; - // pointer may point to a decl which must be marked used - // but can also result in a relocation. Therefore we handle those seperately. - if (field_ty.zigTypeTag() == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(target)) orelse return error.Overflow; - var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); - defer tmp_list.deinit(); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, &tmp_list, debug_output, reloc_info)) { - .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), - .fail => |em| return Result{ .fail = em }, - } - } else { - field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; - } - bits += @intCast(u16, field_ty.bitSize(target)); - } - - return Result.ok; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + try code.writer().writeInt(u16, err_val, endian); + return .ok; } - const struct_begin = code.items.len; - const field_vals = typed_value.val.castTag(.aggregate).?.data; - for (field_vals, 0..) |field_val, index| { - const field_ty = typed_value.ty.structFieldType(index); - if (!field_ty.hasRuntimeBits()) continue; - - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - const unpadded_field_end = code.items.len - struct_begin; - - // Pad struct members if required - const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target); - const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; - - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - return Result.ok; - }, - .Union => { - const union_obj = typed_value.val.castTag(.@"union").?.data; - const layout = typed_value.ty.unionGetLayout(target); - - if (layout.payload_size == 0) { - return generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType().?, - .val = union_obj.tag, - }, code, debug_output, reloc_info); - } - - // Check if we should store the tag first. 
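Whether the tag or the payload leads inside a tagged union is decided purely by alignment: the more-aligned member goes first, as both this deleted block and the new `.un` handling at the end of this file do. A sketch of the resulting offsets (toyUnionLayout is illustrative, not the compiler's unionGetLayout):

const std = @import("std");

const ToyLayout = struct { tag_offset: u32, payload_offset: u32 };

fn toyUnionLayout(tag_size: u32, tag_align: u32, payload_size: u32, payload_align: u32) ToyLayout {
    if (tag_align >= payload_align) return .{
        .tag_offset = 0,
        .payload_offset = std.mem.alignForwardGeneric(u32, tag_size, payload_align),
    };
    return .{
        .tag_offset = std.mem.alignForwardGeneric(u32, payload_size, tag_align),
        .payload_offset = 0,
    };
}

test "the more-aligned member leads" {
    const layout = toyUnionLayout(4, 4, 16, 8); // u32 tag, 8-byte-aligned payload
    try std.testing.expectEqual(@as(u32, 0), layout.payload_offset);
    try std.testing.expectEqual(@as(u32, 16), layout.tag_offset);
}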
- if (layout.tag_align >= layout.payload_align) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType().?, - .val = union_obj.tag, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - const union_ty = typed_value.ty.cast(Type.Payload.Union).?.data; - const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; - assert(union_ty.haveFieldTypes()); - const field_ty = union_ty.fields.values()[field_index].ty; - if (!field_ty.hasRuntimeBits()) { - try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); - } else { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = union_obj.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(target)) orelse return error.Overflow; - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - if (layout.tag_size > 0) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = union_ty.tag_ty, - .val = union_obj.tag, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - if (layout.padding > 0) { - try code.writer().writeByteNTimes(0, layout.padding); - } - - return Result.ok; - }, - .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_type = typed_value.ty.optionalChild(&opt_buf); - const is_pl = !typed_value.val.isNull(); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; - - if (!payload_type.hasRuntimeBits()) { - try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); - return Result.ok; - } - - if (typed_value.ty.optionalReprIsPayload()) { - if (typed_value.val.castTag(.opt_payload)) |payload| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = payload.data, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } else if (!typed_value.val.isNull()) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = typed_value.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } else { - try code.writer().writeByteNTimes(0, abi_size); - } - - return Result.ok; - } - - const padding = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow) - 1; - const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = value, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - try code.writer().writeByte(@boolToInt(is_pl)); - try code.writer().writeByteNTimes(0, padding); - - return Result.ok; - }, - .ErrorUnion => { - const error_ty = typed_value.ty.errorUnionSet(); - const payload_ty = typed_value.ty.errorUnionPayload(); - const is_payload = typed_value.val.errorUnionIsPayload(); - - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; - return generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = err_val, - }, code, debug_output, reloc_info); - } - - const payload_align = payload_ty.abiAlignment(target); - const error_align = 
Type.anyerror.abiAlignment(target); - const abi_align = typed_value.ty.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_align = typed_value.ty.abiAlignment(mod); // error value first when its type is larger than the error union's payload if (error_align > payload_align) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = if (is_payload) Value.initTag(.zero) else typed_value.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try code.writer().writeInt(u16, err_val, endian); } // emit payload part of the error union { const begin = code.items.len; - const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef); switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_ty, - .val = payload_val, + .val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .payload => |payload| payload, + }.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, - .fail => |em| return Result{ .fail = em }, + .fail => |em| return .{ .fail = em }, } const unpadded_end = code.items.len - begin; const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); @@ -717,13 +301,7 @@ pub fn generateSymbol( // Payload size is larger than error set, so emit our error set last if (error_align <= payload_align) { const begin = code.items.len; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = if (is_payload) Value.initTag(.zero) else typed_value.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + try code.writer().writeInt(u16, err_val, endian); const unpadded_end = code.items.len - begin; const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; @@ -732,198 +310,386 @@ pub fn generateSymbol( try code.writer().writeByteNTimes(0, padding); } } - - return Result.ok; }, - .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const name = typed_value.val.getError().?; - const kv = try bin_file.options.module.?.getErrorValue(name); - try code.writer().writeInt(u32, kv.value, endian); - }, - else => { - try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target))); - }, + .enum_tag => |enum_tag| { + const int_tag_ty = typed_value.ty.intTagType(mod); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = int_tag_ty, + .val = try mod.getCoerced(enum_tag.int.toValue(), int_tag_ty), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, } - return Result.ok; }, - .Vector => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(len + padding); - code.appendSliceAssumeCapacity(bytes[0..len]); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; + .float => |float| switch (float.storage) { + .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(2)), + .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(4)), + .f64 => |f64_val| 
writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)), + .f80 => |f80_val| { + writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10)); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + try code.appendNTimes(0, abi_size - 10); }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.elemType(); - const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - - (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) { - error.DivisionByZero => unreachable, - else => |e| return e, - })) orelse return error.Overflow; - for (elem_vals[0..len]) |elem_val| { + .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)), + }, + .ptr => |ptr| { + // generate ptr + switch (try lowerParentPtr(bin_file, src_loc, switch (ptr.len) { + .none => typed_value.val, + else => typed_value.val.slicePtr(mod), + }.toIntern(), code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + if (ptr.len != .none) { + // generate len + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = Type.usize, + .val = ptr.len.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + } + }, + .opt => { + const payload_type = typed_value.ty.optionalChild(mod); + const payload_val = typed_value.val.optionalValue(mod); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + + if (typed_value.ty.optionalReprIsPayload(mod)) { + if (payload_val) |value| { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, + .ty = payload_type, + .val = value, + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + } else { + try code.writer().writeByteNTimes(0, abi_size); + } + } else { + const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; + if (payload_type.hasRuntimeBits(mod)) { + const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.toIntern() })).toValue(); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = payload_type, + .val = value, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; + try code.writer().writeByte(@boolToInt(payload_val != null)); + try code.writer().writeByteNTimes(0, padding); + } + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.toIntern())) { + .array_type => |array_type| switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => { + var index: u64 = 0; + var len_including_sentinel = + array_type.len + @boolToInt(array_type.sentinel != .none); + while (index < len_including_sentinel) : (index += 1) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + } + }, }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = 
typed_value.ty.childType(); - const len = typed_value.ty.arrayLen(); - const padding = math.cast(usize, typed_value.ty.abiSize(target) - - (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) { + .vector_type => |vector_type| { + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => { + var index: u64 = 0; + while (index < vector_type.len) : (index += 1) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = vector_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + } + }, + } + + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, vector_type.child.toType().bitSize(mod) * vector_type.len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, else => |e| return e, })) orelse return error.Overflow; - var index: u64 = 0; - while (index < len) : (index += 1) { + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + }, + .anon_struct_type => |tuple| { + const struct_begin = code.items.len; + for (tuple.types, tuple.values, 0..) |field_ty, comptime_val, index| { + if (comptime_val != .none) continue; + if (!field_ty.toType().hasRuntimeBits(mod)) continue; + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty, + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }; + switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, + .ty = field_ty.toType(), + .val = field_val.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } + const unpadded_field_end = code.items.len - struct_begin; + + // Pad struct members if required + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); + const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse + return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } } - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - str_lit.len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(str_lit.len + padding); - code.appendSliceAssumeCapacity(bytes); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + + if (struct_obj.layout == .Packed) { + const fields = struct_obj.fields.values(); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse + return error.Overflow; + const current_pos = code.items.len; + try code.resize(current_pos + abi_size); + var bits: u16 = 0; + + for (fields, 0..) 
|field, index| { + const field_ty = field.ty; + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty.toIntern(), + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }; + + // pointer may point to a decl which must be marked used + // but can also result in a relocation. Therefore we handle those separately. + if (field_ty.zigTypeTag(mod) == .Pointer) { + const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse + return error.Overflow; + var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); + defer tmp_list.deinit(); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty, + .val = field_val.toValue(), + }, &tmp_list, debug_output, reloc_info)) { + .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), + .fail => |em| return Result{ .fail = em }, + } + } else { + field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; + } + bits += @intCast(u16, field_ty.bitSize(mod)); + } + } else { + const struct_begin = code.items.len; + for (struct_obj.fields.values(), 0..) |field, index| { + const field_ty = field.ty; + if (!field_ty.hasRuntimeBits(mod)) continue; + + const field_val = switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty.toIntern(), + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }; + + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty, + .val = field_val.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + const unpadded_field_end = code.items.len - struct_begin; + + // Pad struct members if required + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); + const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + } }, else => unreachable, }, - else => |tag| return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for type '{s}'", - .{@tagName(tag)}, - ) }, + .un => |un| { + const layout = typed_value.ty.unionGetLayout(mod); + + if (layout.payload_size == 0) { + return generateSymbol(bin_file, src_loc, .{ + .ty = typed_value.ty.unionTagType(mod).?, + .val = un.tag.toValue(), + }, code, debug_output, reloc_info); + } + + // Check if we should store the tag first. 
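// [editorial sketch, not part of this patch] The branch below orders tag vs.
// payload purely by alignment: whichever member has the stricter alignment is
// emitted first. A self-contained illustration of the data this code
// serializes; the exact sizes are target-ABI assumptions, hence the loose bound:
test "tagged union: emitted bytes cover tag, payload, and padding" {
    const U = union(enum(u32)) { byte: u8 };
    // On common targets layout.tag_size == 4 and layout.payload_size >= 1,
    // so the serialized symbol spans at least 5 bytes before final padding.
    try @import("std").testing.expect(@sizeOf(U) >= 5);
}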
+ if (layout.tag_align >= layout.payload_align) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = typed_value.ty.unionTagType(mod).?, + .val = un.tag.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + } + + const union_ty = mod.typeToUnion(typed_value.ty).?; + const field_index = typed_value.ty.unionTagFieldIndex(un.tag.toValue(), mod).?; + assert(union_ty.haveFieldTypes()); + const field_ty = union_ty.fields.values()[field_index].ty; + if (!field_ty.hasRuntimeBits(mod)) { + try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); + } else { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty, + .val = un.val.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + + const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + + if (layout.tag_size > 0) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = union_ty.tag_ty, + .val = un.tag.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + } + }, + .memoized_call => unreachable, } + return .ok; } fn lowerParentPtr( bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, - parent_ptr: Value, + parent_ptr: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { - const target = bin_file.options.target; - switch (parent_ptr.tag()) { - .field_ptr => { - const field_ptr = parent_ptr.castTag(.field_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - field_ptr.container_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag()) { - .Pointer => offset: { - assert(field_ptr.container_ty.isSlice()); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - break :offset switch (field_ptr.field_index) { - 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(target), - else => unreachable, - }; - }, - .Struct, .Union => field_ptr.container_ty.structFieldOffset( - field_ptr.field_index, - target, - ), - else => return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement lowerParentPtr for field_ptr with a container of type {}", - .{field_ptr.container_ty.fmt(bin_file.options.module.?)}, - ) }, - })), - ); - }, - .elem_ptr => { - const elem_ptr = parent_ptr.castTag(.elem_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - elem_ptr.array_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))), - ); - }, - .opt_payload_ptr => { - const opt_payload_ptr = parent_ptr.castTag(.opt_payload_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - opt_payload_ptr.container_ptr, - code, - debug_output, - reloc_info, - ); - }, - .eu_payload_ptr => { - const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data; - const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(); - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - eu_payload_ptr.container_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, target))), - ); - }, - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( + const mod = 
bin_file.options.module.?; + const ptr = mod.intern_pool.indexToKey(parent_ptr).ptr; + assert(ptr.len == .none); + return switch (ptr.addr) { + .decl, .mut_decl => try lowerDeclRef( bin_file, src_loc, - typed_value, - switch (tag) { - .variable => parent_ptr.castTag(.variable).?.data.owner_decl, - .decl_ref => parent_ptr.castTag(.decl_ref).?.data, - .decl_ref_mut => parent_ptr.castTag(.decl_ref_mut).?.data.decl_index, + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }, code, debug_output, reloc_info, ), - else => |tag| return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, + .int => |int| try generateSymbol(bin_file, src_loc, .{ + .ty = Type.usize, + .val = int.toValue(), + }, code, debug_output, reloc_info), + .eu_payload => |eu_payload| try lowerParentPtr( + bin_file, src_loc, - "TODO implement lowerParentPtr for type '{s}'", - .{@tagName(tag)}, - ) }, - } + eu_payload, + code, + debug_output, + reloc_info.offset(@intCast(u32, errUnionPayloadOffset( + mod.intern_pool.typeOf(eu_payload).toType(), + mod, + ))), + ), + .opt_payload => |opt_payload| try lowerParentPtr( + bin_file, + src_loc, + opt_payload, + code, + debug_output, + reloc_info, + ), + .elem => |elem| try lowerParentPtr( + bin_file, + src_loc, + elem.base, + code, + debug_output, + reloc_info.offset(@intCast(u32, elem.index * + mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), + ), + .field => |field| { + const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.child; + return lowerParentPtr( + bin_file, + src_loc, + field.base, + code, + debug_output, + reloc_info.offset(switch (mod.intern_pool.indexToKey(base_type)) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One, .Many, .C => unreachable, + .Slice => switch (field.index) { + 0 => 0, + 1 => @divExact(mod.getTarget().ptrBitWidth(), 8), + else => unreachable, + }, + }, + .struct_type, + .anon_struct_type, + .union_type, + => @intCast(u32, base_type.toType().structFieldOffset( + @intCast(u32, field.index), + mod, + )), + else => unreachable, + }), + ); + }, + .comptime_field => unreachable, + }; } const RelocInfo = struct { @@ -938,51 +704,25 @@ const RelocInfo = struct { fn lowerDeclRef( bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, decl_index: Module.Decl.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { + _ = src_loc; + _ = debug_output; const target = bin_file.options.target; - const module = bin_file.options.module.?; - if (typed_value.ty.isSlice()) { - // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = typed_value.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - // generate length - var slice_len: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = typed_value.val.sliceLen(module), - }; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = Value.initPayload(&slice_len.base), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - } + const mod = bin_file.options.module.?; const ptr_width = target.ptrBitWidth(); - const decl = module.declPtr(decl_index); - const is_fn_body = decl.ty.zigTypeTag() == 
.Fn; - if (!is_fn_body and !decl.ty.hasRuntimeBits()) { + const decl = mod.declPtr(decl_index); + const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; + if (!is_fn_body and !decl.ty.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8)); return Result.ok; } - module.markDeclAlive(decl); + try mod.markDeclAlive(decl); const vaddr = try bin_file.getDeclVAddr(decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, @@ -1059,16 +799,16 @@ fn genDeclRef( tv: TypedValue, decl_index: Module.Decl.Index, ) CodeGenError!GenResult { - const module = bin_file.options.module.?; - log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) }); + const mod = bin_file.options.module.?; + log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(mod), tv.val.fmtValue(tv.ty, mod) }); const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); - if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { const imm: u64 = switch (ptr_bytes) { 1 => 0xaa, 2 => 0xaaaa, @@ -1080,20 +820,20 @@ fn genDeclRef( } // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? - if (tv.ty.castPtrToFn()) |fn_ty| { - if (fn_ty.fnInfo().is_generic) { - return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(target) }); + if (tv.ty.castPtrToFn(mod)) |fn_ty| { + if (mod.typeToFunc(fn_ty).?.is_generic) { + return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) }); } - } else if (tv.ty.zigTypeTag() == .Pointer) { - const elem_ty = tv.ty.elemType2(); - if (!elem_ty.hasRuntimeBits()) { - return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(target) }); + } else if (tv.ty.zigTypeTag(mod) == .Pointer) { + const elem_ty = tv.ty.elemType2(mod); + if (!elem_ty.hasRuntimeBits(mod)) { + return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) }); } } - module.markDeclAlive(decl); + try mod.markDeclAlive(decl); - const is_threadlocal = tv.val.isPtrToThreadLocal(module) and !bin_file.options.single_threaded; + const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded; if (bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index); @@ -1157,57 +897,56 @@ pub fn genTypedValue( arg_tv: TypedValue, owner_decl_index: Module.Decl.Index, ) CodeGenError!GenResult { + const mod = bin_file.options.module.?; var typed_value = arg_tv; - if (typed_value.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const mod = bin_file.options.module.?; log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), }); - if (typed_value.val.isUndef()) + if (typed_value.val.isUndef(mod)) return GenResult.mcv(.undef); const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); - if (!typed_value.ty.isSlice()) { - if (typed_value.val.castTag(.variable)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl); - } - if (typed_value.val.castTag(.decl_ref)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data); - } - if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return 
genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index); - } - } + if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return genDeclRef(bin_file, src_loc, typed_value, decl), + .mut_decl => |mut_decl| return genDeclRef(bin_file, src_loc, typed_value, mut_decl.decl), + else => {}, + }, + else => {}, + }; - switch (typed_value.ty.zigTypeTag()) { + switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), - .Pointer => switch (typed_value.ty.ptrSize()) { + .Pointer => switch (typed_value.ty.ptrSize(mod)) { .Slice => {}, - else => { - switch (typed_value.val.tag()) { - .null_value => { - return GenResult.mcv(.{ .immediate = 0 }); - }, - .int_u64 => { - return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) }); + else => switch (typed_value.val.toIntern()) { + .null_value => { + return GenResult.mcv(.{ .immediate = 0 }); + }, + .none => {}, + else => switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { + .int => { + return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, - } + }, }, }, .Int => { - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= ptr_bits) { const unsigned = switch (info.signedness) { - .signed => @bitCast(u64, typed_value.val.toSignedInt(target)), - .unsigned => typed_value.val.toUnsignedInt(target), + .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)), + .unsigned => typed_value.val.toUnsignedInt(mod), }; return GenResult.mcv(.{ .immediate = unsigned }); } @@ -1216,78 +955,46 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) }); }, .Optional => { - if (typed_value.ty.isPtrLikeOptional()) { - if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); - - var buf: Type.Payload.ElemType = undefined; + if (typed_value.ty.isPtrLikeOptional(mod)) { return genTypedValue(bin_file, src_loc, .{ - .ty = typed_value.ty.optionalChild(&buf), - .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, + .ty = typed_value.ty.optionalChild(mod), + .val = typed_value.val.optionalValue(mod) orelse return GenResult.mcv(.{ .immediate = 0 }), }, owner_decl_index); - } else if (typed_value.ty.abiSize(target) == 1) { - return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) }); + } else if (typed_value.ty.abiSize(mod) == 1) { + return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) }); } }, .Enum => { - if (typed_value.val.castTag(.enum_field_index)) |field_index| { - switch (typed_value.ty.tag()) { - .enum_simple => { - return GenResult.mcv(.{ .immediate = field_index.data }); - }, - .enum_numbered, .enum_full, .enum_nonexhaustive => { - const enum_values = if (typed_value.ty.castTag(.enum_numbered)) |pl| - pl.data.values - else - typed_value.ty.cast(Type.Payload.EnumFull).?.data.values; - if (enum_values.count() != 0) { - const tag_val = enum_values.keys()[field_index.data]; - var buf: Type.Payload.Bits = undefined; - return genTypedValue(bin_file, src_loc, .{ - .ty = typed_value.ty.intTagType(&buf), - .val = tag_val, - }, owner_decl_index); - } else { - return GenResult.mcv(.{ .immediate = field_index.data }); - } - }, - else => unreachable, - } - } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer); - return 
genTypedValue(bin_file, src_loc, .{ - .ty = int_tag_ty, - .val = typed_value.val, - }, owner_decl_index); - } + const enum_tag = mod.intern_pool.indexToKey(typed_value.val.toIntern()).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return genTypedValue(bin_file, src_loc, .{ + .ty = int_tag_ty.toType(), + .val = enum_tag.int.toValue(), + }, owner_decl_index); }, .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return GenResult.mcv(.{ .immediate = error_index }); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. - return GenResult.mcv(.{ .immediate = 0 }); - }, - } + const err_name = mod.intern_pool.indexToKey(typed_value.val.toIntern()).err.name; + const error_index = mod.global_error_set.getIndex(err_name).?; + return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { - const error_type = typed_value.ty.errorUnionSet(); - const payload_type = typed_value.ty.errorUnionPayload(); - const is_pl = typed_value.val.errorUnionIsPayload(); - - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + const err_type = typed_value.ty.errorUnionSet(mod); + const payload_type = typed_value.ty.errorUnionPayload(mod); + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); - return genTypedValue(bin_file, src_loc, .{ - .ty = error_type, - .val = err_val, - }, owner_decl_index); + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).error_union.val) { + .err_name => |err_name| return genTypedValue(bin_file, src_loc, .{ + .ty = err_type, + .val = (try mod.intern(.{ .err = .{ + .ty = err_type.toIntern(), + .name = err_name, + } })).toValue(), + }, owner_decl_index), + .payload => return genTypedValue(bin_file, src_loc, .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, owner_decl_index), + } } }, @@ -1306,23 +1013,23 @@ pub fn genTypedValue( return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index); } -pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime()) { +pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return 0; } else { - return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align); + return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); } } -pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime()) { - return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), 
error_align); +pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); } else { return 0; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 86b74b1429..c1b7bd72b1 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -16,6 +16,7 @@ const trace = @import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const InternPool = @import("../InternPool.zig"); const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; @@ -256,7 +257,7 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { return .{ .data = ident }; } -/// This data is available when outputting .c code for a `*Module.Fn`. +/// This data is available when outputting .c code for a `Module.Fn.Index`. /// It is not available when generating .h file. pub const Function = struct { air: Air, @@ -267,7 +268,7 @@ pub const Function = struct { next_block_index: usize = 0, object: Object, lazy_fns: LazyFnMap, - func: *Module.Fn, + func_index: Module.Fn.Index, /// All the locals, to be emitted at the top of the function. locals: std.ArrayListUnmanaged(Local) = .{}, /// Which locals are available for reuse, based on Type. @@ -285,10 +286,11 @@ pub const Function = struct { const gop = try f.value_map.getOrPut(inst); if (gop.found_existing) return gop.value_ptr.*; - const val = f.air.value(ref).?; - const ty = f.air.typeOf(ref); + const mod = f.object.dg.module; + const val = (try f.air.value(ref, mod)).?; + const ty = f.typeOf(ref); - const result: CValue = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: { + const result: CValue = if (lowersToArray(ty, mod)) result: { const writer = f.object.code_header.writer(); const alignment = 0; const decl_c_value = try f.allocLocalValue(ty, alignment); @@ -318,11 +320,11 @@ pub const Function = struct { /// those which go into `allocs`. This function does not add the resulting local into `allocs`; /// that responsibility lies with the caller. fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue { + const mod = f.object.dg.module; const gpa = f.object.dg.gpa; - const target = f.object.dg.module.getTarget(); try f.locals.append(gpa, .{ .cty_idx = try f.typeToIndex(ty, .complete), - .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), }); return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; } @@ -336,10 +338,10 @@ pub const Function = struct { /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should /// not be used for persistent locals (i.e. those in `allocs`). 
fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; if (f.free_locals_map.getPtr(.{ .cty_idx = try f.typeToIndex(ty, .complete), - .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), })) |locals_list| { if (locals_list.popOrNull()) |local_entry| { return .{ .new_local = local_entry.key }; @@ -352,8 +354,9 @@ pub const Function = struct { fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void { switch (c_value) { .constant => |inst| { - const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const mod = f.object.dg.module; + const ty = f.typeOf(inst); + const val = (try f.air.value(inst, mod)).?; return f.object.dg.renderValue(w, ty, val, location); }, .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location), @@ -364,8 +367,9 @@ pub const Function = struct { fn writeCValueDeref(f: *Function, w: anytype, c_value: CValue) !void { switch (c_value) { .constant => |inst| { - const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const mod = f.object.dg.module; + const ty = f.typeOf(inst); + const val = (try f.air.value(inst, mod)).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); return w.writeByte(')'); @@ -377,8 +381,9 @@ pub const Function = struct { fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .constant => |inst| { - const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const mod = f.object.dg.module; + const ty = f.typeOf(inst); + const val = (try f.air.value(inst, mod)).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); return f.writeCValue(w, member, .Other); @@ -390,8 +395,9 @@ pub const Function = struct { fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .constant => |inst| { - const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const mod = f.object.dg.module; + const ty = f.typeOf(inst); + const val = (try f.air.value(inst, mod)).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); try w.writeAll(")->"); @@ -446,6 +452,7 @@ pub const Function = struct { var promoted = f.object.dg.ctypes.promote(gpa); defer f.object.dg.ctypes.demote(promoted); const arena = promoted.arena.allocator(); + const mod = f.object.dg.module; gop.value_ptr.* = .{ .fn_name = switch (key) { @@ -454,12 +461,12 @@ pub const Function = struct { .never_inline, => |owner_decl| try std.fmt.allocPrint(arena, "zig_{s}_{}__{d}", .{ @tagName(key), - fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)), + fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)), @enumToInt(owner_decl), }), }, .data = switch (key) { - .tag_name => .{ .tag_name = try data.tag_name.copy(arena) }, + .tag_name => .{ .tag_name = data.tag_name }, .never_tail => .{ .never_tail = data.never_tail }, .never_inline => .{ .never_inline = data.never_inline }, }, @@ -480,6 +487,16 @@ pub const Function = struct { f.object.dg.ctypes.deinit(gpa); f.object.dg.fwd_decl.deinit(); } + + fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { + const mod = f.object.dg.module; + return f.air.typeOf(inst, &mod.intern_pool); + } + + fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { + const mod = f.object.dg.module; + return 
f.air.typeOfIndex(inst, &mod.intern_pool); + } }; /// This data is available when outputting .c code for a `Module`. @@ -508,8 +525,9 @@ pub const DeclGen = struct { fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); + const mod = dg.module; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(dg.decl.?); + const src_loc = src.toSrcLoc(dg.decl.?, mod); dg.error_msg = try Module.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } @@ -522,53 +540,28 @@ pub const DeclGen = struct { decl_index: Decl.Index, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const decl = dg.module.declPtr(decl_index); + const mod = dg.module; + const decl = mod.declPtr(decl_index); assert(decl.has_tv); // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. - if (ty.isPtrAtRuntime() and !decl.ty.isFnOrHasRuntimeBits()) { + if (ty.isPtrAtRuntime(mod) and !decl.ty.isFnOrHasRuntimeBits(mod)) { return dg.writeCValue(writer, .{ .undef = ty }); } // Chase function values in order to be able to reference the original function. - inline for (.{ .function, .extern_fn }) |tag| - if (decl.val.castTag(tag)) |func| - if (func.data.owner_decl != decl_index) - return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location); + if (decl.val.getFunction(mod)) |func| if (func.owner_decl != decl_index) + return dg.renderDeclValue(writer, ty, val, func.owner_decl, location); + if (decl.val.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index) + return dg.renderDeclValue(writer, ty, val, extern_func.decl, location); - if (decl.val.castTag(.variable)) |var_payload| - try dg.renderFwdDecl(decl_index, var_payload.data); - - if (ty.isSlice()) { - if (location == .StaticInitializer) { - try writer.writeByte('{'); - } else { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeAll("){ .ptr = "); - } - - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr(), .Initializer); - - var len_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = val.sliceLen(dg.module), - }; - const len_val = Value.initPayload(&len_pl.base); - - if (location == .StaticInitializer) { - return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); - } else { - return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); - } - } + if (decl.val.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable); // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug // somewhere and we should let the C compiler tell us about it. - const need_typecast = if (ty.castPtrToFn()) |_| false else !ty.eql(decl.ty, dg.module); + const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.childType(mod).eql(decl.ty, mod); if (need_typecast) { try writer.writeAll("(("); try dg.renderType(writer, ty); @@ -579,127 +572,124 @@ pub const DeclGen = struct { if (need_typecast) try writer.writeByte(')'); } - // Renders a "parent" pointer by recursing to the root decl/variable - // that its contents are defined with respect to. 
- // - // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr - fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { - if (!ptr_ty.isSlice()) { - try writer.writeByte('('); - try dg.renderType(writer, ptr_ty); - try writer.writeByte(')'); - } - switch (ptr_val.tag()) { - .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}), - .decl_ref_mut, .decl_ref, .variable => { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, - .variable => ptr_val.castTag(.variable).?.data.owner_decl, + /// Renders a "parent" pointer by recursing to the root decl/variable + /// that its contents are defined with respect to. + fn renderParentPtr( + dg: *DeclGen, + writer: anytype, + ptr_val: InternPool.Index, + location: ValueRenderLocation, + ) error{ OutOfMemory, AnalysisFail }!void { + const mod = dg.module; + const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType(); + const ptr_cty = try dg.typeToIndex(ptr_ty, .complete); + const ptr = mod.intern_pool.indexToKey(ptr_val).ptr; + switch (ptr.addr) { + .decl, .mut_decl => try dg.renderDeclValue( + writer, + ptr_ty, + ptr_val.toValue(), + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }, + location, + ), + .int => |int| try writer.print("{x}", .{ + try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), + }), + .eu_payload, .opt_payload => |base| { + const ptr_base_ty = mod.intern_pool.typeOf(base).toType(); + const base_ty = ptr_base_ty.childType(mod); + // Ensure complete type definition is visible before accessing fields. + _ = try dg.typeToIndex(base_ty, .complete); + const payload_ty = switch (ptr.addr) { + .eu_payload => base_ty.errorUnionPayload(mod), + .opt_payload => base_ty.optionalChild(mod), else => unreachable, }; - try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); + const ptr_payload_ty = try mod.adjustPtrTypeChild(ptr_base_ty, payload_ty); + const ptr_payload_cty = try dg.typeToIndex(ptr_payload_ty, .complete); + if (ptr_cty != ptr_payload_cty) { + try writer.writeByte('('); + try dg.renderCType(writer, ptr_cty); + try writer.writeByte(')'); + } + try writer.writeAll("&("); + try dg.renderParentPtr(writer, base, location); + try writer.writeAll(")->payload"); }, - .field_ptr => { - const target = dg.module.getTarget(); - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - + .elem => |elem| { + const ptr_base_ty = mod.intern_pool.typeOf(elem.base).toType(); + const elem_ty = ptr_base_ty.elemType2(mod); + const ptr_elem_ty = try mod.adjustPtrTypeChild(ptr_base_ty, elem_ty); + const ptr_elem_cty = try dg.typeToIndex(ptr_elem_ty, .complete); + if (ptr_cty != ptr_elem_cty) { + try writer.writeByte('('); + try dg.renderCType(writer, ptr_cty); + try writer.writeByte(')'); + } + try writer.writeAll("&("); + if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One) + try writer.writeByte('*'); + try dg.renderParentPtr(writer, elem.base, location); + try writer.print(")[{d}]", .{elem.index}); + }, + .field => |field| { + const ptr_base_ty = mod.intern_pool.typeOf(field.base).toType(); + const base_ty = ptr_base_ty.childType(mod); // Ensure complete type definition is visible before accessing fields. 
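// [editorial sketch, not part of this patch] renderParentPtr recurses to the
// root decl or integer address and wraps the result in C address arithmetic.
// The .byte_offset branch further below corresponds to this pointer
// computation, written here in plain Zig (struct `S` and its fields are
// illustrative):
test "field pointer via byte offset" {
    const S = extern struct { a: u32, b: u16 };
    var s: S = .{ .a = 1, .b = 2 };
    // Equivalent of the emitted C `(uint16_t *)((uint8_t *)&s + offsetof(S, b))`:
    const p = @intToPtr(*u16, @ptrToInt(&s) + @offsetOf(S, "b"));
    try @import("std").testing.expect(p.* == 2);
}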
- _ = try dg.typeToIndex(field_ptr.container_ty, .complete); - - var container_ptr_pl = ptr_ty.ptrInfo(); - container_ptr_pl.data.pointee_type = field_ptr.container_ty; - const container_ptr_ty = Type.initPayload(&container_ptr_pl.base); - - switch (fieldLocation( - field_ptr.container_ty, - ptr_ty, - @intCast(u32, field_ptr.field_index), - target, - )) { - .begin => try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ), - .field => |field| { + _ = try dg.typeToIndex(base_ty, .complete); + const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) { + .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod), + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One, .Many, .C => unreachable, + .Slice => switch (field.index) { + Value.slice_ptr_index => base_ty.slicePtrFieldType(mod), + Value.slice_len_index => Type.usize, + else => unreachable, + }, + }, + else => unreachable, + }; + const ptr_field_ty = try mod.adjustPtrTypeChild(ptr_base_ty, field_ty); + const ptr_field_cty = try dg.typeToIndex(ptr_field_ty, .complete); + if (ptr_cty != ptr_field_cty) { + try writer.writeByte('('); + try dg.renderCType(writer, ptr_cty); + try writer.writeByte(')'); + } + switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) { + .begin => try dg.renderParentPtr(writer, field.base, location), + .field => |name| { try writer.writeAll("&("); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.writeAll(")->"); - try dg.writeCValue(writer, field); + try dg.writeCValue(writer, name); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = ptr_ty.ptrInfo(); - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try dg.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.print(" + {})", .{ try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other), }); }, .end => { try writer.writeAll("(("); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.print(") + {})", .{ - try dg.fmtIntLiteral(Type.usize, Value.one, .Other), + try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other), }); }, } }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var elem_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = elem_ptr.elem_ty, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base); - - try writer.writeAll("&("); - try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location); - try writer.print(")[{d}]", .{elem_ptr.index}); - }, - .opt_payload_ptr, .eu_payload_ptr => { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - var container_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = 
payload_ptr.container_ty, - }; - const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base); - - // Ensure complete type definition is visible before accessing fields. - _ = try dg.typeToIndex(payload_ptr.container_ty, .complete); - - try writer.writeAll("&("); - try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location); - try writer.writeAll(")->payload"); - }, - else => unreachable, + .comptime_field => unreachable, } } @@ -710,23 +700,25 @@ pub const DeclGen = struct { arg_val: Value, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { + const mod = dg.module; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } - const target = dg.module.getTarget(); + const target = mod.getTarget(); const initializer_type: ValueRenderLocation = switch (location) { .StaticInitializer => .StaticInitializer, else => .Initializer, }; - const safety_on = switch (dg.module.optimizeMode()) { + const safety_on = switch (mod.optimizeMode()) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; - if (val.isUndefDeep()) { - switch (ty.zigTypeTag()) { + if (val.isUndefDeep(mod)) { + switch (ty.zigTypeTag(mod)) { .Bool => { if (safety_on) { return writer.writeAll("0xaa"); @@ -737,8 +729,8 @@ pub const DeclGen = struct { .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}), .Float => { const bits = ty.floatBits(target); - var repr_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits }; - const repr_ty = Type.initPayload(&repr_pl.base); + // All unsigned ints matching float types are pre-allocated. 
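// [editorial sketch, not part of this patch] Undefined floats are rendered by
// bit-casting an integer pattern of the same width, which is why an unsigned
// int type of exactly `bits` bits is fetched just below. The identity being
// relied on, in plain Zig:
test "float repr round-trips through a same-width unsigned int" {
    const pattern: u32 = 0xaaaaaaaa; // the 0xaa fill used for undef
    const as_float = @bitCast(f32, pattern);
    try @import("std").testing.expect(@bitCast(u32, as_float) == pattern);
}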
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -757,7 +749,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, repr_ty, Value.undef, .FunctionArgument); return writer.writeByte(')'); }, - .Pointer => if (ty.isSlice()) { + .Pointer => if (ty.isSlice(mod)) { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); @@ -765,8 +757,7 @@ pub const DeclGen = struct { } try writer.writeAll("{("); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(mod); try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); } else { @@ -775,14 +766,13 @@ pub const DeclGen = struct { return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); + const payload_ty = ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, Type.bool, val, location); } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return dg.renderValue(writer, payload_ty, val, location); } @@ -798,7 +788,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, Type.bool, val, initializer_type); return writer.writeAll(" }"); }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto, .Extern => { if (!location.isInitializer()) { try writer.writeByte('('); @@ -808,10 +798,10 @@ pub const DeclGen = struct { try writer.writeByte('{'); var empty = true; - for (0..ty.structFieldCount()) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBits()) continue; + for (0..ty.structFieldCount(mod)) |field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBits(mod)) continue; if (!empty) try writer.writeByte(','); try dg.renderValue(writer, field_ty, val, initializer_type); @@ -831,29 +821,29 @@ pub const DeclGen = struct { } try writer.writeByte('{'); - if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + if (ty.unionTagTypeSafety(mod)) |tag_ty| { + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, val, initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } - for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + for (ty.unionFields(mod).values()) |field| { + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, val, initializer_type); break; } - if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); + if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}'); return writer.writeByte('}'); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - const error_ty = ty.errorUnionSet(); + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); - if 
(!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, val, location); } @@ -870,11 +860,11 @@ pub const DeclGen = struct { return writer.writeAll(" }"); }, .Array, .Vector => { - const ai = ty.arrayInfo(); - if (ai.elem_type.eql(Type.u8, dg.module)) { + const ai = ty.arrayInfo(mod); + if (ai.elem_type.eql(Type.u8, mod)) { var literal = stringLiteral(writer); try literal.start(); - const c_len = ty.arrayLenIncludingSentinel(); + const c_len = ty.arrayLenIncludingSentinel(mod); var index: u64 = 0; while (index < c_len) : (index += 1) try literal.writeChar(0xaa); @@ -887,11 +877,11 @@ pub const DeclGen = struct { } try writer.writeByte('{'); - const c_len = ty.arrayLenIncludingSentinel(); + const c_len = ty.arrayLenIncludingSentinel(mod); var index: u64 = 0; while (index < c_len) : (index += 1) { if (index > 0) try writer.writeAll(", "); - try dg.renderValue(writer, ty.childType(), val, initializer_type); + try dg.renderValue(writer, ty.childType(mod), val, initializer_type); } return writer.writeByte('}'); } @@ -916,23 +906,129 @@ pub const DeclGen = struct { } unreachable; } - switch (ty.zigTypeTag()) { - .Int => switch (val.tag()) { - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), - else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), - }, - .Float => { - const bits = ty.floatBits(target); - const f128_val = val.toFloat(f128); - var repr_ty_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits }; - const repr_ty = Type.initPayload(&repr_ty_pl.base); + switch (mod.intern_pool.indexToKey(val.ip_index)) { + // types, not values + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + // memoization, not values + .memoized_call, + => unreachable, + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + // non-runtime values + .undefined => unreachable, + .void => unreachable, + .null => unreachable, + .empty_struct => unreachable, + .@"unreachable" => unreachable, + .generic_poison => unreachable, + + .false => try writer.writeAll("false"), + .true => try writer.writeAll("true"), + }, + .variable, + .extern_func, + .func, + .enum_literal, + .empty_enum_value, + => unreachable, // non-runtime values + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), + .lazy_align, .lazy_size => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + }, + .err => |err| try writer.print("zig_error_{}", .{ + fmtIdent(mod.intern_pool.stringToSlice(err.name)), + }), + .error_union => |error_union| { + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + switch (error_union.val) { + .err_name => |err_name| return dg.renderValue( + writer, + error_ty, + (try mod.intern(.{ .err = .{ + .ty = error_ty.toIntern(), + .name = err_name, + } })).toValue(), + location, + ), + .payload => return dg.renderValue( + writer, + Type.err_int, + try 
mod.intValue(Type.err_int, 0), + location, + ), + } + } + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + try writer.writeAll("{ .payload = "); + try dg.renderValue( + writer, + payload_ty, + switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(), + initializer_type, + ); + try writer.writeAll(", .error = "); + switch (error_union.val) { + .err_name => |err_name| try dg.renderValue( + writer, + error_ty, + (try mod.intern(.{ .err = .{ + .ty = error_ty.toIntern(), + .name = err_name, + } })).toValue(), + location, + ), + .payload => try dg.renderValue( + writer, + Type.err_int, + try mod.intValue(Type.err_int, 0), + location, + ), + } + try writer.writeAll(" }"); + }, + .enum_tag => { + const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + try dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); + }, + .float => { + const bits = ty.floatBits(target); + const f128_val = val.toFloat(f128, mod); + + // All unsigned ints matching float types are pre-allocated. + const repr_ty = mod.intType(.unsigned, bits) catch unreachable; assert(bits <= 128); var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; @@ -943,21 +1039,15 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80))), + 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), + 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), + 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), + 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), 128 => repr_val_big.set(@bitCast(u128, f128_val)), else => unreachable, } - var repr_val_pl = Value.Payload.BigInt{ - .base = .{ - .tag = if (repr_val_big.positive) .int_big_positive else .int_big_negative, - }, - .data = repr_val_big.limbs[0..repr_val_big.len], - }; - const repr_val = Value.initPayload(&repr_val_pl.base); + const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -968,10 +1058,10 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16)}), - 32 => try writer.print("{x}", .{val.toFloat(f32)}), - 64 => try writer.print("{x}", .{val.toFloat(f64)}), - 80 => try writer.print("{x}", .{val.toFloat(f80)}), + 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), 128 => try writer.print("{x}", .{f128_val}), else => unreachable, } @@ -1011,10 +1101,10 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
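// [editorial sketch, not part of this patch] Why passing the whole value is
// safe: a NaN keeps its exponent bits all ones, and the payload lives only in
// the significand, so any exponent bits passed along get masked off anyway.
// Checked in plain Zig (f32 mask shown; other widths are analogous):
test "nan: exponent all ones, payload in the significand" {
    const bits = @bitCast(u32, @import("std").math.nan(f32));
    const exponent_mask: u32 = 0x7f800000;
    try @import("std").testing.expect(bits & exponent_mask == exponent_mask);
}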
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80))}), + 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), + 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), + 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), + 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), else => unreachable, }; @@ -1023,144 +1113,132 @@ pub const DeclGen = struct { } try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)}); if (!empty) try writer.writeByte(')'); - return; }, - .Pointer => switch (val.tag()) { - .null_value, .zero => if (ty.isSlice()) { - var slice_pl = Value.Payload.Slice{ - .base = .{ .tag = .slice }, - .data = .{ .ptr = val, .len = Value.undef }, - }; - const slice_val = Value.initPayload(&slice_pl.base); - - return dg.renderValue(writer, ty, slice_val, location); - } else { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - try writer.writeAll(")NULL)"); - }, - .variable => { - const decl = val.castTag(.variable).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .slice => { + .ptr => |ptr| { + if (ptr.len != .none) { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); } - - const slice = val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type); + } + const ptr_location = switch (ptr.len) { + .none => location, + else => initializer_type, + }; + const ptr_ty = switch (ptr.len) { + .none => ty, + else => ty.slicePtrFieldType(mod), + }; + const ptr_val = switch (ptr.len) { + .none => val, + else => val.slicePtr(mod), + }; + switch (ptr.addr) { + .decl, .mut_decl => try dg.renderDeclValue( + writer, + ptr_ty, + ptr_val, + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }, + ptr_location, + ), + .int => |int| { + try writer.writeAll("(("); + try dg.renderType(writer, ptr_ty); + try writer.print("){x})", .{ + try dg.fmtIntLiteral(Type.usize, int.toValue(), ptr_location), + }); + }, + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.renderParentPtr(writer, ptr_val.ip_index, ptr_location), + .comptime_field => unreachable, + } + if (ptr.len != .none) { try writer.writeAll(", "); - try dg.renderValue(writer, Type.usize, slice.len, initializer_type); + try dg.renderValue(writer, Type.usize, ptr.len.toValue(), initializer_type); try writer.writeByte('}'); - }, - .function => { - const func = val.castTag(.function).?.data; - try dg.renderDeclName(writer, func.owner_decl, 0); - }, - .extern_fn => { - const extern_fn = val.castTag(.extern_fn).?.data; - try dg.renderDeclName(writer, extern_fn.owner_decl, 0); - }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); - }, - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try 
dg.renderParentPtr(writer, val, ty, location), - else => unreachable, + } }, - .Array, .Vector => { - if (location == .FunctionArgument) { + .opt => |opt| { + const payload_ty = ty.optionalChild(mod); + + const is_null_val = Value.makeBool(opt.val == .none); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + return dg.renderValue(writer, Type.bool, is_null_val, location); + + if (ty.optionalReprIsPayload(mod)) return dg.renderValue( + writer, + payload_ty, + switch (opt.val) { + .none => switch (payload_ty.zigTypeTag(mod)) { + .ErrorSet => try mod.intValue(Type.err_int, 0), + .Pointer => try mod.getCoerced(val, payload_ty), + else => unreachable, + }, + else => |payload| payload.toValue(), + }, + location, + ); + + if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); } - // First try specific tag representations for more efficiency. - switch (val.tag()) { - .undef, .empty_struct_value, .empty_array => { - const ai = ty.arrayInfo(); - try writer.writeByte('{'); - if (ai.sentinel) |s| { - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } else { - try writer.writeByte('0'); - } - try writer.writeByte('}'); - }, - .bytes, .str_lit => |t| { - const bytes = switch (t) { - .bytes => val.castTag(.bytes).?.data, - .str_lit => bytes: { - const str_lit = val.castTag(.str_lit).?.data; - break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - }, - else => unreachable, - }; - const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(target)) else null; - try writer.print("{s}", .{ - fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel), - }); - }, - else => { - // Fall back to generic implementation. - var arena = std.heap.ArenaAllocator.init(dg.gpa); - defer arena.deinit(); - const arena_allocator = arena.allocator(); + try writer.writeAll("{ .payload = "); + try dg.renderValue(writer, payload_ty, switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + else => |payload| payload, + }.toValue(), initializer_type); + try writer.writeAll(", .is_null = "); + try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); + try writer.writeAll(" }"); + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type, .vector_type => { + if (location == .FunctionArgument) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + // Fall back to generic implementation. 
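// [editorial sketch, not part of this patch] Byte arrays are preferably
// rendered as one C string literal; arrays past the MSVC C2078 limit fall
// back to a brace-list of '\xNN' character literals. Only the length
// threshold drives the choice, as this distilled decision shows
// (`useStringLiteral` is a name invented for the sketch):
fn useStringLiteral(len: u64) bool {
    const max_string_initializer_len = 65535; // same limit as below
    return len <= max_string_initializer_len;
}
test "string-literal fallback threshold" {
    try @import("std").testing.expect(useStringLiteral(65535));
    try @import("std").testing.expect(!useStringLiteral(65536));
}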
- // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal - const max_string_initializer_len = 65535; + // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal + const max_string_initializer_len = 65535; - const ai = ty.arrayInfo(); - if (ai.elem_type.eql(Type.u8, dg.module)) { - if (ai.len <= max_string_initializer_len) { - var literal = stringLiteral(writer); - try literal.start(); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); - try literal.writeChar(elem_val_u8); - } - if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(target)); - if (s_u8 != 0) try literal.writeChar(s_u8); - } - try literal.end(); - } else { - try writer.writeByte('{'); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); - try writer.print("'\\x{x}'", .{elem_val_u8}); - } - if (ai.sentinel) |s| { - if (index != 0) try writer.writeByte(','); - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } - try writer.writeByte('}'); + const ai = ty.arrayInfo(mod); + if (ai.elem_type.eql(Type.u8, mod)) { + if (ai.len <= max_string_initializer_len) { + var literal = stringLiteral(writer); + try literal.start(); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + const elem_val = try val.elemValue(mod, index); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try literal.writeChar(elem_val_u8); } + if (ai.sentinel) |s| { + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); + if (s_u8 != 0) try literal.writeChar(s_u8); + } + try literal.end(); } else { try writer.writeByte('{'); var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); + const elem_val = try val.elemValue(mod, index); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try writer.print("'\\x{x}'", .{elem_val_u8}); } if (ai.sentinel) |s| { if (index != 0) try writer.writeByte(','); @@ -1168,122 +1246,22 @@ pub const DeclGen = struct { } try writer.writeByte('}'); } - }, - } - }, - .Bool => { - if (val.toBool()) { - return writer.writeAll("true"); - } else { - return writer.writeAll("false"); - } - }, - .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); - - const is_null_val = Value.makeBool(val.tag() == .null_value); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) - return dg.renderValue(writer, Type.bool, is_null_val, location); - - if (ty.optionalReprIsPayload()) { - const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val; - return dg.renderValue(writer, payload_ty, payload_val, location); - } - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else 
Value.undef; - - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); - try writer.writeAll(", .is_null = "); - try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); - try writer.writeAll(" }"); - }, - .ErrorSet => { - if (val.castTag(.@"error")) |error_pl| { - // Error values are already defined by genErrDecls. - try writer.print("zig_error_{}", .{fmtIdent(error_pl.data.name)}); - } else { - try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, .Other)}); - } - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - const error_ty = ty.errorUnionSet(); - const error_val = if (val.errorUnionIsPayload()) Value.zero else val; - - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - return dg.renderValue(writer, error_ty, error_val, location); - } - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); - try writer.writeAll(", .error = "); - try dg.renderValue(writer, error_ty, error_val, initializer_type); - try writer.writeAll(" }"); - }, - .Enum => { - switch (val.tag()) { - .enum_field_index => { - const field_index = val.castTag(.enum_field_index).?.data; - switch (ty.tag()) { - .enum_simple => return writer.print("{d}", .{field_index}), - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index]; - return dg.renderValue(writer, enum_full.tag_ty, tag_val, location); - } else { - return writer.print("{d}", .{field_index}); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - if (enum_obj.values.count() != 0) { - const tag_val = enum_obj.values.keys()[field_index]; - return dg.renderValue(writer, enum_obj.tag_ty, tag_val, location); - } else { - return writer.print("{d}", .{field_index}); - } - }, - else => unreachable, + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(mod, index); + try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); } - }, - else => { - var int_tag_ty_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_ty_buffer); - return dg.renderValue(writer, int_tag_ty, val, location); - }, - } - }, - .Fn => switch (val.tag()) { - .function => { - const decl = val.castTag(.function).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } }, - .extern_fn => { - const decl = val.castTag(.extern_fn).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - else => unreachable, - }, - .Struct => switch (ty.containerLayout()) { - .Auto, .Extern => { - const field_vals = val.castTag(.aggregate).?.data; - + .anon_struct_type => |tuple| { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); @@ -1292,133 +1270,184 @@ pub const DeclGen = struct { try writer.writeByte('{'); var empty = true; - for (field_vals, 0..) 
|field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + for (tuple.types, tuple.values, 0..) |field_ty, comptime_ty, field_i| { + if (comptime_ty != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeByte(','); - try dg.renderValue(writer, field_ty, field_val, initializer_type); + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty, + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), initializer_type); empty = false; } try writer.writeByte('}'); }, - .Packed => { - const field_vals = val.castTag(.aggregate).?.data; - const int_info = ty.intInfo(target); - - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); - - var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - - var eff_num_fields: usize = 0; - for (0..field_vals.len) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; - - eff_num_fields += 1; - } - - if (eff_num_fields == 0) { - try writer.writeByte('('); - try dg.renderValue(writer, ty, Value.undef, initializer_type); - try writer.writeByte(')'); - } else if (ty.bitSize(target) > 64) { - // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) - var num_or = eff_num_fields - 1; - while (num_or > 0) : (num_or -= 1) { - try writer.writeAll("zig_or_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - } - - var eff_index: usize = 0; - var needs_closing_paren = false; - for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; - - const cast_context = IntCastContext{ .value = .{ .value = field_val } }; - if (bit_offset_val_pl.data != 0) { - try writer.writeAll("zig_shl_"); - try dg.renderTypeForBuiltinFnName(writer, ty); + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + switch (struct_obj.layout) { + .Auto, .Extern => { + if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); - try writer.writeAll(", "); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try dg.renderType(writer, ty); try writer.writeByte(')'); - } else { - try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); } - if (needs_closing_paren) try writer.writeByte(')'); - if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + try writer.writeByte('{'); + var empty = true; + for (struct_obj.fields.values(), 0..) 
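// NOTE(editor): the field loop below skips comptime and zero-bit fields
// and materializes each value from the interned aggregate storage
// (.bytes, .elems, or .repeated_elem) before rendering it.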
|field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - bit_offset_val_pl.data += field_ty.bitSize(target); - needs_closing_paren = true; - eff_index += 1; - } - } else { - try writer.writeByte('('); - // a << a_off | b << b_off | c << c_off - var empty = true; - for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!empty) try writer.writeByte(','); + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + try dg.renderValue(writer, field.ty, field_val.toValue(), initializer_type); - if (!empty) try writer.writeAll(" | "); - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); + empty = false; + } + try writer.writeByte('}'); + }, + .Packed => { + const int_info = ty.intInfo(mod); - if (bit_offset_val_pl.data != 0) { - try dg.renderValue(writer, field_ty, field_val, .Other); - try writer.writeAll(" << "); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - } else { - try dg.renderValue(writer, field_ty, field_val, .Other); + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); + + var bit_offset: u64 = 0; + var eff_num_fields: usize = 0; + + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + eff_num_fields += 1; } - bit_offset_val_pl.data += field_ty.bitSize(target); - empty = false; - } - try writer.writeByte(')'); + if (eff_num_fields == 0) { + try writer.writeByte('('); + try dg.renderValue(writer, ty, Value.undef, initializer_type); + try writer.writeByte(')'); + } else if (ty.bitSize(mod) > 64) { + // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) + var num_or = eff_num_fields - 1; + while (num_or > 0) : (num_or -= 1) { + try writer.writeAll("zig_or_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + } + + var eff_index: usize = 0; + var needs_closing_paren = false; + for (struct_obj.fields.values(), 0..) 
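// NOTE(editor, illustrative): for packed structs wider than 64 bits the
// loop below nests zig_or_/zig_shl_ builtin calls, as sketched in the
// comment above; the narrower case further down packs with plain C
// operators instead, roughly `(T)a << a_off | (T)b << b_off`.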
|field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } }; + if (bit_offset != 0) { + try writer.writeAll("zig_shl_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument); + try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } else { + try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument); + } + + if (needs_closing_paren) try writer.writeByte(')'); + if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + + bit_offset += field.ty.bitSize(mod); + needs_closing_paren = true; + eff_index += 1; + } + } else { + try writer.writeByte('('); + // a << a_off | b << b_off | c << c_off + var empty = true; + for (struct_obj.fields.values(), 0..) |field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeAll(" | "); + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + + if (bit_offset != 0) { + try dg.renderValue(writer, field.ty, field_val.toValue(), .Other); + try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + } else { + try dg.renderValue(writer, field.ty, field_val.toValue(), .Other); + } + + bit_offset += field.ty.bitSize(mod); + empty = false; + } + try writer.writeByte(')'); + } + }, } }, + else => unreachable, }, - .Union => { - const union_obj = val.castTag(.@"union").?.data; - + .un => |un| { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); } - const field_i = ty.unionTagFieldIndex(union_obj.tag, dg.module).?; - const field_ty = ty.unionFields().values()[field_i].ty; - const field_name = ty.unionFields().keys()[field_i]; - if (ty.containerLayout() == .Packed) { - if (field_ty.hasRuntimeBits()) { - if (field_ty.isPtrAtRuntime()) { + const field_i = ty.unionTagFieldIndex(un.tag.toValue(), mod).?; + const field_ty = ty.unionFields(mod).values()[field_i].ty; + const field_name = ty.unionFields(mod).keys()[field_i]; + if (ty.containerLayout(mod) == .Packed) { + if (field_ty.hasRuntimeBits(mod)) { + if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); - } else if (field_ty.zigTypeTag() == .Float) { + } else if (field_ty.zigTypeTag(mod) == .Float) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); } - try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); + try dg.renderValue(writer, field_ty, 
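// NOTE(editor): this packed-union branch passes the active field's value
// straight through (with a cast for pointer and float fields); the tagged
// path below emits `{ .tag = ..., .payload = { .name = ... } }` and drops
// the tag member when its size is zero.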
un.val.toValue(), initializer_type); } else { try writer.writeAll("0"); } @@ -1426,44 +1455,28 @@ pub const DeclGen = struct { } try writer.writeByte('{'); - if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + if (ty.unionTagTypeSafety(mod)) |tag_ty| { + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); - try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); + try dg.renderValue(writer, tag_ty, un.tag.toValue(), initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } - if (field_ty.hasRuntimeBits()) { - try writer.print(" .{ } = ", .{fmtIdent(field_name)}); - try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); + if (field_ty.hasRuntimeBits(mod)) { + try writer.print(" .{ } = ", .{fmtIdent(mod.intern_pool.stringToSlice(field_name))}); + try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type); try writer.writeByte(' '); - } else for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + } else for (ty.unionFields(mod).values()) |field| { + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, Value.undef, initializer_type); break; } - if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); + if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}'); try writer.writeByte('}'); }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ - @tagName(tag), - }), } } @@ -1478,12 +1491,12 @@ pub const DeclGen = struct { }, ) !void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; - const fn_decl = module.declPtr(fn_decl_index); + const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); - const fn_info = fn_decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(fn_decl.ty).?; if (fn_info.cc == .Naked) { switch (kind) { .forward => try w.writeAll("zig_naked_decl "), @@ -1491,14 +1504,13 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.val.castTag(.function)) |func_payload| - if (func_payload.data.is_cold) try w.writeAll("zig_cold "); - if (fn_info.return_type.tag() == .noreturn) try w.writeAll("zig_noreturn "); + if (fn_decl.val.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); + if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1512,8 +1524,8 @@ pub const DeclGen = struct { switch (kind) { .forward => {}, - .complete => if (fn_info.alignment > 0) - try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .complete => if (fn_info.alignment.toByteUnitsOptional()) |a| + try w.print(" zig_align_fn({})", .{a}), else => unreachable, } @@ -1525,7 +1537,7 @@ pub const DeclGen = struct { try renderTypeSuffix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1537,8 +1549,8 @@ pub const DeclGen = struct { ); switch (kind) { - .forward => if 
(fn_info.alignment > 0) - try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .forward => if (fn_info.alignment.toByteUnitsOptional()) |a| + try w.print(" zig_align_fn({})", .{a}), .complete => {}, else => unreachable, } @@ -1577,9 +1589,9 @@ pub const DeclGen = struct { fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; - _ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); - try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); + const mod = dg.module; + _ = try renderTypePrefix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1619,18 +1631,18 @@ pub const DeclGen = struct { /// | > 64 bit integer | < 64 bit integer | zig_make_(0, src) /// | > 64 bit integer | > 64 bit integer | zig_make_(zig_hi_(src), zig_lo_(src)) fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void { - const target = dg.module.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const dest_int_info = dest_ty.intInfo(target); + const mod = dg.module; + const dest_bits = dest_ty.bitSize(mod); + const dest_int_info = dest_ty.intInfo(mod); - const src_is_ptr = src_ty.isPtrAtRuntime(); + const src_is_ptr = src_ty.isPtrAtRuntime(mod); const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) { .unsigned => Type.usize, .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(target); - const src_int_info = if (src_eff_ty.isAbiInt()) src_eff_ty.intInfo(target) else null; + const src_bits = src_eff_ty.bitSize(mod); + const src_int_info = if (src_eff_ty.isAbiInt(mod)) src_eff_ty.intInfo(mod) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or (toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or @@ -1703,8 +1715,8 @@ pub const DeclGen = struct { alignment: u32, kind: CType.Kind, ) error{ OutOfMemory, AnalysisFail }!void { - const target = dg.module.getTarget(); - const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)); + const mod = dg.module; + const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)); try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas); } @@ -1717,7 +1729,7 @@ pub const DeclGen = struct { alignas: CType.AlignAs, ) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; switch (std.math.order(alignas.@"align", alignas.abi)) { .lt => try w.print("zig_under_align({}) ", .{alignas.getAlign()}), @@ -1726,25 +1738,20 @@ pub const DeclGen = struct { } const trailing = - try renderTypePrefix(dg.decl_index, store.*, module, w, cty_idx, .suffix, qualifiers); + try renderTypePrefix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, qualifiers); try w.print("{}", .{trailing}); try dg.writeCValue(w, name); - try renderTypeSuffix(dg.decl_index, store.*, module, w, cty_idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, .{}); } fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { - switch (tv.val.tag()) { - .extern_fn => return true, - .function => { - const func = tv.val.castTag(.function).?.data; - return dg.module.decl_exports.contains(func.owner_decl); - }, - .variable => { - const variable = 
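// NOTE(editor): declIsGlobal below now switches on the interned value:
// extern functions are always global, while variables and functions are
// global only if their owner decl appears in mod.decl_exports.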
tv.val.castTag(.variable).?.data; - return dg.module.decl_exports.contains(variable.owner_decl); - }, + const mod = dg.module; + return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .variable => |variable| mod.decl_exports.contains(variable.decl), + .extern_func => true, + .func => |func| mod.decl_exports.contains(mod.funcPtr(func.index).owner_decl), else => unreachable, - } + }; } fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void { @@ -1819,7 +1826,7 @@ pub const DeclGen = struct { try dg.writeCValue(writer, member); } - fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: *Module.Var) !void { + fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: InternPool.Key.Variable) !void { const decl = dg.module.declPtr(decl_index); const fwd_decl_writer = dg.fwd_decl.writer(); const is_global = dg.declIsGlobal(.{ .ty = decl.ty, .val = decl.val }) or variable.is_extern; @@ -1830,7 +1837,7 @@ pub const DeclGen = struct { fwd_decl_writer, decl.ty, .{ .decl = decl_index }, - CQualifiers.init(.{ .@"const" = !variable.is_mutable }), + CQualifiers.init(.{ .@"const" = variable.is_const }), decl.@"align", .complete, ); @@ -1838,19 +1845,20 @@ pub const DeclGen = struct { } fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void { - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + try mod.markDeclAlive(decl); - if (dg.module.decl_exports.get(decl_index)) |exports| { - try writer.writeAll(exports.items[export_index].options.name); - } else if (decl.isExtern()) { - try writer.writeAll(mem.span(decl.name)); + if (mod.decl_exports.get(decl_index)) |exports| { + try writer.print("{}", .{exports.items[export_index].opts.name.fmt(&mod.intern_pool)}); + } else if (decl.isExtern(mod)) { + try writer.print("{}", .{decl.name.fmt(&mod.intern_pool)}); } else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), // expand to 3x the length of its input, but let's cut it off at a much shorter limit. 
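// NOTE(editor): the 100-byte buffer below truncates long fully qualified
// names on purpose (error.NoSpaceLeft is swallowed); uniqueness of the
// emitted identifier comes from the trailing `__{d}` counter suffix
// (presumably the decl index) rather than from the name itself.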
var name: [100]u8 = undefined; var name_stream = std.io.fixedBufferStream(&name); - decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) { + decl.renderFullyQualifiedName(mod, name_stream.writer()) catch |err| switch (err) { error.NoSpaceLeft => {}, }; try writer.print("{}__{d}", .{ @@ -1894,18 +1902,18 @@ pub const DeclGen = struct { .bits => {}, } - const target = dg.module.getTarget(); - const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{ + const mod = dg.module; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(target)), + .bits = @intCast(u16, ty.bitSize(mod)), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); - var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits }; + const bits_ty = if (is_big) Type.u16 else Type.u8; try writer.print(", {}", .{try dg.fmtIntLiteral( - if (is_big) Type.u16 else Type.u8, - Value.initPayload(&bits_pl.base), + bits_ty, + try mod.intValue(bits_ty, int_info.bits), .FunctionArgument, )}); } @@ -1916,6 +1924,7 @@ pub const DeclGen = struct { val: Value, loc: ValueRenderLocation, ) !std.fmt.Formatter(formatIntLiteral) { + const mod = dg.module; const kind: CType.Kind = switch (loc) { .FunctionArgument => .parameter, .Initializer, .Other => .complete, @@ -1923,7 +1932,7 @@ pub const DeclGen = struct { }; return std.fmt.Formatter(formatIntLiteral){ .data = .{ .dg = dg, - .int_info = ty.intInfo(dg.module.getTarget()), + .int_info = ty.intInfo(mod), .kind = kind, .cty = try dg.typeToCType(ty, kind), .val = val, @@ -1979,7 +1988,7 @@ fn renderTypeName( try w.print("{s} {s}{}__{d}", .{ @tagName(tag)["fwd_".len..], attributes, - fmtIdent(mem.span(mod.declPtr(owner_decl).name)), + fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)), @enumToInt(owner_decl), }); }, @@ -2392,15 +2401,20 @@ pub fn genGlobalAsm(mod: *Module, writer: anytype) !void { } pub fn genErrDecls(o: *Object) !void { + const mod = o.dg.module; const writer = o.writer(); try writer.writeAll("enum {\n"); o.indent_writer.pushIndent(); var max_name_len: usize = 0; - for (o.dg.module.error_name_list.items, 0..) |name, value| { - max_name_len = std.math.max(name.len, max_name_len); - var err_pl = Value.Payload.Error{ .data = .{ .name = name } }; - try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other); + for (mod.global_error_set.keys()[1..], 1..) |name_nts, value| { + const name = mod.intern_pool.stringToSlice(name_nts); + max_name_len = @max(name.len, max_name_len); + const err_val = try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = name_nts, + } }); + try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other); try writer.print(" = {d}u,\n", .{value}); } o.indent_writer.popIndent(); @@ -2412,40 +2426,44 @@ pub fn genErrDecls(o: *Object) !void { defer o.dg.gpa.free(name_buf); @memcpy(name_buf[0..name_prefix.len], name_prefix); - for (o.dg.module.error_name_list.items) |name| { + for (mod.global_error_set.keys()) |name_nts| { + const name = mod.intern_pool.stringToSlice(name_nts); @memcpy(name_buf[name_prefix.len..][0..name.len], name); const identifier = name_buf[0 .. 
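// NOTE(editor): this loop emits one `static` sentinel-terminated u8 array
// per error name; the name array built just below then gathers them into
// `{identifier, len}` slice pairs indexed by error value.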
name_prefix.len + name.len]; - var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len }; - const name_ty = Type.initPayload(&name_ty_pl.base); - - var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; - const name_val = Value.initPayload(&name_pl.base); + const name_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = name }, + } }); try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, 0, .complete); try writer.writeAll(" = "); - try o.dg.renderValue(writer, name_ty, name_val, .StaticInitializer); + try o.dg.renderValue(writer, name_ty, name_val.toValue(), .StaticInitializer); try writer.writeAll(";\n"); } - var name_array_ty_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{ - .len = o.dg.module.error_name_list.items.len, - .elem_type = Type.initTag(.const_slice_u8_sentinel_0), - } }; - const name_array_ty = Type.initPayload(&name_array_ty_pl.base); + const name_array_ty = try mod.arrayType(.{ + .len = mod.global_error_set.count(), + .child = .slice_const_u8_sentinel_0_type, + }); try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete); try writer.writeAll(" = {"); - for (o.dg.module.error_name_list.items, 0..) |name, value| { + for (mod.global_error_set.keys(), 0..) |name_nts, value| { + const name = mod.intern_pool.stringToSlice(name_nts); if (value != 0) try writer.writeByte(','); - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, name.len); try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{ - fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other), + fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .StaticInitializer), }); } try writer.writeAll("};\n"); @@ -2455,20 +2473,23 @@ fn genExports(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); + const mod = o.dg.module; + const ip = &mod.intern_pool; const fwd_decl_writer = o.dg.fwd_decl.writer(); - if (o.dg.module.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { + if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { for (exports.items[1..], 1..) 
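// NOTE(editor): the first export provides the defined symbol; each
// additional export is forwarded to it via the zig_export() macro, which
// pairs the primary name with the alias name as C string literals.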
|@"export", i| { try fwd_decl_writer.writeAll("zig_export("); try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) }); try fwd_decl_writer.print(", {s}, {s});\n", .{ - fmtStringLiteral(exports.items[0].options.name, null), - fmtStringLiteral(@"export".options.name, null), + fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null), + fmtStringLiteral(ip.stringToSlice(@"export".opts.name), null), }); } } } pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { + const mod = o.dg.module; const w = o.writer(); const key = lazy_fn.key_ptr.*; const val = lazy_fn.value_ptr; @@ -2477,7 +2498,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { .tag_name => { const enum_ty = val.data.tag_name; - const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const name_slice_ty = Type.slice_const_u8_sentinel_0; try w.writeAll("static "); try o.dg.renderType(w, name_slice_ty); @@ -2486,34 +2507,30 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeByte('('); try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); try w.writeAll(") {\n switch (tag) {\n"); - for (enum_ty.enumFields().keys(), 0..) |name, index| { - var tag_pl: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| { + const index = @intCast(u32, index_usize); + const name = mod.intern_pool.stringToSlice(name_ip); + const tag_val = try mod.enumValueFieldIndex(enum_ty, index); - var int_pl: Value.Payload.U64 = undefined; - const int_val = tag_val.enumToInt(enum_ty, &int_pl); + const int_val = try tag_val.enumToInt(enum_ty, mod); - var name_ty_pl = Type.Payload.Len{ - .base = .{ .tag = .array_u8_sentinel_0 }, - .data = name.len, - }; - const name_ty = Type.initPayload(&name_ty_pl.base); - - var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; - const name_val = Value.initPayload(&name_pl.base); - - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; - const len_val = Value.initPayload(&len_pl.base); + const name_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = name }, + } }); + const len_val = try mod.intValue(Type.usize, name.len); try w.print(" case {}: {{\n static ", .{ try o.dg.fmtIntLiteral(enum_ty, int_val, .Other), }); try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete); try w.writeAll(" = "); - try o.dg.renderValue(w, name_ty, name_val, .Initializer); + try o.dg.renderValue(w, name_ty, name_val.toValue(), .Initializer); try w.writeAll(";\n return ("); try o.dg.renderType(w, name_slice_ty); try w.print("){{{}, {}}};\n", .{ @@ -2529,7 +2546,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeAll("}\n"); }, .never_tail, .never_inline => |fn_decl_index| { - const fn_decl = o.dg.module.declPtr(fn_decl_index); + const fn_decl = mod.declPtr(fn_decl_index); const fn_cty = try o.dg.typeToCType(fn_decl.ty, .complete); const fn_info = fn_cty.cast(CType.Payload.Function).?.data; @@ -2646,19 +2663,19 @@ pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); + const mod = o.dg.module; const decl = o.dg.decl.?; const decl_c_value = .{ 
.decl = o.dg.decl_index.unwrap().? }; - const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; + const tv: TypedValue = .{ .ty = decl.ty, .val = (try decl.internValue(mod)).toValue() }; - if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return; - if (tv.val.tag() == .extern_fn) { + if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; + if (tv.val.getExternFunc(mod)) |_| { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 }); try fwd_decl_writer.writeAll(";\n"); try genExports(o); - } else if (tv.val.castTag(.variable)) |var_payload| { - const variable: *Module.Var = var_payload.data; + } else if (tv.val.getVariable(mod)) |variable| { try o.dg.renderFwdDecl(decl_c_value.decl, variable); try genExports(o); @@ -2669,11 +2686,12 @@ pub fn genDecl(o: *Object) !void { if (!is_global) try w.writeAll("static "); if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage "); - if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| + try w.print("zig_linksection(\"{s}\", ", .{s}); try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete); - if (decl.@"linksection" != null) try w.writeAll(", read, write)"); + if (decl.@"linksection" != .none) try w.writeAll(", read, write)"); try w.writeAll(" = "); - try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); + try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer); try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { @@ -2686,9 +2704,10 @@ pub fn genDecl(o: *Object) !void { const w = o.writer(); if (!is_global) try w.writeAll("static "); - if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| + try w.print("zig_linksection(\"{s}\", ", .{s}); try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.@"align", .complete); - if (decl.@"linksection" != null) try w.writeAll(", read)"); + if (decl.@"linksection" != .none) try w.writeAll(", read)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer); try w.writeAll(";\n"); @@ -2704,8 +2723,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { .val = dg.decl.?.val, }; const writer = dg.fwd_decl.writer(); + const mod = dg.module; - switch (tv.ty.zigTypeTag()) { + switch (tv.ty.zigTypeTag(mod)) { .Fn => { const is_global = dg.declIsGlobal(tv); if (is_global) { @@ -2791,17 +2811,18 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con } fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { + const mod = f.object.dg.module; + const ip = &mod.intern_pool; const air_tags = f.air.instructions.items(.tag); for (body) |inst| { - if (f.liveness.isUnused(inst) and !f.air.mustLower(inst)) { + if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip)) continue; - } const result_value = switch (air_tags[inst]) { // zig fmt: off - .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, + .arg => try airArg(f, inst), .trap => try airTrap(f.object.writer()), @@ 
-2826,10 +2847,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none), .rem => blk: { const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(); + const lhs_scalar_ty = f.typeOf(bin_op.lhs).scalarType(mod); // For binary operations @TypeOf(lhs)==@TypeOf(rhs), // so we only check one. - break :blk if (lhs_scalar_ty.isInt()) + break :blk if (lhs_scalar_ty.isInt(mod)) try airBinOp(f, inst, "%", "rem", .none) else try airBinFloatOp(f, inst, "fmod"); @@ -3077,7 +3098,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: []const u8) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -3095,9 +3116,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ } fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); + const mod = f.object.dg.module; + const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3120,13 +3142,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const ptr_ty = f.air.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const inst_ty = f.typeOfIndex(inst); + const ptr_ty = f.typeOf(bin_op.lhs); + const elem_ty = ptr_ty.childType(mod); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const ptr = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3141,7 +3164,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, inst_ty); try writer.writeByte(')'); if (elem_has_bits) try writer.writeByte('&'); - if (elem_has_bits and ptr_ty.ptrSize() == .One) { + if (elem_has_bits and ptr_ty.ptrSize(mod) == .One) { // It's a pointer to an array, so we need to de-reference. 
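// NOTE(editor, illustrative): for a single pointer to an array the emitted
// C indexes through a dereference, roughly `&(*ptr)[i]`; a many-item
// pointer is indexed directly, roughly `&ptr[i]`.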
try f.writeCValueDeref(writer, ptr); } else try f.writeCValue(writer, ptr, .Other); @@ -3155,9 +3178,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); + const mod = f.object.dg.module; + const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3180,13 +3204,14 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const slice_ty = f.air.typeOf(bin_op.lhs); - const elem_ty = slice_ty.elemType2(); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const inst_ty = f.typeOfIndex(inst); + const slice_ty = f.typeOf(bin_op.lhs); + const elem_ty = slice_ty.elemType2(mod); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const slice = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3209,9 +3234,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + const inst_ty = f.typeOfIndex(inst); + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3234,14 +3260,14 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); - const elem_type = inst_ty.elemType(); - if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty }; + const mod = f.object.dg.module; + const inst_ty = f.typeOfIndex(inst); + const elem_type = inst_ty.childType(mod); + if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; - const target = f.object.dg.module.getTarget(); const local = try f.allocLocalValue( elem_type, - inst_ty.ptrAlignment(target), + inst_ty.ptrAlignment(mod), ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; @@ -3250,14 +3276,14 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); - const elem_ty = inst_ty.elemType(); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty }; + const mod = f.object.dg.module; + const inst_ty = f.typeOfIndex(inst); + const elem_ty = inst_ty.childType(mod); + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; - const target = f.object.dg.module.getTarget(); const local = try f.allocLocalValue( elem_ty, - inst_ty.ptrAlignment(target), + inst_ty.ptrAlignment(mod), ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; @@ -3266,7 +3292,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArg(f: *Function, inst: 
Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_cty = try f.typeToIndex(inst_ty, .parameter); const i = f.next_arg_index; @@ -3290,14 +3316,15 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const ptr_ty = f.air.typeOf(ty_op.operand); - const ptr_scalar_ty = ptr_ty.scalarType(); - const ptr_info = ptr_scalar_ty.ptrInfo().data; + const ptr_ty = f.typeOf(ty_op.operand); + const ptr_scalar_ty = ptr_ty.scalarType(mod); + const ptr_info = ptr_scalar_ty.ptrInfo(mod); const src_ty = ptr_info.pointee_type; - if (!src_ty.hasRuntimeBitsIgnoreComptime()) { + if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ty_op.operand}); return .none; } @@ -3306,9 +3333,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); - const target = f.object.dg.module.getTarget(); - const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target); - const is_array = lowersToArray(src_ty, target); + const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(mod); + const is_array = lowersToArray(src_ty, mod); const need_memcpy = !is_aligned or is_array; const writer = f.object.writer(); @@ -3327,29 +3353,13 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, src_ty); try writer.writeAll("))"); } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) { - var host_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const host_ty = Type.initPayload(&host_pl.base); + const host_bits: u16 = ptr_info.host_size * 8; + const host_ty = try mod.intType(.unsigned, host_bits); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(host_pl.data - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); + const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset); - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = ptr_info.bit_offset, - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - - var field_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, src_ty.bitSize(target)), - }; - const field_ty = Type.initPayload(&field_pl.base); + const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3360,9 +3370,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("(("); try f.renderType(writer, field_ty); try writer.writeByte(')'); - const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64; if (cant_cast) { - if (field_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); @@ -3390,23 +3400,22 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) 
!CValue { } fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); - const target = f.object.dg.module.getTarget(); const op_inst = Air.refToIndex(un_op); - const op_ty = f.air.typeOf(un_op); - const ret_ty = if (is_ptr) op_ty.childType() else op_ty; - var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); + const op_ty = f.typeOf(un_op); + const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty; + const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) { try reap(f, inst, &.{un_op}); _ = try airCall(f, op_inst.?, .always_tail); - } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); var deref = is_ptr; - const is_array = lowersToArray(ret_ty, target); + const is_array = lowersToArray(ret_ty, mod); const ret_val = if (is_array) ret_val: { const array_local = try f.allocLocal(inst, lowered_ret_ty); try writer.writeAll("memcpy("); @@ -3435,22 +3444,23 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } else { try reap(f, inst, &.{un_op}); // Not even allowed to return void in a naked function. - if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() != .Naked else true) + if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention(mod) != .Naked else true) try writer.writeAll("return;\n"); } return .none; } fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); + const inst_ty = f.typeOfIndex(inst); + const inst_scalar_ty = inst_ty.scalarType(mod); + const operand_ty = f.typeOf(ty_op.operand); + const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3467,20 +3477,20 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const target = f.object.dg.module.getTarget(); - const dest_int_info = inst_scalar_ty.intInfo(target); + const inst_ty = f.typeOfIndex(inst); + const inst_scalar_ty = inst_ty.scalarType(mod); + const dest_int_info = inst_scalar_ty.intInfo(mod); const dest_bits = dest_int_info.bits; const dest_c_bits = toCIntBits(dest_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); - const scalar_int_info = scalar_ty.intInfo(target); + const operand_ty = f.typeOf(ty_op.operand); + const scalar_ty = operand_ty.scalarType(mod); + const scalar_int_info = scalar_ty.intInfo(mod); const writer = 
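// NOTE(editor): airTrunc below narrows unsigned values by zig_and_-masking
// with the destination type's max-int value; the signed path instead
// shifts by `c_bits - dest_bits` (the visible zig_shr_) so the spare high
// bits are sign-extended away.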
f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3508,14 +3518,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try v.elem(f, writer); } else switch (dest_int_info.signedness) { .unsigned => { - var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa); - defer arena.deinit(); - - const ExpectedContents = union { u: Value.Payload.U64, i: Value.Payload.I64 }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - - const mask_val = try inst_scalar_ty.maxInt(stack.get(), target); + const mask_val = try inst_scalar_ty.maxIntScalar(mod, scalar_ty); try writer.writeAll("zig_and_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); try writer.writeByte('('); @@ -3526,11 +3529,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { .signed => { const c_bits = toCIntBits(scalar_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - var shift_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = c_bits - dest_bits, - }; - const shift_val = Value.initPayload(&shift_pl.base); + const shift_val = try mod.intValue(Type.u8, c_bits - dest_bits); try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); @@ -3566,7 +3565,7 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); @@ -3577,17 +3576,18 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { + const mod = f.object.dg.module; // *a = b; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = f.air.typeOf(bin_op.lhs); - const ptr_scalar_ty = ptr_ty.scalarType(); - const ptr_info = ptr_scalar_ty.ptrInfo().data; + const ptr_ty = f.typeOf(bin_op.lhs); + const ptr_scalar_ty = ptr_ty.scalarType(mod); + const ptr_info = ptr_scalar_ty.ptrInfo(mod); const ptr_val = try f.resolveInst(bin_op.lhs); - const src_ty = f.air.typeOf(bin_op.rhs); + const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep(mod) else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3602,10 +3602,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { return .none; } - const target = f.object.dg.module.getTarget(); const is_aligned = ptr_info.@"align" == 0 or - ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target); - const is_array = lowersToArray(ptr_info.pointee_type, target); + ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(mod); + const is_array = lowersToArray(ptr_info.pointee_type, mod); const need_memcpy = !is_aligned or is_array; const src_val = try f.resolveInst(bin_op.rhs); @@ -3647,22 +3646,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) { const host_bits = ptr_info.host_size * 8; - var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits }; - const host_ty = 
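// NOTE(editor, illustrative): the bit-packed store below is a
// read-modify-write on the host integer, conceptually
// `*p = (*p & keep_mask) | ((host)src << bit_offset)`, with keep_mask
// precomputed as a big integer and the operations spelled through the
// zig_and_/zig_or_/zig_shl_ builtins for wide host types.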
Type.initPayload(&host_pl.base); + const host_ty = try mod.intType(.unsigned, host_bits); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(host_bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); + const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset); - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = ptr_info.bit_offset, - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - - const src_bits = src_ty.bitSize(target); + const src_bits = src_ty.bitSize(mod); const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb; var stack align(@alignOf(ExpectedContents)) = @@ -3675,11 +3664,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try mask.shiftLeft(&mask, ptr_info.bit_offset); try mask.bitNotWrap(&mask, .unsigned, host_bits); - var mask_pl = Value.Payload.BigInt{ - .base = .{ .tag = .int_big_positive }, - .data = mask.limbs[0..mask.len()], - }; - const mask_val = Value.initPayload(&mask_pl.base); + const mask_val = try mod.intValue_big(host_ty, mask.toConst()); try f.writeCValueDeref(writer, ptr_val); try v.elem(f, writer); @@ -3693,9 +3678,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)}); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); - const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64; if (cant_cast) { - if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (src_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_make_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeAll("(0, "); @@ -3705,7 +3690,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeByte(')'); } - if (src_ty.isPtrAtRuntime()) { + if (src_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); @@ -3728,6 +3713,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3735,9 +3721,9 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); - const operand_ty = f.air.typeOf(bin_op.lhs); - const scalar_ty = operand_ty.scalarType(); + const inst_ty = f.typeOfIndex(inst); + const operand_ty = f.typeOf(bin_op.lhs); + const scalar_ty = operand_ty.scalarType(mod); const w = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3765,15 +3751,16 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: } fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - 
const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); - if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits); + const operand_ty = f.typeOf(ty_op.operand); + const scalar_ty = operand_ty.scalarType(mod); + if (scalar_ty.ip_index != .bool_type) return try airUnBuiltinCall(f, inst, "not", .bits); const op = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3797,18 +3784,18 @@ fn airBinOp( operation: []const u8, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); - const scalar_ty = operand_ty.scalarType(); - const target = f.object.dg.module.getTarget(); - if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat()) + const operand_ty = f.typeOf(bin_op.lhs); + const scalar_ty = operand_ty.scalarType(mod); + if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); const lhs = try f.resolveInst(bin_op.lhs); const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3835,12 +3822,12 @@ fn airCmpOp( data: anytype, operator: std.math.CompareOperator, ) !CValue { - const lhs_ty = f.air.typeOf(data.lhs); - const scalar_ty = lhs_ty.scalarType(); + const mod = f.object.dg.module; + const lhs_ty = f.typeOf(data.lhs); + const scalar_ty = lhs_ty.scalarType(mod); - const target = f.object.dg.module.getTarget(); - const scalar_bits = scalar_ty.bitSize(target); - if (scalar_ty.isInt() and scalar_bits > 64) + const scalar_bits = scalar_ty.bitSize(mod); + if (scalar_ty.isInt(mod) and scalar_bits > 64) return airCmpBuiltinCall( f, inst, @@ -3852,13 +3839,13 @@ fn airCmpOp( if (scalar_ty.isRuntimeFloat()) return airCmpBuiltinCall(f, inst, data, operator, .operator, .none); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); - const rhs_ty = f.air.typeOf(data.rhs); - const need_cast = lhs_ty.isSinglePointer() or rhs_ty.isSinglePointer(); + const rhs_ty = f.typeOf(data.rhs); + const need_cast = lhs_ty.isSinglePointer(mod) or rhs_ty.isSinglePointer(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, writer, lhs_ty); @@ -3885,12 +3872,12 @@ fn airEquality( inst: Air.Inst.Index, operator: std.math.CompareOperator, ) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); - const target = f.object.dg.module.getTarget(); - const operand_bits = operand_ty.bitSize(target); - if (operand_ty.isInt() and operand_bits > 64) + const operand_ty = f.typeOf(bin_op.lhs); + const operand_bits = operand_ty.bitSize(mod); + if (operand_ty.isInt(mod) and operand_bits > 64) return airCmpBuiltinCall( f, inst, @@ -3907,12 +3894,12 @@ fn airEquality( try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const writer = f.object.writer(); - 
const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (operand_ty.zigTypeTag() == .Optional and !operand_ty.optionalReprIsPayload()) { + if (operand_ty.zigTypeTag(mod) == .Optional and !operand_ty.optionalReprIsPayload(mod)) { // (A && B) || (C && (A == B)) // A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload @@ -3951,7 +3938,7 @@ fn airEquality( fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -3965,6 +3952,7 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3972,9 +3960,9 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const elem_ty = inst_scalar_ty.elemType2(); + const inst_ty = f.typeOfIndex(inst); + const inst_scalar_ty = inst_ty.scalarType(mod); + const elem_ty = inst_scalar_ty.elemType2(mod); const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); @@ -3983,7 +3971,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { try v.elem(f, writer); try writer.writeAll(" = "); - if (elem_ty.hasRuntimeBitsIgnoreComptime()) { + if (elem_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We must convert to and from integer types to prevent UB if the operation // results in a NULL pointer, or if LHS is NULL. The operation is only UB // if the result is NULL and then dereferenced. 
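A recurring pattern in these hunks: stack-constructed Type/Value payloads are replaced by fallible, InternPool-backed constructors on the Module, and size/info queries take `mod` where they previously took `target`. A minimal before/after sketch, not a standalone program (it assumes the compiler-internal `Type`, a `target`, and a `mod: *Module` in scope; the 7-bit width is illustrative):

    // Before: the integer type lives in a stack-local payload; infallible,
    // but the payload must outlive every use of the type.
    var bits_pl = Type.Payload.Bits{
        .base = .{ .tag = .int_unsigned },
        .data = 7,
    };
    const old_ty = Type.initPayload(&bits_pl.base);
    const old_bits = old_ty.bitSize(target);

    // After: the type and value are interned, so construction can fail,
    // and queries thread the module instead of the target.
    const new_ty = try mod.intType(.unsigned, 7);
    const zero_val = try mod.intValue(new_ty, 0);
    const new_bits = new_ty.bitSize(mod);

The pervasive `f.air.typeOf`/`f.air.typeOfIndex` -> `f.typeOf`/`f.typeOfIndex` renames read as the same migration: presumably thin Function-level wrappers that supply the intern pool, though their definitions are outside this diff.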
@@ -4012,13 +4000,13 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { } fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_ty = f.typeOfIndex(inst); + const inst_scalar_ty = inst_ty.scalarType(mod); - const target = f.object.dg.module.getTarget(); - if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64) + if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64) return try airBinBuiltinCall(f, inst, operation[1..], .none); if (inst_scalar_ty.isRuntimeFloat()) return try airBinFloatOp(f, inst, operation); @@ -4054,6 +4042,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons } fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4061,9 +4050,8 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { const len = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = inst_ty.slicePtrFieldType(&buf); + const inst_ty = f.typeOfIndex(inst); + const ptr_ty = inst_ty.slicePtrFieldType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -4092,12 +4080,11 @@ fn airCall( inst: Air.Inst.Index, modifier: std.builtin.CallModifier, ) !CValue { + const mod = f.object.dg.module; // Not even allowed to call panic in a naked function. 
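 // (Hence the early `return .none` just below: a naked function gets no
 // prologue or call frame, so the backend emits no call at all.)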
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none; const gpa = f.object.dg.gpa; - const module = f.object.dg.module; - const target = module.getTarget(); const writer = f.object.writer(); const pl_op = f.air.instructions.items(.data)[inst].pl_op; @@ -4107,7 +4094,7 @@ fn airCall( const resolved_args = try gpa.alloc(CValue, args.len); defer gpa.free(resolved_args); for (resolved_args, args) |*resolved_arg, arg| { - const arg_ty = f.air.typeOf(arg); + const arg_ty = f.typeOf(arg); const arg_cty = try f.typeToIndex(arg_ty, .parameter); if (f.indexToCType(arg_cty).tag() == .void) { resolved_arg.* = .none; @@ -4115,8 +4102,7 @@ fn airCall( } resolved_arg.* = try f.resolveInst(arg); if (arg_cty != try f.typeToIndex(arg_ty, .complete)) { - var lowered_arg_buf: LowerFnRetTyBuffer = undefined; - const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target); + const lowered_arg_ty = try lowerFnRetTy(arg_ty, mod); const array_local = try f.allocLocal(inst, lowered_arg_ty); try writer.writeAll("memcpy("); @@ -4138,22 +4124,21 @@ fn airCall( for (args) |arg| try bt.feed(arg); } - const callee_ty = f.air.typeOf(pl_op.operand); - const fn_ty = switch (callee_ty.zigTypeTag()) { + const callee_ty = f.typeOf(pl_op.operand); + const fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(), + .Pointer => callee_ty.childType(mod), else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); - var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); + const ret_ty = fn_ty.fnReturnType(mod); + const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); const result_local = result: { if (modifier == .always_tail) { try writer.writeAll("zig_always_tail return "); break :result .none; - } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result .none; } else if (f.liveness.isUnused(inst)) { try writer.writeByte('('); @@ -4171,19 +4156,22 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = f.air.value(pl_op.operand) orelse break :known; - break :fn_decl switch (callee_val.tag()) { - .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, - .function => callee_val.castTag(.function).?.data.owner_decl, - .decl_ref => callee_val.castTag(.decl_ref).?.data, + const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; + break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => break :known, + }, else => break :known, }; }; switch (modifier) { .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0), - inline .never_tail, .never_inline => |mod| try writer.writeAll(try f.getLazyFnName( - @unionInit(LazyFnKey, @tagName(mod), fn_decl), - @unionInit(LazyFnValue.Data, @tagName(mod), {}), + inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName( + @unionInit(LazyFnKey, @tagName(m), fn_decl), + @unionInit(LazyFnValue.Data, @tagName(m), {}), )), else => unreachable, } @@ -4211,7 +4199,7 @@ fn airCall( try writer.writeAll(");\n"); const result = result: { - if (result_local == .none or !lowersToArray(ret_ty, target)) + if 
(result_local == .none or !lowersToArray(ret_ty, mod)) break :result result_local; const array_local = try f.allocLocal(inst, ret_ty); @@ -4245,18 +4233,21 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { - const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; - const writer = f.object.writer(); - const function = f.air.values[ty_pl.payload].castTag(.function).?.data; + const ty_fn = f.air.instructions.items(.data)[inst].ty_fn; const mod = f.object.dg.module; - try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); + const writer = f.object.writer(); + const function = mod.funcPtr(ty_fn.func); + try writer.print("/* dbg func:{s} */\n", .{ + mod.intern_pool.stringToSlice(mod.declPtr(function.owner_decl).name), + }); return .none; } fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (f.air.value(pl_op.operand)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep(mod) else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4266,6 +4257,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { } fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Block, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; @@ -4275,8 +4267,8 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { f.next_block_index += 1; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); - const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) + const inst_ty = f.typeOfIndex(inst); + const result = if (inst_ty.ip_index != .void_type and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else .none; @@ -4298,7 +4290,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.indent_writer.insertNewline(); // noreturn blocks have no `br` instructions reaching them, so we don't want a label - if (!f.air.typeOfIndex(inst).isNoReturn()) { + if (!f.typeOfIndex(inst).isNoReturn(mod)) { // label must be followed by an expression, include an empty one. 
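 // (A C label must be followed by a statement; the trailing `;` printed
 // below is that empty statement.)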
try writer.print("zig_block_{d}:;\n", .{block_id}); } @@ -4310,15 +4302,16 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.Try, pl_op.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.air.typeOf(pl_op.operand); + const err_union_ty = f.typeOf(pl_op.operand); return lowerTry(f, inst, pl_op.operand, body, err_union_ty, false); } fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.TryPtr, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = f.typeOf(extra.data.ptr).childType(mod); return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true); } @@ -4330,14 +4323,15 @@ fn lowerTry( err_union_ty: Type, is_ptr: bool, ) !CValue { + const mod = f.object.dg.module; const err_union = try f.resolveInst(operand); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); - const payload_ty = err_union_ty.errorUnionPayload(); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(); + const payload_ty = err_union_ty.errorUnionPayload(mod); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try writer.writeAll("if ("); if (!payload_has_bits) { if (is_ptr) @@ -4399,7 +4393,7 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { // If result is .none then the value of the block is unused. if (result != .none) { - const operand_ty = f.air.typeOf(branch.operand); + const operand_ty = f.typeOf(branch.operand); const operand = try f.resolveInst(branch.operand); try reap(f, inst, &.{branch.operand}); @@ -4416,10 +4410,10 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const dest_ty = f.air.typeOfIndex(inst); + const dest_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const bitcasted = try bitcast(f, dest_ty, operand, operand_ty); try reap(f, inst, &.{ty_op.operand}); @@ -4431,6 +4425,8 @@ const LocalResult = struct { need_free: bool, fn move(lr: LocalResult, f: *Function, inst: Air.Inst.Index, dest_ty: Type) !CValue { + const mod = f.object.dg.module; + if (lr.need_free) { // Move the freshly allocated local to be owned by this instruction, // by returning it here instead of freeing it. 
@@ -4441,7 +4437,7 @@ const LocalResult = struct { try lr.free(f); const writer = f.object.writer(); try f.writeCValue(writer, local, .Other); - if (dest_ty.isAbiInt()) { + if (dest_ty.isAbiInt(mod)) { try writer.writeAll(" = "); } else { try writer.writeAll(" = ("); @@ -4461,12 +4457,13 @@ const LocalResult = struct { }; fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; + const target = mod.getTarget(); const writer = f.object.writer(); - if (operand_ty.isAbiInt() and dest_ty.isAbiInt()) { - const src_info = dest_ty.intInfo(target); - const dest_info = operand_ty.intInfo(target); + if (operand_ty.isAbiInt(mod) and dest_ty.isAbiInt(mod)) { + const src_info = dest_ty.intInfo(mod); + const dest_info = operand_ty.intInfo(mod); if (src_info.signedness == dest_info.signedness and src_info.bits == dest_info.bits) { @@ -4477,7 +4474,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca } } - if (dest_ty.isPtrAtRuntime() and operand_ty.isPtrAtRuntime()) { + if (dest_ty.isPtrAtRuntime(mod) and operand_ty.isPtrAtRuntime(mod)) { const local = try f.allocLocal(0, dest_ty); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); @@ -4494,7 +4491,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca const operand_lval = if (operand == .constant) blk: { const operand_local = try f.allocLocal(0, operand_ty); try f.writeCValue(writer, operand_local, .Other); - if (operand_ty.isAbiInt()) { + if (operand_ty.isAbiInt(mod)) { try writer.writeAll(" = "); } else { try writer.writeAll(" = ("); @@ -4516,13 +4513,10 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca try writer.writeAll("));\n"); // Ensure padding bits have the expected value. - if (dest_ty.isAbiInt()) { + if (dest_ty.isAbiInt(mod)) { const dest_cty = try f.typeToCType(dest_ty, .complete); - const dest_info = dest_ty.intInfo(target); - var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) { - .unsigned => .int_unsigned, - .signed => .int_signed, - } }, .data = dest_info.bits }; + const dest_info = dest_ty.intInfo(mod); + var bits: u16 = dest_info.bits; var wrap_cty: ?CType = null; var need_bitcasts = false; @@ -4535,9 +4529,9 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca const elem_cty = f.indexToCType(pl.data.elem_type); wrap_cty = elem_cty.toSignedness(dest_info.signedness); need_bitcasts = wrap_cty.?.tag() == .zig_i128; - info_ty_pl.data -= 1; - info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8); - info_ty_pl.data += 1; + bits -= 1; + bits %= @intCast(u16, f.byteSize(elem_cty) * 8); + bits += 1; } try writer.writeAll(" = "); if (need_bitcasts) { @@ -4546,7 +4540,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca try writer.writeByte('('); } try writer.writeAll("zig_wrap_"); - const info_ty = Type.initPayload(&info_ty_pl.base); + const info_ty = try mod.intType(dest_info.signedness, bits); if (wrap_cty) |cty| try f.object.dg.renderCTypeForBuiltinFnName(writer, cty) else @@ -4622,8 +4616,9 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnreach(f: *Function) !CValue { + const mod = f.object.dg.module; // Not even allowed to call unreachable in a naked function. 
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none; try f.object.writer().writeAll("zig_unreachable();\n"); return .none; @@ -4657,6 +4652,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(") "); try genBodyResolveState(f, inst, liveness_condbr.then_deaths, then_body, false); + try writer.writeByte('\n'); // We don't need to use `genBodyResolveState` for the else block, because this instruction is // noreturn so must terminate a body, therefore we don't need to leave `value_map` or @@ -4675,19 +4671,20 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const condition = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); - const condition_ty = f.air.typeOf(pl_op.operand); + const condition_ty = f.typeOf(pl_op.operand); const switch_br = f.air.extraData(Air.SwitchBr, pl_op.payload); const writer = f.object.writer(); try writer.writeAll("switch ("); - if (condition_ty.zigTypeTag() == .Bool) { + if (condition_ty.zigTypeTag(mod) == .Bool) { try writer.writeByte('('); try f.renderType(writer, Type.u1); try writer.writeByte(')'); - } else if (condition_ty.isPtrAtRuntime()) { + } else if (condition_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); @@ -4714,12 +4711,12 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { for (items) |item| { try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); - if (condition_ty.isPtrAtRuntime()) { + if (condition_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?, .Other); + try f.object.dg.renderValue(writer, condition_ty, (try f.air.value(item, mod)).?, .Other); try writer.writeByte(':'); } try writer.writeByte(' '); @@ -4764,6 +4761,7 @@ fn asmInputNeedsLocal(constraint: []const u8, value: CValue) bool { } fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; @@ -4777,8 +4775,8 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); - const local = if (inst_ty.hasRuntimeBitsIgnoreComptime()) local: { + const inst_ty = f.typeOfIndex(inst); + const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: { const local = try f.allocLocal(inst, inst_ty); if (f.wantSafety()) { try f.writeCValue(writer, local, .Other); @@ -4807,7 +4805,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { - const output_ty = if (output == .none) inst_ty else f.air.typeOf(output).childType(); + const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod); try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(output_ty, alignment); @@ -4840,7 +4838,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[0] == '{'; const input_val = 
try f.resolveInst(input); if (asmInputNeedsLocal(constraint, input_val)) { - const input_ty = f.air.typeOf(input); + const input_ty = f.typeOf(input); if (is_reg) try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(input_ty, alignment); @@ -5025,6 +5023,7 @@ fn airIsNull( operator: []const u8, is_ptr: bool, ) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); @@ -5040,23 +5039,22 @@ fn airIsNull( try f.writeCValue(writer, operand, .Other); } - const operand_ty = f.air.typeOf(un_op); - const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty; - var payload_buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&payload_buf); - var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; + const operand_ty = f.typeOf(un_op); + const optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; + const payload_ty = optional_ty.optionalChild(mod); - const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime()) + const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) TypedValue{ .ty = Type.bool, .val = Value.true } - else if (optional_ty.isPtrLikeOptional()) + else if (optional_ty.isPtrLikeOptional(mod)) // operand is a regular pointer, test `operand !=/== NULL` - TypedValue{ .ty = optional_ty, .val = Value.null } - else if (payload_ty.zigTypeTag() == .ErrorSet) - TypedValue{ .ty = payload_ty, .val = Value.zero } - else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload()) rhs: { + TypedValue{ .ty = optional_ty, .val = try mod.getCoerced(Value.null, optional_ty) } + else if (payload_ty.zigTypeTag(mod) == .ErrorSet) + TypedValue{ .ty = Type.err_int, .val = try mod.intValue(Type.err_int, 0) } + else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); - const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf); - break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; + const slice_ptr_ty = payload_ty.slicePtrFieldType(mod); + const opt_slice_ptr_ty = try mod.optionalType(slice_ptr_ty.toIntern()); + break :rhs TypedValue{ .ty = opt_slice_ptr_ty, .val = try mod.nullValue(opt_slice_ptr_ty) }; } else rhs: { try writer.writeAll(".is_null"); break :rhs TypedValue{ .ty = Type.bool, .val = Value.true }; @@ -5070,24 +5068,24 @@ fn airIsNull( } fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const opt_ty = f.air.typeOf(ty_op.operand); + const opt_ty = f.typeOf(ty_op.operand); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); + const payload_ty = opt_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return .none; } - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); try f.writeCValue(writer, operand, .Other); @@ -5104,23 +5102,24 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { } fn airOptionalPayloadPtr(f: *Function, inst: 
Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const ptr_ty = f.air.typeOf(ty_op.operand); - const opt_ty = ptr_ty.childType(); - const inst_ty = f.air.typeOfIndex(inst); + const ptr_ty = f.typeOf(ty_op.operand); + const opt_ty = ptr_ty.childType(mod); + const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod)) { return .{ .undef = inst_ty }; } const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { // the operand is just a regular pointer, no need to do anything special. // *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C try writer.writeAll(" = "); @@ -5134,17 +5133,18 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); - const opt_ty = operand_ty.elemType(); + const opt_ty = operand_ty.childType(mod); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { if (f.liveness.isUnused(inst)) { return .none; } @@ -5179,48 +5179,49 @@ fn fieldLocation( container_ty: Type, field_ptr_ty: Type, field_index: u32, - target: std.Target, + mod: *Module, ) union(enum) { begin: void, field: CValue, byte_offset: u32, end: void, } { - return switch (container_ty.zigTypeTag()) { - .Struct => switch (container_ty.containerLayout()) { - .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| { - if (container_ty.structFieldIsComptime(next_field_index)) continue; - const field_ty = container_ty.structFieldType(next_field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + const ip = &mod.intern_pool; + return switch (container_ty.zigTypeTag(mod)) { + .Struct => switch (container_ty.containerLayout(mod)) { + .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| { + if (container_ty.structFieldIsComptime(next_field_index, mod)) continue; + const field_ty = container_ty.structFieldType(next_field_index, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - break .{ .field = if (container_ty.isSimpleTuple()) + break .{ .field = if (container_ty.isSimpleTuple(mod)) .{ .field = next_field_index } else - .{ .identifier = container_ty.structFieldName(next_field_index) } }; - } else if (container_ty.hasRuntimeBitsIgnoreComptime()) .end else .begin, - .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0) - .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, target) } + .{ .identifier = ip.stringToSlice(container_ty.structFieldName(next_field_index, mod)) } }; + } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin, + .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0) + .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, 
mod) } else .begin, }, - .Union => switch (container_ty.containerLayout()) { + .Union => switch (container_ty.containerLayout(mod)) { .Auto, .Extern => { - const field_ty = container_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) - return if (container_ty.unionTagTypeSafety() != null and - !container_ty.unionHasAllZeroBitFieldTypes()) + const field_ty = container_ty.structFieldType(field_index, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) + return if (container_ty.unionTagTypeSafety(mod) != null and + !container_ty.unionHasAllZeroBitFieldTypes(mod)) .{ .field = .{ .identifier = "payload" } } else .begin; - const field_name = container_ty.unionFields().keys()[field_index]; - return .{ .field = if (container_ty.unionTagTypeSafety()) |_| - .{ .payload_identifier = field_name } + const field_name = container_ty.unionFields(mod).keys()[field_index]; + return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_| + .{ .payload_identifier = ip.stringToSlice(field_name) } else - .{ .identifier = field_name } }; + .{ .identifier = ip.stringToSlice(field_name) } }; }, .Packed => .begin, }, - .Pointer => switch (container_ty.ptrSize()) { + .Pointer => switch (container_ty.ptrSize(mod)) { .Slice => switch (field_index) { 0 => .{ .field = .{ .identifier = "ptr" } }, 1 => .{ .field = .{ .identifier = "len" } }, @@ -5238,7 +5239,7 @@ fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue { const container_ptr_val = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); - const container_ptr_ty = f.air.typeOf(extra.struct_operand); + const container_ptr_ty = f.typeOf(extra.struct_operand); return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, extra.field_index); } @@ -5247,19 +5248,19 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue const container_ptr_val = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const container_ptr_ty = f.air.typeOf(ty_op.operand); + const container_ptr_ty = f.typeOf(ty_op.operand); return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, index); } fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const target = f.object.dg.module.getTarget(); - const container_ptr_ty = f.air.typeOfIndex(inst); - const container_ty = container_ptr_ty.childType(); + const container_ptr_ty = f.typeOfIndex(inst); + const container_ty = container_ptr_ty.childType(mod); - const field_ptr_ty = f.air.typeOf(extra.field_ptr); + const field_ptr_ty = f.typeOf(extra.field_ptr); const field_ptr_val = try f.resolveInst(extra.field_ptr); try reap(f, inst, &.{extra.field_ptr}); @@ -5270,12 +5271,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, container_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, target)) { + switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5288,15 +5287,9 @@ 
fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5306,7 +5299,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { }, .end => { try f.writeCValue(writer, field_ptr_val, .Other); - try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5321,9 +5314,9 @@ fn fieldPtr( container_ptr_val: CValue, field_index: u32, ) !CValue { - const target = f.object.dg.module.getTarget(); - const container_ty = container_ptr_ty.elemType(); - const field_ptr_ty = f.air.typeOfIndex(inst); + const mod = f.object.dg.module; + const container_ty = container_ptr_ty.childType(mod); + const field_ptr_ty = f.typeOfIndex(inst); // Ensure complete type definition is visible before accessing fields. _ = try f.typeToIndex(container_ty, .complete); @@ -5335,22 +5328,16 @@ fn fieldPtr( try f.renderType(writer, field_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ty, field_ptr_ty, field_index, target)) { + switch (fieldLocation(container_ty, field_ptr_ty, field_index, mod)) { .begin => try f.writeCValue(writer, container_ptr_val, .Initializer), .field => |field| { try writer.writeByte('&'); try f.writeCValueDerefMember(writer, container_ptr_val, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5361,7 +5348,7 @@ fn fieldPtr( .end => { try writer.writeByte('('); try f.writeCValue(writer, container_ptr_val, .Other); - try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5370,58 +5357,45 @@ fn fieldPtr( } fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; + const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + const inst_ty = f.typeOfIndex(inst); + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{extra.struct_operand}); return .none; } - const target = f.object.dg.module.getTarget(); const struct_byval = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); - const struct_ty = f.air.typeOf(extra.struct_operand); + const struct_ty = 
f.typeOf(extra.struct_operand); const writer = f.object.writer(); // Ensure complete type definition is visible before accessing fields. _ = try f.typeToIndex(struct_ty, .complete); - const field_name: CValue = switch (struct_ty.tag()) { - .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { - .Auto, .Extern => if (struct_ty.isSimpleTuple()) + const field_name: CValue = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .struct_type => switch (struct_ty.containerLayout(mod)) { + .Auto, .Extern => if (struct_ty.isSimpleTuple(mod)) .{ .field = extra.field_index } else - .{ .identifier = struct_ty.structFieldName(extra.field_index) }, + .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) }, .Packed => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - const int_info = struct_ty.intInfo(target); + const struct_obj = mod.typeToStruct(struct_ty).?; + const int_info = struct_ty.intInfo(mod); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = struct_obj.packedFieldBitOffset(target, extra.field_index), - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); - const field_int_signedness = if (inst_ty.isAbiInt()) - inst_ty.intInfo(target).signedness + const field_int_signedness = if (inst_ty.isAbiInt(mod)) + inst_ty.intInfo(mod).signedness else .unsigned; - var field_int_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (field_int_signedness) { - .unsigned => .int_unsigned, - .signed => .int_signed, - } }, - .data = @intCast(u16, inst_ty.bitSize(target)), - }; - const field_int_ty = Type.initPayload(&field_int_pl.base); + const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); const temp_local = try f.allocLocal(inst, field_int_ty); try f.writeCValue(writer, temp_local, .Other); @@ -5432,18 +5406,18 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); const cant_cast = int_info.bits > 64; if (cant_cast) { - if (field_int_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); } - if (bit_offset_val_pl.data > 0) { + if (bit_offset > 0) { try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); } try f.writeCValue(writer, struct_byval, .Other); - if (bit_offset_val_pl.data > 0) { + if (bit_offset > 0) { try writer.writeAll(", "); try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); @@ -5465,36 +5439,46 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { return local; }, }, - .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) { - const operand_lval = if (struct_byval == 
.constant) blk: { - const operand_local = try f.allocLocal(inst, struct_ty); - try f.writeCValue(writer, operand_local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, struct_byval, .Initializer); - try writer.writeAll(";\n"); - break :blk operand_local; - } else struct_byval; - const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy(&"); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(", &"); - try f.writeCValue(writer, operand_lval, .Other); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("));\n"); + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0) + .{ .field = extra.field_index } + else + .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) }, - if (struct_byval == .constant) { - try freeLocal(f, inst, operand_lval.new_local, 0); + .union_type => |union_type| field_name: { + const union_obj = mod.unionPtr(union_type.index); + if (union_obj.layout == .Packed) { + const operand_lval = if (struct_byval == .constant) blk: { + const operand_local = try f.allocLocal(inst, struct_ty); + try f.writeCValue(writer, operand_local, .Other); + try writer.writeAll(" = "); + try f.writeCValue(writer, struct_byval, .Initializer); + try writer.writeAll(";\n"); + break :blk operand_local; + } else struct_byval; + + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("memcpy(&"); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(", &"); + try f.writeCValue(writer, operand_lval, .Other); + try writer.writeAll(", sizeof("); + try f.renderType(writer, inst_ty); + try writer.writeAll("));\n"); + + if (struct_byval == .constant) { + try freeLocal(f, inst, operand_lval.new_local, 0); + } + + return local; + } else { + const name = union_obj.fields.keys()[extra.field_index]; + break :field_name if (union_type.hasTag()) .{ + .payload_identifier = ip.stringToSlice(name), + } else .{ + .identifier = ip.stringToSlice(name), + }; } - - return local; - } else field_name: { - const name = struct_ty.unionFields().keys()[extra.field_index]; - break :field_name if (struct_ty.unionTagTypeSafety()) |_| - .{ .payload_identifier = name } - else - .{ .identifier = name }; }, else => unreachable, }; @@ -5511,20 +5495,21 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { /// *(E!T) -> E /// Note that the result is never a pointer. 
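 /// For example, unwrapping an `anyerror!u32` yields an `anyerror` by value,
 /// even when the error union is accessed through a pointer.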
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer; - const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; + const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const local = try f.allocLocal(inst, inst_ty); - if (!payload_ty.hasRuntimeBits() and operand == .local and operand.local == local.new_local) { + if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) { // The store will be 'x = x'; elide it. return local; } @@ -5533,32 +5518,33 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBits(mod)) { try f.writeCValue(writer, operand, .Other); } else { - if (!error_ty.errorSetIsEmpty()) + if (!error_ty.errorSetIsEmpty(mod)) if (operand_is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) else try f.writeCValueMember(writer, operand, .{ .identifier = "error" }) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Initializer); } try writer.writeAll(";\n"); return local; } fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); - const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; + const operand_ty = f.typeOf(ty_op.operand); + const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const writer = f.object.writer(); - if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) { + if (!error_union_ty.errorUnionPayload(mod).hasRuntimeBits(mod)) { if (!is_ptr) return .none; const local = try f.allocLocal(inst, inst_ty); @@ -5584,11 +5570,12 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu } fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); - const repr_is_payload = inst_ty.optionalReprIsPayload(); - const payload_ty = f.air.typeOf(ty_op.operand); + const inst_ty = f.typeOfIndex(inst); + const repr_is_payload = inst_ty.optionalReprIsPayload(mod); + const payload_ty = f.typeOf(ty_op.operand); const payload = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5615,12 +5602,13 @@ fn airWrapOptional(f: *Function, 
inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); - const payload_ty = inst_ty.errorUnionPayload(); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(); - const err_ty = inst_ty.errorUnionSet(); + const inst_ty = f.typeOfIndex(inst); + const payload_ty = inst_ty.errorUnionPayload(mod); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); + const err_ty = inst_ty.errorUnionSet(mod); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5653,19 +5641,20 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); - const error_union_ty = f.air.typeOf(ty_op.operand).childType(); + const error_union_ty = f.typeOf(ty_op.operand).childType(mod); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); // First, set the non-error value. - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { try f.writeCValueDeref(writer, operand); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n "); return operand; @@ -5673,13 +5662,13 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); try f.writeCValueDeref(writer, operand); try writer.writeAll(".error = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other); try writer.writeAll(";\n"); // Then return the payload pointer (only if it is used) if (f.liveness.isUnused(inst)) return .none; - const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); + const local = try f.allocLocal(inst, f.typeOfIndex(inst)); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = &("); try f.writeCValueDeref(writer, operand); @@ -5703,13 +5692,14 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); - const payload_ty = inst_ty.errorUnionPayload(); + const inst_ty = f.typeOfIndex(inst); + const payload_ty = inst_ty.errorUnionPayload(mod); const payload = try f.resolveInst(ty_op.operand); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(); - const err_ty = inst_ty.errorUnionSet(); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); + const err_ty = inst_ty.errorUnionSet(mod); try reap(f, inst, &.{ty_op.operand}); const writer = f.object.writer(); @@ -5728,29 +5718,30 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, local, .{ .identifier = "error" }); try a.assign(f, writer); - try 
f.object.dg.renderValue(writer, err_ty, Value.zero, .Other);
+            try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other);
         try a.end(f, writer);
     }
     return local;
 }
 
 fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
     const writer = f.object.writer();
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
-    const operand_ty = f.air.typeOf(un_op);
+    const operand_ty = f.typeOf(un_op);
     const local = try f.allocLocal(inst, Type.bool);
-    const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
-    const payload_ty = err_union_ty.errorUnionPayload();
-    const error_ty = err_union_ty.errorUnionSet();
+    const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
+    const payload_ty = err_union_ty.errorUnionPayload(mod);
+    const error_ty = err_union_ty.errorUnionSet(mod);
 
     try f.writeCValue(writer, local, .Other);
     try writer.writeAll(" = ");
-    if (!error_ty.errorSetIsEmpty())
-        if (payload_ty.hasRuntimeBits())
+    if (!error_ty.errorSetIsEmpty(mod))
+        if (payload_ty.hasRuntimeBits(mod))
             if (is_ptr)
                 try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
             else
@@ -5758,42 +5749,40 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
         else
             try f.writeCValue(writer, operand, .Other)
     else
-        try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
+        try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other);
     try writer.writeByte(' ');
     try writer.writeAll(operator);
     try writer.writeByte(' ');
-    try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
+    try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other);
     try writer.writeAll(";\n");
     return local;
 }
 
 fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
-    const array_ty = f.air.typeOf(ty_op.operand).childType();
+    const array_ty = f.typeOf(ty_op.operand).childType(mod);
 
     try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
     try writer.writeAll(" = ");
     // Unfortunately, C does not support any equivalent to
     // &(*(void *)p)[0], although LLVM does via GetElementPtr
     if (operand == .undef) {
-        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-        try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer);
-    } else if (array_ty.hasRuntimeBitsIgnoreComptime()) {
+        try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(mod) }, .Initializer);
+    } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         try writer.writeAll("&(");
         try f.writeCValueDeref(writer, operand);
-        try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
+        try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))});
     } else try f.writeCValue(writer, operand, .Initializer);
     try writer.writeAll("; ");
 
-    const array_len = array_ty.arrayLen();
-    var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len };
-    const len_val = Value.initPayload(&len_pl.base);
+    const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod));
     try f.writeCValueMember(writer, local, .{ .identifier = "len" });
     try writer.print(" = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)});
@@ -5801,19 +5790,20 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
-    const operand_ty = f.air.typeOf(ty_op.operand);
+    const operand_ty = f.typeOf(ty_op.operand);
     const target = f.object.dg.module.getTarget();
     const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat())
         if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend"
-    else if (inst_ty.isInt() and operand_ty.isRuntimeFloat())
-        if (inst_ty.isSignedInt()) "fix" else "fixuns"
-    else if (inst_ty.isRuntimeFloat() and operand_ty.isInt())
-        if (operand_ty.isSignedInt()) "float" else "floatun"
+    else if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat())
+        if (inst_ty.isSignedInt(mod)) "fix" else "fixuns"
+    else if (inst_ty.isRuntimeFloat() and operand_ty.isInt(mod))
+        if (operand_ty.isSignedInt(mod)) "float" else "floatun"
     else
         unreachable;
@@ -5822,19 +5812,19 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
     try f.writeCValue(writer, local, .Other);
     try writer.writeAll(" = ");
-    if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
+    if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) {
         try writer.writeAll("zig_wrap_");
         try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
         try writer.writeByte('(');
     }
     try writer.writeAll("zig_");
     try writer.writeAll(operation);
-    try writer.writeAll(compilerRtAbbrev(operand_ty, target));
-    try writer.writeAll(compilerRtAbbrev(inst_ty, target));
+    try writer.writeAll(compilerRtAbbrev(operand_ty, mod));
+    try writer.writeAll(compilerRtAbbrev(inst_ty, mod));
     try writer.writeByte('(');
     try f.writeCValue(writer, operand, .FunctionArgument);
     try writer.writeByte(')');
-    if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
+    if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) {
         try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
         try writer.writeByte(')');
     }
@@ -5843,12 +5833,13 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
     const operand = try f.resolveInst(un_op);
-    const operand_ty = f.air.typeOf(un_op);
+    const operand_ty = f.typeOf(un_op);
     try reap(f, inst, &.{un_op});
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
     try f.writeCValue(writer, local, .Other);
@@ -5856,7 +5847,7 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
     try writer.writeAll(" = (");
     try f.renderType(writer, inst_ty);
     try writer.writeByte(')');
-    if (operand_ty.isSlice()) {
+    if (operand_ty.isSlice(mod)) {
         try f.writeCValueMember(writer, operand, .{ .identifier = "len" });
     } else {
         try f.writeCValue(writer, operand, .Other);
@@ -5871,14 +5862,15 @@ fn airUnBuiltinCall(
     operation: []const u8,
     info: BuiltinInfo,
 ) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
-    const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
-    const operand_ty = f.air.typeOf(ty_op.operand);
-    const scalar_ty = operand_ty.scalarType();
+    const inst_ty = f.typeOfIndex(inst);
+    const inst_scalar_ty = inst_ty.scalarType(mod);
+    const operand_ty = f.typeOf(ty_op.operand);
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
     const ref_ret = inst_scalar_cty.tag() == .array;
@@ -5914,9 +5906,10 @@ fn airBinBuiltinCall(
     operation: []const u8,
     info: BuiltinInfo,
 ) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
 
-    const operand_ty = f.air.typeOf(bin_op.lhs);
+    const operand_ty = f.typeOf(bin_op.lhs);
     const operand_cty = try f.typeToCType(operand_ty, .complete);
     const is_big = operand_cty.tag() == .array;
@@ -5924,9 +5917,9 @@ fn airBinBuiltinCall(
     const rhs = try f.resolveInst(bin_op.rhs);
     if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
-    const scalar_ty = operand_ty.scalarType();
+    const inst_ty = f.typeOfIndex(inst);
+    const inst_scalar_ty = inst_ty.scalarType(mod);
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
     const ref_ret = inst_scalar_cty.tag() == .array;
@@ -5968,14 +5961,15 @@ fn airCmpBuiltinCall(
     operation: enum { cmp, operator },
     info: BuiltinInfo,
 ) !CValue {
+    const mod = f.object.dg.module;
     const lhs = try f.resolveInst(data.lhs);
     const rhs = try f.resolveInst(data.rhs);
     try reap(f, inst, &.{ data.lhs, data.rhs });
 
-    const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
-    const operand_ty = f.air.typeOf(data.lhs);
-    const scalar_ty = operand_ty.scalarType();
+    const inst_ty = f.typeOfIndex(inst);
+    const inst_scalar_ty = inst_ty.scalarType(mod);
+    const operand_ty = f.typeOf(data.lhs);
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
     const ref_ret = inst_scalar_cty.tag() == .array;
@@ -6008,7 +6002,7 @@ fn airCmpBuiltinCall(
     try writer.writeByte(')');
     if (!ref_ret) try writer.print(" {s} {}", .{
         compareOperatorC(operator),
-        try f.fmtIntLiteral(Type.initTag(.i32), Value.zero),
+        try f.fmtIntLiteral(Type.i32, try mod.intValue(Type.i32, 0)),
     });
     try writer.writeAll(";\n");
     try v.end(f, inst, writer);
@@ -6017,28 +6011,27 @@ fn airCmpBuiltinCall(
 }
 
 fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const ptr = try f.resolveInst(extra.ptr);
     const expected_value = try f.resolveInst(extra.expected_value);
     const new_value = try f.resolveInst(extra.new_value);
-    const ptr_ty = f.air.typeOf(extra.ptr);
-    const ty = ptr_ty.childType();
+    const ptr_ty = f.typeOf(extra.ptr);
+    const ty = ptr_ty.childType(mod);
 
     const writer = f.object.writer();
     const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value);
     try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
 
-    const target = f.object.dg.module.getTarget();
-    var repr_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_unsigned },
-        .data = @intCast(u16, ty.abiSize(target) * 8),
-    };
-    const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+    const repr_ty = if (ty.isRuntimeFloat())
+        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+    else
+        ty;
 
     const local = try f.allocLocal(inst, inst_ty);
-    if (inst_ty.isPtrLikeOptional()) {
+    if (inst_ty.isPtrLikeOptional(mod)) {
         {
             const a = try Assignment.start(f, writer, ty);
             try f.writeCValue(writer, local, .Other);
@@ -6051,7 +6044,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
         try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor});
         try f.renderType(writer, ty);
         try writer.writeByte(')');
-        if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+        if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
         try writer.writeAll(" *)");
         try f.writeCValue(writer, ptr, .Other);
         try writer.writeAll(", ");
@@ -6093,7 +6086,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
         try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor});
         try f.renderType(writer, ty);
         try writer.writeByte(')');
-        if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+        if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
         try writer.writeAll(" *)");
         try f.writeCValue(writer, ptr, .Other);
         try writer.writeAll(", ");
@@ -6123,11 +6116,12 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
 }
 
 fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
     const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
-    const inst_ty = f.air.typeOfIndex(inst);
-    const ptr_ty = f.air.typeOf(pl_op.operand);
-    const ty = ptr_ty.childType();
+    const inst_ty = f.typeOfIndex(inst);
+    const ptr_ty = f.typeOf(pl_op.operand);
+    const ty = ptr_ty.childType(mod);
     const ptr = try f.resolveInst(pl_op.operand);
     const operand = try f.resolveInst(extra.operand);
@@ -6135,14 +6129,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand_mat = try Materialize.start(f, inst, writer, ty, operand);
     try reap(f, inst, &.{ pl_op.operand, extra.operand });
 
-    const target = f.object.dg.module.getTarget();
-    var repr_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_unsigned },
-        .data = @intCast(u16, ty.abiSize(target) * 8),
-    };
+    const repr_bits = @intCast(u16, ty.abiSize(mod) * 8);
     const is_float = ty.isRuntimeFloat();
-    const is_128 = repr_pl.data == 128;
-    const repr_ty = if (is_float) Type.initPayload(&repr_pl.base) else ty;
+    const is_128 = repr_bits == 128;
+    const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty;
 
     const local = try f.allocLocal(inst, inst_ty);
     try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())});
@@ -6158,7 +6148,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
     if (use_atomic) try writer.writeAll("zig_atomic(");
     try f.renderType(writer, ty);
     if (use_atomic) try writer.writeByte(')');
-    if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+    if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
     try writer.writeAll(" *)");
     try f.writeCValue(writer, ptr, .Other);
     try writer.writeAll(", ");
@@ -6181,20 +6171,19 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const atomic_load = f.air.instructions.items(.data)[inst].atomic_load;
     const ptr = try f.resolveInst(atomic_load.ptr);
     try reap(f, inst, &.{atomic_load.ptr});
-    const ptr_ty = f.air.typeOf(atomic_load.ptr);
-    const ty = ptr_ty.childType();
+    const ptr_ty = f.typeOf(atomic_load.ptr);
+    const ty = ptr_ty.childType(mod);
 
-    const target = f.object.dg.module.getTarget();
-    var repr_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_unsigned },
-        .data = @intCast(u16, ty.abiSize(target) * 8),
-    };
-    const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+    const repr_ty = if (ty.isRuntimeFloat())
+        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+    else
+        ty;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -6203,7 +6192,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     try writer.writeAll(", (zig_atomic(");
     try f.renderType(writer, ty);
     try writer.writeByte(')');
-    if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+    if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
     try writer.writeAll(" *)");
     try f.writeCValue(writer, ptr, .Other);
     try writer.writeAll(", ");
@@ -6218,9 +6207,10 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
-    const ptr_ty = f.air.typeOf(bin_op.lhs);
-    const ty = ptr_ty.childType();
+    const ptr_ty = f.typeOf(bin_op.lhs);
+    const ty = ptr_ty.childType(mod);
     const ptr = try f.resolveInst(bin_op.lhs);
     const element = try f.resolveInst(bin_op.rhs);
@@ -6228,17 +6218,15 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
     const element_mat = try Materialize.start(f, inst, writer, ty, element);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const target = f.object.dg.module.getTarget();
-    var repr_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_unsigned },
-        .data = @intCast(u16, ty.abiSize(target) * 8),
-    };
-    const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+    const repr_ty = if (ty.isRuntimeFloat())
+        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+    else
+        ty;
 
     try writer.writeAll("zig_atomic_store((zig_atomic(");
     try f.renderType(writer, ty);
     try writer.writeByte(')');
-    if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile");
+    if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile");
     try writer.writeAll(" *)");
     try f.writeCValue(writer, ptr, .Other);
     try writer.writeAll(", ");
@@ -6254,7 +6242,8 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
 }
 
 fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void {
-    if (ptr_ty.isSlice()) {
+    const mod = f.object.dg.module;
+    if (ptr_ty.isSlice(mod)) {
         try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" });
     } else {
         try f.writeCValue(writer, ptr, .FunctionArgument);
@@ -6262,14 +6251,14 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo
 }
 
 fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
-    const dest_ty = f.air.typeOf(bin_op.lhs);
+    const dest_ty = f.typeOf(bin_op.lhs);
     const dest_slice = try f.resolveInst(bin_op.lhs);
     const value = try f.resolveInst(bin_op.rhs);
-    const elem_ty = f.air.typeOf(bin_op.rhs);
-    const target = f.object.dg.module.getTarget();
-    const elem_abi_size = elem_ty.abiSize(target);
-    const val_is_undef = if (f.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+    const elem_ty = f.typeOf(bin_op.rhs);
+    const elem_abi_size = elem_ty.abiSize(mod);
+    const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
     const writer = f.object.writer();
 
     if (val_is_undef) {
@@ -6279,7 +6268,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         }
 
         try writer.writeAll("memset(");
-        switch (dest_ty.ptrSize()) {
+        switch (dest_ty.ptrSize(mod)) {
             .Slice => {
                 try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
                 try writer.writeAll(", 0xaa, ");
@@ -6291,8 +6280,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
                 }
             },
             .One => {
-                const array_ty = dest_ty.childType();
-                const len = array_ty.arrayLen() * elem_abi_size;
+                const array_ty = dest_ty.childType(mod);
+                const len = array_ty.arrayLen(mod) * elem_abi_size;
 
                 try f.writeCValue(writer, dest_slice, .FunctionArgument);
                 try writer.print(", 0xaa, {d});\n", .{len});
@@ -6303,32 +6292,33 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         return .none;
     }
 
-    if (elem_abi_size > 1 or dest_ty.isVolatilePtr()) {
+    if (elem_abi_size > 1 or dest_ty.isVolatilePtr(mod)) {
         // For the assignment in this loop, the array pointer needs to get
         // casted to a regular pointer, otherwise an error like this occurs:
        // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable
-        var elem_ptr_ty_pl: Type.Payload.ElemType = .{
-            .base = .{ .tag = .c_mut_pointer },
-            .data = elem_ty,
-        };
-        const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
+        const elem_ptr_ty = try mod.ptrType(.{
+            .child = elem_ty.ip_index,
+            .flags = .{
+                .size = .C,
+            },
+        });
 
         const index = try f.allocLocal(inst, Type.usize);
 
         try writer.writeAll("for (");
         try f.writeCValue(writer, index, .Other);
         try writer.writeAll(" = ");
-        try f.object.dg.renderValue(writer, Type.usize, Value.zero, .Initializer);
+        try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, 0), .Initializer);
         try writer.writeAll("; ");
         try f.writeCValue(writer, index, .Other);
         try writer.writeAll(" != ");
-        switch (dest_ty.ptrSize()) {
+        switch (dest_ty.ptrSize(mod)) {
             .Slice => {
                 try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
             },
             .One => {
-                const array_ty = dest_ty.childType();
-                try writer.print("{d}", .{array_ty.arrayLen()});
+                const array_ty = dest_ty.childType(mod);
+                try writer.print("{d}", .{array_ty.arrayLen(mod)});
             },
             .Many, .C => unreachable,
         }
@@ -6357,7 +6347,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     const bitcasted = try bitcast(f, Type.u8, value, elem_ty);
 
     try writer.writeAll("memset(");
-    switch (dest_ty.ptrSize()) {
+    switch (dest_ty.ptrSize(mod)) {
         .Slice => {
             try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
             try writer.writeAll(", ");
@@ -6367,8 +6357,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
             try writer.writeAll(");\n");
         },
         .One => {
-            const array_ty = dest_ty.childType();
-            const len = array_ty.arrayLen() * elem_abi_size;
+            const array_ty = dest_ty.childType(mod);
+            const len = array_ty.arrayLen(mod) * elem_abi_size;
 
             try f.writeCValue(writer, dest_slice, .FunctionArgument);
             try writer.writeAll(", ");
@@ -6383,12 +6373,12 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
 }
 
 fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const dest_ptr = try f.resolveInst(bin_op.lhs);
     const src_ptr = try f.resolveInst(bin_op.rhs);
-    const dest_ty = f.air.typeOf(bin_op.lhs);
-    const src_ty = f.air.typeOf(bin_op.rhs);
-    const target = f.object.dg.module.getTarget();
+    const dest_ty = f.typeOf(bin_op.lhs);
+    const src_ty = f.typeOf(bin_op.rhs);
     const writer = f.object.writer();
 
     try writer.writeAll("memcpy(");
@@ -6396,10 +6386,10 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
     try writer.writeAll(", ");
     try writeSliceOrPtr(f, writer, src_ptr, src_ty);
     try writer.writeAll(", ");
-    switch (dest_ty.ptrSize()) {
+    switch (dest_ty.ptrSize(mod)) {
         .Slice => {
-            const elem_ty = dest_ty.childType();
-            const elem_abi_size = elem_ty.abiSize(target);
+            const elem_ty = dest_ty.childType(mod);
+            const elem_abi_size = elem_ty.abiSize(mod);
             try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" });
             if (elem_abi_size > 1) {
                 try writer.print(" * {d});\n", .{elem_abi_size});
@@ -6408,10 +6398,10 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
             }
         },
         .One => {
-            const array_ty = dest_ty.childType();
-            const elem_ty = array_ty.childType();
-            const elem_abi_size = elem_ty.abiSize(target);
-            const len = array_ty.arrayLen() * elem_abi_size;
+            const array_ty = dest_ty.childType(mod);
+            const elem_ty = array_ty.childType(mod);
+            const elem_abi_size = elem_ty.abiSize(mod);
+            const len = array_ty.arrayLen(mod) * elem_abi_size;
             try writer.print("{d});\n", .{len});
         },
         .Many, .C => unreachable,
@@ -6422,16 +6412,16 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const union_ptr = try f.resolveInst(bin_op.lhs);
     const new_tag = try f.resolveInst(bin_op.rhs);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const target = f.object.dg.module.getTarget();
-    const union_ty = f.air.typeOf(bin_op.lhs).childType();
-    const layout = union_ty.unionGetLayout(target);
+    const union_ty = f.typeOf(bin_op.lhs).childType(mod);
+    const layout = union_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return .none;
-    const tag_ty = union_ty.unionTagTypeSafety().?;
+    const tag_ty = union_ty.unionTagTypeSafety(mod).?;
 
     const writer = f.object.writer();
     const a = try Assignment.start(f, writer, tag_ty);
@@ -6443,17 +6433,17 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
-    const union_ty = f.air.typeOf(ty_op.operand);
-    const target = f.object.dg.module.getTarget();
-    const layout = union_ty.unionGetLayout(target);
+    const union_ty = f.typeOf(ty_op.operand);
+    const layout = union_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return .none;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
     const a = try Assignment.start(f, writer, inst_ty);
@@ -6465,10 +6455,11 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
-    const inst_ty = f.air.typeOfIndex(inst);
-    const enum_ty = f.air.typeOf(un_op);
+    const inst_ty = f.typeOfIndex(inst);
+    const enum_ty = f.typeOf(un_op);
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
@@ -6476,7 +6467,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
     const local = try f.allocLocal(inst, inst_ty);
     try f.writeCValue(writer, local, .Other);
     try writer.print(" = {s}(", .{
-        try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl() }, .{ .tag_name = enum_ty }),
+        try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl(mod) }, .{ .tag_name = enum_ty }),
     });
     try f.writeCValue(writer, operand, .Other);
     try writer.writeAll(");\n");
@@ -6488,7 +6479,7 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
     const writer = f.object.writer();
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
     const local = try f.allocLocal(inst, inst_ty);
@@ -6501,13 +6492,14 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
-    const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_ty = f.typeOfIndex(inst);
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -6532,7 +6524,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
     const rhs = try f.resolveInst(extra.rhs);
     try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -6555,41 +6547,31 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
 
-    const mask = f.air.values[extra.mask];
+    const mask = extra.mask.toValue();
     const lhs = try f.resolveInst(extra.a);
     const rhs = try f.resolveInst(extra.b);
 
-    const module = f.object.dg.module;
-    const target = module.getTarget();
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
     try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands
     for (0..extra.mask_len) |index| {
-        var dst_pl = Value.Payload.U64{
-            .base = .{ .tag = .int_u64 },
-            .data = @intCast(u64, index),
-        };
-
         try f.writeCValue(writer, local, .Other);
         try writer.writeByte('[');
-        try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&dst_pl.base), .Other);
+        try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, index), .Other);
         try writer.writeAll("] = ");
 
-        var buf: Value.ElemValueBuffer = undefined;
-        const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target);
-        var src_pl = Value.Payload.U64{
-            .base = .{ .tag = .int_u64 },
-            .data = @intCast(u64, mask_elem ^ mask_elem >> 63),
-        };
+        const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
+        const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63));
 
         try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other);
         try writer.writeByte('[');
-        try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&src_pl.base), .Other);
+        try f.object.dg.renderValue(writer, Type.usize, src_val, .Other);
        try writer.writeAll("];\n");
     }
@@ -6597,16 +6579,16 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const reduce = f.air.instructions.items(.data)[inst].reduce;
 
-    const target = f.object.dg.module.getTarget();
-    const scalar_ty = f.air.typeOfIndex(inst);
+    const scalar_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(reduce.operand);
     try reap(f, inst, &.{reduce.operand});
-    const operand_ty = f.air.typeOf(reduce.operand);
+    const operand_ty = f.typeOf(reduce.operand);
     const writer = f.object.writer();
 
-    const use_operator = scalar_ty.bitSize(target) <= 64;
+    const use_operator = scalar_ty.bitSize(mod) <= 64;
     const op: union(enum) {
         const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
         float_op: Func,
@@ -6617,28 +6599,28 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
         .And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } },
         .Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
         .Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
-        .Min => switch (scalar_ty.zigTypeTag()) {
+        .Min => switch (scalar_ty.zigTypeTag(mod)) {
             .Int => if (use_operator) .{ .ternary = " < " } else .{
                 .builtin = .{ .operation = "min" },
             },
             .Float => .{ .float_op = .{ .operation = "fmin" } },
             else => unreachable,
         },
-        .Max => switch (scalar_ty.zigTypeTag()) {
+        .Max => switch (scalar_ty.zigTypeTag(mod)) {
             .Int => if (use_operator) .{ .ternary = " > " } else .{
                 .builtin = .{ .operation = "max" },
            },
             .Float => .{ .float_op = .{ .operation = "fmax" } },
             else => unreachable,
         },
-        .Add => switch (scalar_ty.zigTypeTag()) {
+        .Add => switch (scalar_ty.zigTypeTag(mod)) {
             .Int => if (use_operator) .{ .infix = " += " } else .{
                 .builtin = .{ .operation = "addw", .info = .bits },
             },
             .Float => .{ .builtin = .{ .operation = "add" } },
             else => unreachable,
         },
-        .Mul => switch (scalar_ty.zigTypeTag()) {
+        .Mul => switch (scalar_ty.zigTypeTag(mod)) {
             .Int => if (use_operator) .{ .infix = " *= " } else .{
                 .builtin = .{ .operation = "mulw", .info = .bits },
             },
@@ -6663,43 +6645,42 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
     try f.writeCValue(writer, accum, .Other);
     try writer.writeAll(" = ");
 
-    var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa);
-    defer arena.deinit();
-
-    const ExpectedContents = union {
-        u: Value.Payload.U64,
-        i: Value.Payload.I64,
-        f16: Value.Payload.Float_16,
-        f32: Value.Payload.Float_32,
-        f64: Value.Payload.Float_64,
-        f80: Value.Payload.Float_80,
-        f128: Value.Payload.Float_128,
-    };
-    var stack align(@alignOf(ExpectedContents)) =
-        std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
     try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
-        .Or, .Xor, .Add => Value.zero,
-        .And => switch (scalar_ty.zigTypeTag()) {
-            .Bool => Value.one,
-            else => switch (scalar_ty.intInfo(target).signedness) {
-                .unsigned => try scalar_ty.maxInt(stack.get(), target),
-                .signed => Value.negative_one,
+        .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
+            .Bool => Value.false,
+            .Int => try mod.intValue(scalar_ty, 0),
+            else => unreachable,
+        },
+        .And => switch (scalar_ty.zigTypeTag(mod)) {
+            .Bool => Value.true,
+            .Int => switch (scalar_ty.intInfo(mod).signedness) {
+                .unsigned => try scalar_ty.maxIntScalar(mod, scalar_ty),
+                .signed => try mod.intValue(scalar_ty, -1),
             },
-        },
-        .Min => switch (scalar_ty.zigTypeTag()) {
-            .Bool => Value.one,
-            .Int => try scalar_ty.maxInt(stack.get(), target),
-            .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
             else => unreachable,
         },
-        .Max => switch (scalar_ty.zigTypeTag()) {
-            .Bool => Value.zero,
-            .Int => try scalar_ty.minInt(stack.get(), target),
-            .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
+        .Add => switch (scalar_ty.zigTypeTag(mod)) {
+            .Int => try mod.intValue(scalar_ty, 0),
+            .Float => try mod.floatValue(scalar_ty, 0.0),
+            else => unreachable,
+        },
+        .Mul => switch (scalar_ty.zigTypeTag(mod)) {
+            .Int => try mod.intValue(scalar_ty, 1),
+            .Float => try mod.floatValue(scalar_ty, 1.0),
+            else => unreachable,
+        },
+        .Min => switch (scalar_ty.zigTypeTag(mod)) {
+            .Bool => Value.true,
+            .Int => try scalar_ty.maxIntScalar(mod, scalar_ty),
+            .Float => try mod.floatValue(scalar_ty, std.math.nan_f128),
+            else => unreachable,
+        },
+        .Max => switch (scalar_ty.zigTypeTag(mod)) {
+            .Bool => Value.false,
+            .Int => try scalar_ty.minIntScalar(mod, scalar_ty),
+            .Float => try mod.floatValue(scalar_ty, std.math.nan_f128),
             else => unreachable,
         },
-        .Mul => Value.one,
     }, .Initializer);
     try writer.writeAll(";\n");
@@ -6753,9 +6734,11 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
+    const ip = &mod.intern_pool;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
-    const inst_ty = f.air.typeOfIndex(inst);
-    const len = @intCast(usize, inst_ty.arrayLen());
+    const inst_ty = f.typeOfIndex(inst);
+    const len = @intCast(usize, inst_ty.arrayLen(mod));
     const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
     const gpa = f.object.dg.gpa;
     const resolved_elements = try gpa.alloc(CValue, elements.len);
@@ -6770,13 +6753,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
         }
     }
 
-    const target = f.object.dg.module.getTarget();
-
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
-    switch (inst_ty.zigTypeTag()) {
+    switch (inst_ty.zigTypeTag(mod)) {
         .Array, .Vector => {
-            const elem_ty = inst_ty.childType();
+            const elem_ty = inst_ty.childType(mod);
             const a = try Assignment.init(f, elem_ty);
             for (resolved_elements, 0..) |element, i| {
                 try a.restart(f, writer);
@@ -6786,7 +6767,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 try f.writeCValue(writer, element, .Other);
                 try a.end(f, writer);
             }
-            if (inst_ty.sentinel()) |sentinel| {
+            if (inst_ty.sentinel(mod)) |sentinel| {
                 try a.restart(f, writer);
                 try f.writeCValue(writer, local, .Other);
                 try writer.print("[{d}]", .{resolved_elements.len});
@@ -6795,17 +6776,17 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 try a.end(f, writer);
             }
         },
-        .Struct => switch (inst_ty.containerLayout()) {
+        .Struct => switch (inst_ty.containerLayout(mod)) {
             .Auto, .Extern => for (resolved_elements, 0..) |element, field_i| {
-                if (inst_ty.structFieldIsComptime(field_i)) continue;
-                const field_ty = inst_ty.structFieldType(field_i);
-                if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+                const field_ty = inst_ty.structFieldType(field_i, mod);
+                if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                 const a = try Assignment.start(f, writer, field_ty);
-                try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple())
+                try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod))
                     .{ .field = field_i }
                 else
-                    .{ .identifier = inst_ty.structFieldName(field_i) });
+                    .{ .identifier = ip.stringToSlice(inst_ty.structFieldName(field_i, mod)) });
                 try a.assign(f, writer);
                 try f.writeCValue(writer, element, .Other);
                 try a.end(f, writer);
@@ -6813,22 +6794,17 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
             .Packed => {
                 try f.writeCValue(writer, local, .Other);
                 try writer.writeAll(" = ");
-                const int_info = inst_ty.intInfo(target);
+                const int_info = inst_ty.intInfo(mod);
 
-                var bit_offset_ty_pl = Type.Payload.Bits{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = Type.smallestUnsignedBits(int_info.bits - 1),
-                };
-                const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+                const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
 
-                var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 };
-                const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
+                var bit_offset: u64 = 0;
 
                 var empty = true;
                 for (0..elements.len) |field_i| {
-                    if (inst_ty.structFieldIsComptime(field_i)) continue;
-                    const field_ty = inst_ty.structFieldType(field_i);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                    if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+                    const field_ty = inst_ty.structFieldType(field_i, mod);
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                     if (!empty) {
                         try writer.writeAll("zig_or_");
@@ -6839,9 +6815,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 }
                 empty = true;
                 for (resolved_elements, 0..) |element, field_i| {
-                    if (inst_ty.structFieldIsComptime(field_i)) continue;
-                    const field_ty = inst_ty.structFieldType(field_i);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                    if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
+                    const field_ty = inst_ty.structFieldType(field_i, mod);
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                     if (!empty) try writer.writeAll(", ");
                     // TODO: Skip this entire shift if val is 0?
@@ -6849,13 +6825,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                     try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
                     try writer.writeByte('(');
-                    if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) {
+                    if (inst_ty.isAbiInt(mod) and (field_ty.isAbiInt(mod) or field_ty.isPtrAtRuntime(mod))) {
                         try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
                     } else {
                         try writer.writeByte('(');
                         try f.renderType(writer, inst_ty);
                         try writer.writeByte(')');
-                        if (field_ty.isPtrAtRuntime()) {
+                        if (field_ty.isPtrAtRuntime(mod)) {
                             try writer.writeByte('(');
                             try f.renderType(writer, switch (int_info.signedness) {
                                 .unsigned => Type.usize,
@@ -6867,12 +6843,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                     }
 
                     try writer.writeAll(", ");
+                    const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
                     try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
                     try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
                     try writer.writeByte(')');
                     if (!empty) try writer.writeByte(')');
 
-                    bit_offset_val_pl.data += field_ty.bitSize(target);
+                    bit_offset += field_ty.bitSize(mod);
                     empty = false;
                 }
@@ -6886,14 +6863,15 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
+    const ip = &mod.intern_pool;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;
-    const union_ty = f.air.typeOfIndex(inst);
-    const target = f.object.dg.module.getTarget();
-    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+    const union_ty = f.typeOfIndex(inst);
+    const union_obj = mod.typeToUnion(union_ty).?;
     const field_name = union_obj.fields.keys()[extra.field_index];
-    const payload_ty = f.air.typeOf(extra.init);
+    const payload_ty = f.typeOf(extra.init);
     const payload = try f.resolveInst(extra.init);
     try reap(f, inst, &.{extra.init});
@@ -6907,19 +6885,14 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
         return local;
     }
 
-    const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: {
-        const layout = union_ty.unionGetLayout(target);
+    const field: CValue = if (union_ty.unionTagTypeSafety(mod)) |tag_ty| field: {
+        const layout = union_ty.unionGetLayout(mod);
         if (layout.tag_size != 0) {
-            const field_index = tag_ty.enumFieldIndex(field_name).?;
+            const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
 
-            var tag_pl: Value.Payload.U32 = .{
-                .base = .{ .tag = .enum_field_index },
-                .data = @intCast(u32, field_index),
-            };
-            const tag_val = Value.initPayload(&tag_pl.base);
+            const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
 
-            var int_pl: Value.Payload.U64 = undefined;
-            const int_val = tag_val.enumToInt(tag_ty, &int_pl);
+            const int_val = try tag_val.enumToInt(tag_ty, mod);
 
             const a = try Assignment.start(f, writer, tag_ty);
             try f.writeCValueMember(writer, local, .{ .identifier = "tag" });
@@ -6927,8 +6900,8 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
             try writer.print("{}", .{try f.fmtIntLiteral(tag_ty, int_val)});
             try a.end(f, writer);
         }
-        break :field .{ .payload_identifier = field_name };
-    } else .{ .identifier = field_name };
+        break :field .{ .payload_identifier = ip.stringToSlice(field_name) };
+    } else .{ .identifier = ip.stringToSlice(field_name) };
 
     const a = try Assignment.start(f, writer, payload_ty);
     try f.writeCValueMember(writer, local, field);
@@ -6963,7 +6936,7 @@ fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue {
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
 
     const writer = f.object.writer();
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const local = try f.allocLocal(inst, inst_ty);
     try f.writeCValue(writer, local, .Other);
@@ -6977,7 +6950,7 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
 
     const writer = f.object.writer();
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(pl_op.operand);
     try reap(f, inst, &.{pl_op.operand});
     const local = try f.allocLocal(inst, inst_ty);
@@ -6991,13 +6964,14 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
 
-    const operand_ty = f.air.typeOf(un_op);
-    const scalar_ty = operand_ty.scalarType();
+    const operand_ty = f.typeOf(un_op);
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, operand_ty);
@@ -7016,13 +6990,14 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
 
-    const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_ty = f.typeOfIndex(inst);
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -7043,14 +7018,15 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
 }
 
 fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
 
     const lhs = try f.resolveInst(bin_op.lhs);
     const rhs = try f.resolveInst(bin_op.rhs);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_ty = f.typeOfIndex(inst);
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -7074,6 +7050,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
 }
 
 fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
     const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data;
@@ -7082,8 +7059,8 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
     const addend = try f.resolveInst(pl_op.operand);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
 
-    const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_ty = f.typeOfIndex(inst);
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -7108,7 +7085,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const fn_cty = try f.typeToCType(f.object.dg.decl.?.ty, .complete);
     const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len;
@@ -7127,7 +7104,7 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
 
 fn airCVaArg(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const va_list = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
@@ -7158,7 +7135,7 @@ fn airCVaEnd(f: *Function, inst: Air.Inst.Index) !CValue {
 
 fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const va_list = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
@@ -7279,8 +7256,9 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 {
     };
 }
 
-fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 {
-    return if (ty.isInt()) switch (ty.intInfo(target).bits) {
+fn compilerRtAbbrev(ty: Type, mod: *Module) []const u8 {
+    const target = mod.getTarget();
+    return if (ty.isInt(mod)) switch (ty.intInfo(mod).bits) {
         1...32 => "si",
         33...64 => "di",
         65...128 => "ti",
@@ -7407,7 +7385,7 @@ fn undefPattern(comptime IntType: type) IntType {
 
 const FormatIntLiteralContext = struct {
     dg: *DeclGen,
-    int_info: std.builtin.Type.Int,
+    int_info: InternPool.Key.IntType,
     kind: CType.Kind,
     cty: CType,
     val: Value,
@@ -7418,7 +7396,8 @@ fn formatIntLiteral(
     options: std.fmt.FormatOptions,
     writer: anytype,
 ) @TypeOf(writer).Error!void {
-    const target = data.dg.module.getTarget();
+    const mod = data.dg.module;
+    const target = mod.getTarget();
 
     const ExpectedContents = struct {
         const base = 10;
@@ -7438,7 +7417,7 @@ fn formatIntLiteral(
     defer allocator.free(undef_limbs);
 
     var int_buf: Value.BigIntSpace = undefined;
-    const int = if (data.val.isUndefDeep()) blk: {
+    const int = if (data.val.isUndefDeep(mod)) blk: {
         undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits));
         @memset(undef_limbs, undefPattern(BigIntLimb));
@@ -7449,7 +7428,7 @@ fn formatIntLiteral(
         };
         undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
         break :blk undef_int.toConst();
-    } else data.val.toBigInt(&int_buf, target);
+    } else data.val.toBigInt(&int_buf, mod);
     assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
 
     const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
@@ -7576,10 +7555,6 @@ fn formatIntLiteral(
             c_limb_int_info.signedness = .unsigned;
             c_limb_cty = c_limb_info.cty;
         }
-        var c_limb_val_pl = Value.Payload.BigInt{
-            .base = .{ .tag = if (c_limb_mut.positive) .int_big_positive else .int_big_negative },
-            .data = c_limb_mut.limbs[0..c_limb_mut.len],
-        };
 
         if (limb_offset > 0) try writer.writeAll(", ");
         try formatIntLiteral(.{
@@ -7587,7 +7562,7 @@ fn formatIntLiteral(
             .int_info = c_limb_int_info,
             .kind = data.kind,
             .cty = c_limb_cty,
-            .val = Value.initPayload(&c_limb_val_pl.base),
+            .val = try mod.intValue_big(Type.comptime_int, c_limb_mut.toConst()),
         }, fmt, options, writer);
     }
 }
@@ -7684,20 +7659,21 @@ const Vectorize = struct {
     index: CValue = .none,
 
     pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
-        return if (ty.zigTypeTag() == .Vector) index: {
-            var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() };
+        const mod = f.object.dg.module;
+        return if (ty.zigTypeTag(mod) == .Vector) index: {
+            const len_val = try mod.intValue(Type.usize, ty.vectorLen(mod));
 
             const local = try f.allocLocal(inst, Type.usize);
 
             try writer.writeAll("for (");
             try f.writeCValue(writer, local, .Other);
-            try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
+            try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))});
             try f.writeCValue(writer, local, .Other);
             try writer.print(" < {d}; ", .{
-                try f.fmtIntLiteral(Type.usize, Value.initPayload(&len_pl.base)),
+                try f.fmtIntLiteral(Type.usize, len_val),
             });
             try f.writeCValue(writer, local, .Other);
-            try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)});
+            try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))});
             f.object.indent_writer.pushIndent();
 
             break :index .{ .index = local };
@@ -7721,34 +7697,30 @@ const Vectorize = struct {
     }
 };
 
-const LowerFnRetTyBuffer = struct {
-    names: [1][]const u8,
-    types: [1]Type,
-    values: [1]Value,
-    payload: Type.Payload.AnonStruct,
-};
-fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) Type {
-    if (ret_ty.zigTypeTag() == .NoReturn) return Type.initTag(.noreturn);
+fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
+    if (ret_ty.ip_index == .noreturn_type) return Type.noreturn;
 
-    if (lowersToArray(ret_ty, target)) {
-        buffer.names = [1][]const u8{"array"};
-        buffer.types = [1]Type{ret_ty};
-        buffer.values = [1]Value{Value.initTag(.unreachable_value)};
-        buffer.payload = .{ .data = .{
-            .names = &buffer.names,
-            .types = &buffer.types,
-            .values = &buffer.values,
-        } };
-        return Type.initPayload(&buffer.payload.base);
+    if (lowersToArray(ret_ty, mod)) {
+        const names = [1]InternPool.NullTerminatedString{
+            try mod.intern_pool.getOrPutString(mod.gpa, "array"),
+        };
+        const types = [1]InternPool.Index{ret_ty.ip_index};
+        const values = [1]InternPool.Index{.none};
+        const interned = try mod.intern(.{ .anon_struct_type = .{
+            .names = &names,
+            .types = &types,
+            .values = &values,
+        } });
+        return interned.toType();
     }
 
-    return if (ret_ty.hasRuntimeBitsIgnoreComptime()) ret_ty else Type.void;
+    return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
 }
 
-fn lowersToArray(ty: Type, target: std.Target) bool {
-    return switch (ty.zigTypeTag()) {
+fn lowersToArray(ty: Type, mod: *Module) bool {
+    return switch (ty.zigTypeTag(mod)) {
         .Array, .Vector => return true,
-        else => return ty.isAbiInt() and toCIntBits(@intCast(u32, ty.bitSize(target))) == null,
+        else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
     };
 }
@@ -7765,8 +7737,8 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi
 
 fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void {
     const ref_inst = Air.refToIndex(ref) orelse return;
+    assert(f.air.instructions.items(.tag)[ref_inst] != .interned);
     const c_value = (f.value_map.fetchRemove(ref_inst) orelse return).value;
-    if (f.air.instructions.items(.tag)[ref_inst] == .constant) return;
     const local_index = switch (c_value) {
         .local, .new_local => |l| l,
         else => return,
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 6116d070e6..81ca1dd80d 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -292,19 +292,19 @@ pub const CType = extern union {
                 .abi = std.math.log2_int(u32, abi_alignment),
             };
         }
-        pub fn abiAlign(ty: Type, target: Target) AlignAs {
-            const abi_align = ty.abiAlignment(target);
+        pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
+            const abi_align = ty.abiAlignment(mod);
             return init(abi_align, abi_align);
         }
-        pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs {
+        pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs {
             return init(
-                struct_ty.structFieldAlign(field_i, target),
-                struct_ty.structFieldType(field_i).abiAlignment(target),
+                struct_ty.structFieldAlign(field_i, mod),
+                struct_ty.structFieldType(field_i, mod).abiAlignment(mod),
             );
         }
-        pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs {
-            const union_obj = union_ty.cast(Type.Payload.Union).?.data;
-            const union_payload_align = union_obj.abiAlignment(target, false);
+        pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
+            const union_obj = mod.typeToUnion(union_ty).?;
+            const union_payload_align = union_obj.abiAlignment(mod, false);
             return init(union_payload_align, union_payload_align);
         }
@@ -344,8 +344,8 @@ pub const CType = extern union {
             return self.map.entries.items(.hash)[index - Tag.no_payload_count];
         }
 
-        pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index {
-            const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } };
+        pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index {
+            const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } };
 
             var convert: Convert = undefined;
             convert.initType(ty, kind, lookup) catch unreachable;
@@ -405,7 +405,7 @@ pub const CType = extern union {
             );
             if (!gop.found_existing) {
                 errdefer _ = self.set.map.pop();
-                gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert);
+                gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert);
             }
             if (std.debug.runtime_safety) {
                 const adapter = TypeAdapter64{
@@ -1236,10 +1236,10 @@ pub const CType = extern union {
         }
 
         pub const Lookup = union(enum) {
-            fail: Target,
+            fail: *Module,
             imm: struct {
                 set: *const Store.Set,
-                target: Target,
+                mod: *Module,
             },
             mut: struct {
                 promoted: *Store.Promoted,
@@ -1254,10 +1254,14 @@ pub const CType = extern union {
             }
 
             pub fn getTarget(self: @This()) Target {
+                return self.getModule().getTarget();
+            }
+
+            pub fn getModule(self: @This()) *Module {
                 return switch (self) {
-                    .fail => |target| target,
-                    .imm => |imm| imm.target,
-                    .mut => |mut| mut.mod.getTarget(),
+                    .fail => |mod| mod,
+                    .imm => |imm| imm.mod,
+                    .mut => |mut| mut.mod,
                 };
             }
@@ -1272,7 +1276,7 @@ pub const CType = extern union {
             pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index {
                 return switch (self) {
                     .fail => null,
-                    .imm => |imm| imm.set.typeToIndex(ty, imm.target, kind),
+                    .imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind),
                     .mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind),
                 };
             }
@@ -1284,7 +1288,7 @@ pub const CType = extern union {
             pub fn freeze(self: @This()) @This() {
                 return switch (self) {
                     .fail, .imm => self,
-                    .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } },
+                    .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } },
                 };
             }
         };
@@ -1338,7 +1342,7 @@ pub const CType = extern union {
                     self.storage.anon.fields[0] = .{
                         .name = "array",
                         .type = array_idx,
-                        .alignas = AlignAs.abiAlign(ty, lookup.getTarget()),
+                        .alignas = AlignAs.abiAlign(ty, lookup.getModule()),
                     };
                     self.initAnon(kind, fwd_idx, 1);
                 } else self.init(switch (kind) {
@@ -1350,30 +1354,30 @@ pub const CType = extern union {
            }
 
             pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
-                const target = lookup.getTarget();
+                const mod = lookup.getModule();
 
                 self.* = undefined;
-                if (!ty.isFnOrHasRuntimeBitsIgnoreComptime())
+                if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
                     self.init(.void)
-                else if (ty.isAbiInt()) switch (ty.tag()) {
-                    .usize => self.init(.uintptr_t),
-                    .isize => self.init(.intptr_t),
-                    .c_char => self.init(.char),
-                    .c_short => self.init(.short),
-                    .c_ushort => self.init(.@"unsigned short"),
-                    .c_int => self.init(.int),
-                    .c_uint => self.init(.@"unsigned int"),
-                    .c_long => self.init(.long),
-                    .c_ulong => self.init(.@"unsigned long"),
-                    .c_longlong => self.init(.@"long long"),
-                    .c_ulonglong => self.init(.@"unsigned long long"),
-                    else => switch (tagFromIntInfo(ty.intInfo(target))) {
+                else if (ty.isAbiInt(mod)) switch (ty.ip_index) {
+                    .usize_type => self.init(.uintptr_t),
+                    .isize_type => self.init(.intptr_t),
+                    .c_char_type => self.init(.char),
+                    .c_short_type => self.init(.short),
+                    .c_ushort_type => self.init(.@"unsigned short"),
+                    .c_int_type => self.init(.int),
+                    .c_uint_type => self.init(.@"unsigned int"),
+                    .c_long_type => self.init(.long),
+                    .c_ulong_type => self.init(.@"unsigned long"),
+                    .c_longlong_type => self.init(.@"long long"),
+                    .c_ulonglong_type => self.init(.@"unsigned long long"),
+                    else => switch (tagFromIntInfo(ty.intInfo(mod))) {
                         .void => unreachable,
                         else => |t| self.init(t),
                         .array => switch (kind) {
                             .forward, .complete, .global => {
-                                const abi_size = ty.abiSize(target);
-                                const abi_align = ty.abiAlignment(target);
+                                const abi_size = ty.abiSize(mod);
+                                const abi_align = ty.abiAlignment(mod);
                                 self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
                                     .len = @divExact(abi_size, abi_align),
                                     .elem_type = tagFromIntInfo(.{
@@ -1389,7 +1393,7 @@ pub const CType = extern union {
                             .payload => unreachable,
                         },
                     },
-                } else switch (ty.zigTypeTag()) {
+                } else switch (ty.zigTypeTag(mod)) {
                     .Frame => unreachable,
                     .AnyFrame => unreachable,
@@ -1408,18 +1412,18 @@ pub const CType = extern union {
                     .Bool => self.init(.bool),
 
-                    .Float => self.init(switch (ty.tag()) {
-                        .f16 => .zig_f16,
-                        .f32 => .zig_f32,
-                        .f64 => .zig_f64,
-                        .f80 => .zig_f80,
-                        .f128 => .zig_f128,
-                        .c_longdouble => .zig_c_longdouble,
+                    .Float => self.init(switch (ty.ip_index) {
+                        .f16_type => .zig_f16,
+                        .f32_type => .zig_f32,
+                        .f64_type => .zig_f64,
+                        .f80_type => .zig_f80,
+                        .f128_type => .zig_f128,
+                        .c_longdouble_type => .zig_c_longdouble,
                         else => unreachable,
                     }),
 
                     .Pointer => {
-                        const info = ty.ptrInfo().data;
+                        const info = ty.ptrInfo(mod);
                         switch (info.size) {
                             .Slice => {
                                 if (switch (kind) {
@@ -1427,19 +1431,18 @@ pub const CType = extern union {
                                     .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
                                     .payload => unreachable,
                                 }) |fwd_idx| {
-                                    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                                    const ptr_ty = ty.slicePtrFieldType(&buf);
+                                    const ptr_ty = ty.slicePtrFieldType(mod);
                                     if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| {
                                         self.storage = .{ .anon = undefined };
                                         self.storage.anon.fields[0] = .{
                                             .name = "ptr",
                                             .type = ptr_idx,
-                                            .alignas = AlignAs.abiAlign(ptr_ty, target),
+                                            .alignas = AlignAs.abiAlign(ptr_ty, mod),
                                         };
                                         self.storage.anon.fields[1] = .{
                                             .name = "len",
                                             .type = Tag.uintptr_t.toIndex(),
-                                            .alignas = AlignAs.abiAlign(Type.usize, target),
+                                            .alignas = AlignAs.abiAlign(Type.usize, mod),
                                         };
                                         self.initAnon(kind, fwd_idx, 2);
                                     } else self.init(switch (kind) {
@@ -1462,16 +1465,12 @@ pub const CType = extern union {
                                 },
                             };
 
-                            var host_int_pl = Type.Payload.Bits{
-                                .base = .{ .tag = .int_unsigned },
-                                .data = info.host_size * 8,
-                            };
                             const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
-                                Type.initPayload(&host_int_pl.base)
+                                try mod.intType(.unsigned, info.host_size * 8)
                             else
                                 info.pointee_type;
 
-                            if (if (info.size == .C and pointee_ty.tag() == .u8)
+                            if (if (info.size == .C and pointee_ty.ip_index == .u8_type)
                                 Tag.char.toIndex()
                             else
                                 try lookup.typeToIndex(pointee_ty, .forward)) |child_idx|
@@ -1486,26 +1485,24 @@ pub const CType = extern union {
                         }
                     },
 
-                    .Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) {
-                        if (ty.castTag(.@"struct")) |struct_obj| {
-                            try self.initType(struct_obj.data.backing_int_ty, kind, lookup);
+                    .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
+                        if (mod.typeToStruct(ty)) |struct_obj| {
+                            try self.initType(struct_obj.backing_int_ty, kind, lookup);
                         } else {
-                            var buf: Type.Payload.Bits = .{
-                                .base = .{ .tag = .int_unsigned },
-                                .data = @intCast(u16, ty.bitSize(target)),
-                            };
-                            try self.initType(Type.initPayload(&buf.base), kind, lookup);
+                            const bits = @intCast(u16, ty.bitSize(mod));
+                            const int_ty = try mod.intType(.unsigned, bits);
+                            try self.initType(int_ty, kind, lookup);
                         }
-                    } else if (ty.isTupleOrAnonStruct()) {
+                    } else if (ty.isTupleOrAnonStruct(mod)) {
                         if (lookup.isMutable()) {
                             for (0..switch (zig_ty_tag) {
-                                .Struct => ty.structFieldCount(),
-                                .Union => ty.unionFields().count(),
+                                .Struct => ty.structFieldCount(mod),
+                                .Union => ty.unionFields(mod).count(),
                                 else => unreachable,
                             }) |field_i| {
-                                const field_ty = ty.structFieldType(field_i);
-                                if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
-                                    !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                                const field_ty = ty.structFieldType(field_i, mod);
+                                if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
+                                    !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
                                 _ = try lookup.typeToIndex(field_ty, switch (kind) {
                                     .forward, .forward_parameter => .forward,
                                     .complete, .parameter => .complete,
@@ -1533,14 +1530,14 @@ pub const CType = extern union {
                             .payload => unreachable,
                         });
                     } else {
-                        const tag_ty = ty.unionTagTypeSafety();
+                        const tag_ty = ty.unionTagTypeSafety(mod);
                         const is_tagged_union_wrapper = kind != .payload and tag_ty != null;
                         const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper;
                         switch (kind) {
                             .forward, .forward_parameter => {
                                 self.storage = .{ .fwd = .{
                                     .base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union },
-                                    .data = ty.getOwnerDecl(),
+                                    .data = ty.getOwnerDecl(mod),
                                 } };
                                 self.value = .{ .cty = initPayload(&self.storage.fwd) };
                             },
@@ -1555,7 +1552,7 @@ pub const CType = extern union {
                                     self.storage.anon.fields[field_count] = .{
                                         .name = "payload",
                                         .type = payload_idx.?,
-                                        .alignas = AlignAs.unionPayloadAlign(ty, target),
+                                        .alignas = AlignAs.unionPayloadAlign(ty, mod),
                                     };
                                     field_count += 1;
                                 }
@@ -1563,7 +1560,7 @@ pub const CType = extern union {
                                     self.storage.anon.fields[field_count] = .{
                                         .name = "tag",
                                         .type = tag_idx.?,
-                                        .alignas = AlignAs.abiAlign(tag_ty.?, target),
+                                        .alignas = AlignAs.abiAlign(tag_ty.?, mod),
                                     };
                                     field_count += 1;
                                 }
@@ -1576,19 +1573,19 @@ pub const CType = extern union {
                                 } };
                                 self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) };
                             } else self.init(.@"struct");
-                        } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) {
+                        } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) {
                             self.init(.void);
                         } else {
                             var is_packed = false;
                             for (0..switch (zig_ty_tag) {
-                                .Struct => ty.structFieldCount(),
-                                .Union => ty.unionFields().count(),
+                                .Struct => ty.structFieldCount(mod),
+                                .Union => ty.unionFields(mod).count(),
                                 else => unreachable,
                             }) |field_i| {
-                                const field_ty = ty.structFieldType(field_i);
-                                if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                                const field_ty = ty.structFieldType(field_i, mod);
+                                if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-                                const field_align = AlignAs.fieldAlign(ty, field_i, target);
+                                const field_align = AlignAs.fieldAlign(ty, field_i, mod);
                                 if (field_align.@"align" < field_align.abi) {
                                     is_packed = true;
                                     if (!lookup.isMutable()) break;
@@ -1627,9 +1624,9 @@ pub const CType = extern union {
                             .Vector => .vector,
                             else => unreachable,
                         };
-                        if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| {
+                        if (try lookup.typeToIndex(ty.childType(mod), kind)) |child_idx| {
                             self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{
-                                .len = ty.arrayLenIncludingSentinel(),
+                                .len = ty.arrayLenIncludingSentinel(mod),
                                 .elem_type = child_idx,
                             } } };
                             self.value = .{ .cty = initPayload(&self.storage.seq) };
@@ -1641,10 +1638,9 @@ pub const CType = extern union {
                     },
 
                     .Optional => {
-                        var buf: Type.Payload.ElemType = undefined;
-                        const payload_ty = ty.optionalChild(&buf);
-                        if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
-                            if (ty.optionalReprIsPayload()) {
+                        const payload_ty = ty.optionalChild(mod);
+                        if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+                            if (ty.optionalReprIsPayload(mod)) {
                                 try self.initType(payload_ty, kind, lookup);
                             } else if (switch (kind) {
                                 .forward, .forward_parameter => @as(Index, undefined),
@@ -1661,12 +1657,12 @@ pub const CType = extern union {
                                 self.storage.anon.fields[0] = .{
                                     .name = "payload",
                                     .type = payload_idx,
-                                    .alignas = AlignAs.abiAlign(payload_ty, target),
+                                    .alignas = AlignAs.abiAlign(payload_ty, mod),
                                 };
                                 self.storage.anon.fields[1] = .{
                                     .name = "is_null",
                                     .type = Tag.bool.toIndex(),
-                                    .alignas = AlignAs.abiAlign(Type.bool, target),
+                                    .alignas = AlignAs.abiAlign(Type.bool, mod),
                                 };
                                 self.initAnon(kind, fwd_idx, 2);
                             } else self.init(switch (kind) {
@@ -1684,14 +1680,14 @@ pub const CType = extern union {
                             .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
                             .payload => unreachable,
                         }) |fwd_idx| {
-                            const payload_ty = ty.errorUnionPayload();
+                            const payload_ty = ty.errorUnionPayload(mod);
                             if (try lookup.typeToIndex(payload_ty, switch (kind) {
                                 .forward, .forward_parameter => .forward,
                                 .complete, .parameter => .complete,
                                 .global => .global,
                                 .payload => unreachable,
                             })) |payload_idx| {
-                                const error_ty = ty.errorUnionSet();
+                                const error_ty = ty.errorUnionSet(mod);
                                 if (payload_idx == Tag.void.toIndex()) {
                                     try self.initType(error_ty, kind, lookup);
                                 } else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| {
                                     self.storage = .{ .anon = undefined };
                                     self.storage.anon.fields[0] = .{
                                         .name = "payload",
                                         .type = payload_idx,
-                                        .alignas = AlignAs.abiAlign(payload_ty, target),
+                                        .alignas = AlignAs.abiAlign(payload_ty, mod),
                                     };
                                     self.storage.anon.fields[1] = .{
                                         .name = "error",
                                         .type = error_idx,
-                                        .alignas = AlignAs.abiAlign(error_ty, target),
+                                        .alignas = AlignAs.abiAlign(error_ty, mod),
                                     };
                                     self.initAnon(kind, fwd_idx, 2);
                                 } else self.init(switch (kind) {
@@ -1723,7 +1719,7 @@ pub const CType = extern union {
                     .Opaque => self.init(.void),
 
                     .Fn => {
-                        const info = ty.fnInfo();
+                        const info = mod.typeToFunc(ty).?;
                         if (!info.is_generic) {
                             if (lookup.isMutable()) {
                                 const param_kind: Kind = switch (kind) {
@@ -1731,10 +1727,10 @@ pub const CType = extern union {
                                     .complete, .parameter, .global => .parameter,
                                     .payload => unreachable,
                                 };
-                                _ = try lookup.typeToIndex(info.return_type, param_kind);
+                                _ = try lookup.typeToIndex(info.return_type.toType(), param_kind);
                                 for (info.param_types) |param_type| {
-                                    if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
-                                    _ = try lookup.typeToIndex(param_type, param_kind);
+                                    if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
+                                    _ = try lookup.typeToIndex(param_type.toType(), param_kind);
                                 }
                             }
                             self.init(if (info.is_var_args) .varargs_function else .function);
@@ -1900,16 +1896,16 @@ pub const CType = extern union {
             }
         }
 
-        fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType {
+        fn createFromType(store: *Store.Promoted, ty: Type, mod: *Module, kind: Kind) !CType {
            var convert: Convert = undefined;
-            try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } });
-            return createFromConvert(store, ty, target, kind, &convert);
+            try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } });
+            return createFromConvert(store, ty, mod, kind, &convert);
         }
 
         fn createFromConvert(
             store: *Store.Promoted,
             ty: Type,
-            target: Target,
+            mod: *Module,
             kind: Kind,
             convert: Convert,
         ) !CType {
@@ -1930,44 +1926,44 @@ pub const CType = extern union {
                 .packed_struct,
                 .packed_union,
                 => {
-                    const zig_ty_tag = ty.zigTypeTag();
+                    const zig_ty_tag = ty.zigTypeTag(mod);
                    const fields_len = switch (zig_ty_tag) {
-                        .Struct => ty.structFieldCount(),
-                        .Union => ty.unionFields().count(),
+                        .Struct => ty.structFieldCount(mod),
+                        .Union => ty.unionFields(mod).count(),
                         else => unreachable,
                     };
 
                     var c_fields_len: usize = 0;
                     for (0..fields_len) |field_i| {
-                        const field_ty = ty.structFieldType(field_i);
-                        if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
-                            !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                        const field_ty = ty.structFieldType(field_i, mod);
+                        if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
+                            !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
                         c_fields_len += 1;
                     }
 
                     const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
                     var c_field_i: usize = 0;
                     for (0..fields_len) |field_i| {
-                        const field_ty = ty.structFieldType(field_i);
-                        if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
-                            !field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                        const field_ty = ty.structFieldType(field_i, mod);
+                        if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
+                            !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                         defer c_field_i += 1;
                         fields_pl[c_field_i] = .{
-                            .name = try if (ty.isSimpleTuple())
+                            .name = try if (ty.isSimpleTuple(mod))
                                 std.fmt.allocPrintZ(arena, "f{}", .{field_i})
                             else
-                                arena.dupeZ(u8, switch (zig_ty_tag) {
-                                    .Struct => ty.structFieldName(field_i),
-                                    .Union => ty.unionFields().keys()[field_i],
+                                arena.dupeZ(u8, mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
+                                    .Struct => ty.structFieldName(field_i, mod),
+                                    .Union => ty.unionFields(mod).keys()[field_i],
                                     else => unreachable,
-                                }),
-                            .type = store.set.typeToIndex(field_ty, target, switch (kind) {
+                                })),
+                            .type = store.set.typeToIndex(field_ty, mod, switch (kind) {
                                 .forward, .forward_parameter => .forward,
                                 .complete, .parameter, .payload => .complete,
                                 .global => .global,
                             }).?,
-                            .alignas = AlignAs.fieldAlign(ty, field_i, target),
+                            .alignas = AlignAs.fieldAlign(ty, field_i, mod),
                        };
                     }
@@ -1988,8 +1984,8 @@ pub const CType = extern union {
                 const unnamed_pl = try arena.create(Payload.Unnamed);
                 unnamed_pl.* =
.{ .base = .{ .tag = t }, .data = .{ .fields = fields_pl, - .owner_decl = ty.getOwnerDecl(), - .id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable, + .owner_decl = ty.getOwnerDecl(mod), + .id = if (ty.unionTagTypeSafety(mod)) |_| 0 else unreachable, } }; return initPayload(unnamed_pl); }, @@ -2004,7 +2000,7 @@ pub const CType = extern union { const struct_pl = try arena.create(Payload.Aggregate); struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + .fwd_decl = store.set.typeToIndex(ty, mod, .forward).?, } }; return initPayload(struct_pl); }, @@ -2016,7 +2012,7 @@ pub const CType = extern union { .function, .varargs_function, => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (kind) { .forward, .forward_parameter => .forward_parameter, @@ -2026,21 +2022,21 @@ pub const CType = extern union { var c_params_len: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?; c_param_i += 1; } const fn_pl = try arena.create(Payload.Function); fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(info.return_type, target, param_kind).?, + .return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?, .param_types = params_pl, } }; return initPayload(fn_pl); @@ -2067,33 +2063,33 @@ pub const CType = extern union { } pub fn eql(self: @This(), ty: Type, cty: CType) bool { + const mod = self.lookup.getModule(); switch (self.convert.value) { .cty => |c| return c.eql(cty), .tag => |t| { if (t != cty.tag()) return false; - const target = self.lookup.getTarget(); switch (t) { .fwd_anon_struct, .fwd_anon_union, => { - if (!ty.isTupleOrAnonStruct()) return false; + if (!ty.isTupleOrAnonStruct(mod)) return false; var name_buf: [ std.fmt.count("f{}", .{std.math.maxInt(usize)}) ]u8 = undefined; const c_fields = cty.cast(Payload.Fields).?.data; - const zig_ty_tag = ty.zigTypeTag(); + const zig_ty_tag = ty.zigTypeTag(mod); var c_field_i: usize = 0; for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), - .Union => ty.unionFields().count(), + .Struct => ty.structFieldCount(mod), + .Union => ty.unionFields(mod).count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; const c_field = &c_fields[c_field_i]; @@ -2105,15 +2101,16 @@ pub const CType = extern union { .payload => unreachable, }) or !mem.eql( u8, - if (ty.isSimpleTuple()) - std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i), - .Union => 
ty.unionFields().keys()[field_i], - else => unreachable, - }, + if (ty.isSimpleTuple(mod)) + std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable + else + mod.intern_pool.stringToSlice(switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i, mod), + .Union => ty.unionFields(mod).keys()[field_i], + else => unreachable, + }), mem.span(c_field.name), - ) or AlignAs.fieldAlign(ty, field_i, target).@"align" != + ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" != c_field.alignas.@"align") return false; } return true; @@ -2125,9 +2122,9 @@ pub const CType = extern union { .packed_unnamed_union, => switch (self.kind) { .forward, .forward_parameter, .complete, .parameter, .global => unreachable, - .payload => if (ty.unionTagTypeSafety()) |_| { + .payload => if (ty.unionTagTypeSafety(mod)) |_| { const data = cty.cast(Payload.Unnamed).?.data; - return ty.getOwnerDecl() == data.owner_decl and data.id == 0; + return ty.getOwnerDecl(mod) == data.owner_decl and data.id == 0; } else unreachable, }, @@ -2146,9 +2143,9 @@ pub const CType = extern union { .function, .varargs_function, => { - if (ty.zigTypeTag() != .Fn) return false; + if (ty.zigTypeTag(mod) != .Fn) return false; - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const data = cty.cast(Payload.Function).?.data; const param_kind: Kind = switch (self.kind) { @@ -2157,18 +2154,18 @@ pub const CType = extern union { .payload => unreachable, }; - if (!self.eqlRecurse(info.return_type, data.return_type, param_kind)) + if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind)) return false; var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; if (c_param_i >= data.param_types.len) return false; const param_cty = data.param_types[c_param_i]; c_param_i += 1; - if (!self.eqlRecurse(param_type, param_cty, param_kind)) + if (!self.eqlRecurse(param_type.toType(), param_cty, param_kind)) return false; } return c_param_i == data.param_types.len; @@ -2202,7 +2199,7 @@ pub const CType = extern union { .tag => |t| { autoHash(hasher, t); - const target = self.lookup.getTarget(); + const mod = self.lookup.getModule(); switch (t) { .fwd_anon_struct, .fwd_anon_union, @@ -2211,15 +2208,15 @@ pub const CType = extern union { std.fmt.count("f{}", .{std.math.maxInt(usize)}) ]u8 = undefined; - const zig_ty_tag = ty.zigTypeTag(); - for (0..switch (ty.zigTypeTag()) { - .Struct => ty.structFieldCount(), - .Union => ty.unionFields().count(), + const zig_ty_tag = ty.zigTypeTag(mod); + for (0..switch (ty.zigTypeTag(mod)) { + .Struct => ty.structFieldCount(mod), + .Union => ty.unionFields(mod).count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { .forward, .forward_parameter => .forward, @@ -2227,14 +2224,15 @@ pub const CType = extern union { .global => .global, .payload => unreachable, }); - hasher.update(if (ty.isSimpleTuple()) + hasher.update(if (ty.isSimpleTuple(mod)) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else switch 
(zig_ty_tag) { - .Struct => ty.structFieldName(field_i), - .Union => ty.unionFields().keys()[field_i], - else => unreachable, - }); - autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align"); + else + mod.intern_pool.stringToSlice(switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i, mod), + .Union => ty.unionFields(mod).keys()[field_i], + else => unreachable, + })); + autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align"); } }, @@ -2244,8 +2242,8 @@ pub const CType = extern union { .packed_unnamed_union, => switch (self.kind) { .forward, .forward_parameter, .complete, .parameter, .global => unreachable, - .payload => if (ty.unionTagTypeSafety()) |_| { - autoHash(hasher, ty.getOwnerDecl()); + .payload => if (ty.unionTagTypeSafety(mod)) |_| { + autoHash(hasher, ty.getOwnerDecl(mod)); autoHash(hasher, @as(u32, 0)); } else unreachable, }, @@ -2261,7 +2259,7 @@ pub const CType = extern union { .function, .varargs_function, => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (self.kind) { .forward, .forward_parameter => .forward_parameter, @@ -2269,10 +2267,10 @@ pub const CType = extern union { .payload => unreachable, }; - self.updateHasherRecurse(hasher, info.return_type, param_kind); + self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - self.updateHasherRecurse(hasher, param_type, param_kind); + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + self.updateHasherRecurse(hasher, param_type.toType(), param_kind); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index beb2309455..47be4148d3 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -12,6 +12,7 @@ const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); const build_options = @import("build_options"); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); const TypedValue = @import("../TypedValue.zig"); const Air = @import("../Air.zig"); @@ -361,15 +362,11 @@ pub const Object = struct { decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction. named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), - /// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of - /// the compiler, but the Type/Value memory here is backed by `type_map_arena`. - /// TODO we need to remove entries from this map in response to incremental compilation - /// but I think the frontend won't tell us about types that get deleted because - /// hasRuntimeBits() is false for types. + /// Maps Zig types to LLVM types. The table memory is backed by the GPA of + /// the compiler. + /// TODO when InternPool garbage collection is implemented, this map needs + /// to be garbage collected as well. type_map: TypeMap, - /// The backing memory for `type_map`. Periodically garbage collected after flush(). - /// The code for doing the periodical GC is not yet implemented. - type_map_arena: std.heap.ArenaAllocator, di_type_map: DITypeMap, /// The LLVM global table which holds the names corresponding to Zig errors. /// Note that the values are not added until flushModule, when all errors in @@ -380,21 +377,11 @@ pub const Object = struct { /// name collision. 
     extern_collisions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void),

-    pub const TypeMap = std.HashMapUnmanaged(
-        Type,
-        *llvm.Type,
-        Type.HashContext64,
-        std.hash_map.default_max_load_percentage,
-    );
+    pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, *llvm.Type);

     /// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we
     /// want to iterate over it while adding entries to it.
-    pub const DITypeMap = std.ArrayHashMapUnmanaged(
-        Type,
-        AnnotatedDITypePtr,
-        Type.HashContext32,
-        true,
-    );
+    pub const DITypeMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, AnnotatedDITypePtr);

     pub fn create(gpa: Allocator, options: link.Options) !*Object {
         const obj = try gpa.create(Object);
@@ -542,7 +529,6 @@ pub const Object = struct {
             .decl_map = .{},
             .named_enum_map = .{},
             .type_map = .{},
-            .type_map_arena = std.heap.ArenaAllocator.init(gpa),
             .di_type_map = .{},
             .error_name_table = null,
             .extern_collisions = .{},
@@ -562,7 +548,6 @@ pub const Object = struct {
         self.decl_map.deinit(gpa);
         self.named_enum_map.deinit(gpa);
         self.type_map.deinit(gpa);
-        self.type_map_arena.deinit();
         self.extern_collisions.deinit(gpa);
         self.* = undefined;
     }
@@ -597,16 +582,16 @@ pub const Object = struct {
             llvm_usize_ty,
         };
         const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False);
-        const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
-        const slice_alignment = slice_ty.abiAlignment(target);
+        const slice_ty = Type.slice_const_u8_sentinel_0;
+        const slice_alignment = slice_ty.abiAlignment(mod);

-        const error_name_list = mod.error_name_list.items;
+        const error_name_list = mod.global_error_set.keys();
         const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len);
         defer mod.gpa.free(llvm_errors);

         llvm_errors[0] = llvm_slice_ty.getUndef();
-        for (llvm_errors[1..], 0..) |*llvm_error, i| {
-            const name = error_name_list[1..][i];
+        for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| {
+            const name = mod.intern_pool.stringToSlice(name_nts);
             const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
             const str_global = self.llvm_module.addGlobal(str_init.typeOf(), "");
             str_global.setInitializer(str_init);
@@ -686,7 +671,7 @@ pub const Object = struct {
             const llvm_global = entry.value_ptr.*;
             // Same logic as below but for externs instead of exports.
             const decl = mod.declPtr(decl_index);
-            const other_global = object.getLlvmGlobal(decl.name) orelse continue;
+            const other_global = object.getLlvmGlobal(mod.intern_pool.stringToSlice(decl.name)) orelse continue;
             if (other_global == llvm_global) continue;

             llvm_global.replaceAllUsesWith(other_global);
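NOTE: the `TypeMap`/`DITypeMap` hunks above swap structurally-hashed `Type` keys for 32-bit `InternPool.Index` keys, which is what lets plain `std.AutoHashMapUnmanaged`/`std.AutoArrayHashMapUnmanaged` replace the custom `Type.HashContext64`/`Type.HashContext32` contexts and makes `type_map_arena` unnecessary. A minimal sketch of that idea, using hypothetical stand-ins (`Index`, `LlvmTypeRef`) rather than the compiler's real types:

    const std = @import("std");

    // Hypothetical stand-ins for InternPool.Index and *llvm.Type.
    const Index = enum(u32) { _ };
    const LlvmTypeRef = struct { name: []const u8 };

    // Interned types are identified by a small integer, so no custom hash
    // context is needed and the map never owns deep Type memory.
    const TypeMap = std.AutoHashMapUnmanaged(Index, *const LlvmTypeRef);

    test "interned keys hash and compare by value" {
        const gpa = std.testing.allocator;
        var map: TypeMap = .{};
        defer map.deinit(gpa);

        const i32_ref = LlvmTypeRef{ .name = "i32" };
        try map.put(gpa, @intToEnum(Index, 7), &i32_ref);
        // Equal indices denote the same type; lookup is a plain integer hash.
        try std.testing.expect(map.get(@intToEnum(Index, 7)).? == &i32_ref);
    }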
@@ -702,12 +687,9 @@ pub const Object = struct {
             for (export_list.items) |exp| {
                 // Detect if the LLVM global has already been created as an extern. In such
                 // case, we need to replace all uses of it with this exported global.
-                // TODO update std.builtin.ExportOptions to have the name be a
-                // null-terminated slice.
-                const exp_name_z = try mod.gpa.dupeZ(u8, exp.options.name);
-                defer mod.gpa.free(exp_name_z);
+                const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);

-                const other_global = object.getLlvmGlobal(exp_name_z.ptr) orelse continue;
+                const other_global = object.getLlvmGlobal(exp_name.ptr) orelse continue;
                 if (other_global == llvm_global) continue;

                 other_global.replaceAllUsesWith(llvm_global);
@@ -880,28 +862,29 @@ pub const Object = struct {
     pub fn updateFunc(
         o: *Object,
-        module: *Module,
-        func: *Module.Fn,
+        mod: *Module,
+        func_index: Module.Fn.Index,
         air: Air,
         liveness: Liveness,
     ) !void {
+        const func = mod.funcPtr(func_index);
         const decl_index = func.owner_decl;
-        const decl = module.declPtr(decl_index);
-        const target = module.getTarget();
+        const decl = mod.declPtr(decl_index);
+        const target = mod.getTarget();

         var dg: DeclGen = .{
             .context = o.context,
             .object = o,
-            .module = module,
+            .module = mod,
             .decl_index = decl_index,
             .decl = decl,
             .err_msg = null,
-            .gpa = module.gpa,
+            .gpa = mod.gpa,
         };

         const llvm_func = try dg.resolveLlvmFunction(decl_index);

-        if (module.align_stack_fns.get(func)) |align_info| {
+        if (mod.align_stack_fns.get(func_index)) |align_info| {
             dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment);
             dg.addFnAttr(llvm_func, "noinline");
         } else {
@@ -922,7 +905,7 @@ pub const Object = struct {
         }

         // TODO: disable this if safety is off for the function scope
-        const ssp_buf_size = module.comp.bin_file.options.stack_protector;
+        const ssp_buf_size = mod.comp.bin_file.options.stack_protector;
         if (ssp_buf_size != 0) {
             var buf: [12]u8 = undefined;
             const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable;
@@ -931,15 +914,14 @@ pub const Object = struct {
         }

         // TODO: disable this if safety is off for the function scope
-        if (module.comp.bin_file.options.stack_check) {
+        if (mod.comp.bin_file.options.stack_check) {
             dg.addFnAttrString(llvm_func, "probe-stack", "__zig_probe_stack");
         } else if (target.os.tag == .uefi) {
             dg.addFnAttrString(llvm_func, "no-stack-arg-probe", "");
         }

-        if (decl.@"linksection") |section| {
+        if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section|
             llvm_func.setSection(section);
-        }

         // Remove all the basic blocks of a function in order to start over, generating
         // LLVM IR from an empty function body.
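NOTE: `updateFunc` above now receives a `Module.Fn.Index` and resolves it through `mod.funcPtr`, and `align_stack_fns` is likewise keyed by the index. A rough sketch of this handle-instead-of-pointer pattern under assumed names (`FnStore` and `Fn` are illustrative, not the compiler's real layout):

    const std = @import("std");

    const Fn = struct { owner_decl: u32 };

    // Functions live in one backing array; an index is a stable, copyable
    // handle that survives reallocation of the storage, unlike a raw *Fn.
    const FnStore = struct {
        list: std.ArrayListUnmanaged(Fn) = .{},

        const Index = enum(u32) { _ };

        fn funcPtr(store: *FnStore, index: Index) *Fn {
            return &store.list.items[@enumToInt(index)];
        }
    };

    test "resolve a function handle" {
        const gpa = std.testing.allocator;
        var store: FnStore = .{};
        defer store.list.deinit(gpa);

        try store.list.append(gpa, .{ .owner_decl = 123 });
        const index = @intToEnum(FnStore.Index, 0);
        try std.testing.expect(store.funcPtr(index).owner_decl == 123);
    }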
@@ -953,18 +935,18 @@ pub const Object = struct {
         builder.positionBuilderAtEnd(entry_block);

         // This gets the LLVM values from the function and stores them in `dg.args`.
-        const fn_info = decl.ty.fnInfo();
-        const sret = firstParamSRet(fn_info, target);
+        const fn_info = mod.typeToFunc(decl.ty).?;
+        const sret = firstParamSRet(fn_info, mod);
         const ret_ptr = if (sret) llvm_func.getParam(0) else null;
         const gpa = dg.gpa;

-        if (ccAbiPromoteInt(fn_info.cc, target, fn_info.return_type)) |s| switch (s) {
+        if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) {
             .signed => dg.addAttr(llvm_func, 0, "signext"),
             .unsigned => dg.addAttr(llvm_func, 0, "zeroext"),
         };

-        const err_return_tracing = fn_info.return_type.isError() and
-            module.comp.bin_file.options.error_return_tracing;
+        const err_return_tracing = fn_info.return_type.toType().isError(mod) and
+            mod.comp.bin_file.options.error_return_tracing;

         const err_ret_trace = if (err_return_tracing)
             llvm_func.getParam(@boolToInt(ret_ptr != null))
@@ -985,12 +967,12 @@ pub const Object = struct {
                 .byval => {
                     assert(!it.byval_attr);
                     const param_index = it.zig_index - 1;
-                    const param_ty = fn_info.param_types[param_index];
+                    const param_ty = fn_info.param_types[param_index].toType();
                     const param = llvm_func.getParam(llvm_arg_i);
                     try args.ensureUnusedCapacity(1);

-                    if (isByRef(param_ty)) {
-                        const alignment = param_ty.abiAlignment(target);
+                    if (isByRef(param_ty, mod)) {
+                        const alignment = param_ty.abiAlignment(mod);
                         const param_llvm_ty = param.typeOf();
                         const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
                         const store_inst = builder.buildStore(param, arg_ptr);
@@ -1004,17 +986,17 @@ pub const Object = struct {
                     llvm_arg_i += 1;
                 },
                 .byref => {
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param = llvm_func.getParam(llvm_arg_i);
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);

                     dg.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
                     llvm_arg_i += 1;

                     try args.ensureUnusedCapacity(1);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         args.appendAssumeCapacity(param);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, param, "");
@@ -1023,17 +1005,17 @@ pub const Object = struct {
                     }
                 },
                 .byref_mut => {
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param = llvm_func.getParam(llvm_arg_i);
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);

                     dg.addArgAttr(llvm_func, llvm_arg_i, "noundef");
                     llvm_arg_i += 1;

                     try args.ensureUnusedCapacity(1);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         args.appendAssumeCapacity(param);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, param, "");
@@ -1043,15 +1025,15 @@ pub const Object = struct {
                 },
                 .abi_sized_int => {
                     assert(!it.byval_attr);
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param = llvm_func.getParam(llvm_arg_i);
                     llvm_arg_i += 1;

                     const param_llvm_ty = try dg.lowerType(param_ty);
-                    const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+                    const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
                     const int_llvm_ty = dg.context.intType(abi_size * 8);
                     const alignment = @max(
-                        param_ty.abiAlignment(target),
+                        param_ty.abiAlignment(mod),
                         dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
                     );
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
@@ -1060,7 +1042,7 @@ pub const Object = struct {
                     try args.ensureUnusedCapacity(1);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         args.appendAssumeCapacity(arg_ptr);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1070,15 +1052,15 @@ pub const Object = struct {
                 },
                 .slice => {
                     assert(!it.byval_attr);
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
-                    const ptr_info = param_ty.ptrInfo().data;
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
+                    const ptr_info = param_ty.ptrInfo(mod);

                     if (math.cast(u5, it.zig_index - 1)) |i| {
                         if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
                             dg.addArgAttr(llvm_func, llvm_arg_i, "noalias");
                         }
                     }
-                    if (param_ty.zigTypeTag() != .Optional) {
+                    if (param_ty.zigTypeTag(mod) != .Optional) {
                         dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull");
                     }
                     if (!ptr_info.mutable) {
@@ -1087,7 +1069,7 @@ pub const Object = struct {
                     if (ptr_info.@"align" != 0) {
                         dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", ptr_info.@"align");
                     } else {
-                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
                         dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
                     }
                     const ptr_param = llvm_func.getParam(llvm_arg_i);
@@ -1103,9 +1085,9 @@ pub const Object = struct {
                 .multiple_llvm_types => {
                     assert(!it.byval_attr);
                     const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param_llvm_ty = try dg.lowerType(param_ty);
-                    const param_alignment = param_ty.abiAlignment(target);
+                    const param_alignment = param_ty.abiAlignment(mod);
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
                     const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
                     for (field_types, 0..) |_, field_i_usize| {
@@ -1117,7 +1099,7 @@ pub const Object = struct {
                         store_inst.setAlignment(target.ptrBitWidth() / 8);
                     }

-                    const is_by_ref = isByRef(param_ty);
+                    const is_by_ref = isByRef(param_ty, mod);
                     const loaded = if (is_by_ref) arg_ptr else l: {
                         const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
                         load_inst.setAlignment(param_alignment);
@@ -1134,16 +1116,16 @@ pub const Object = struct {
                     args.appendAssumeCapacity(casted);
                 },
                 .float_array => {
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param = llvm_func.getParam(llvm_arg_i);
                     llvm_arg_i += 1;

-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
                     _ = builder.buildStore(param, arg_ptr);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         try args.append(arg_ptr);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1152,16 +1134,16 @@ pub const Object = struct {
                     }
                 },
                 .i32_array, .i64_array => {
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param = llvm_func.getParam(llvm_arg_i);
                     llvm_arg_i += 1;

-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
                     _ = builder.buildStore(param, arg_ptr);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         try args.append(arg_ptr);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1176,27 +1158,28 @@ pub const Object = struct {
         var di_scope: ?*llvm.DIScope = null;

         if (dg.object.di_builder) |dib| {
-            di_file = try dg.object.getDIFile(gpa, decl.src_namespace.file_scope);
+            di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope);

             const line_number = decl.src_line + 1;
-            const is_internal_linkage = decl.val.tag() != .extern_fn and
-                !module.decl_exports.contains(decl_index);
-            const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
+            const is_internal_linkage = decl.val.getExternFunc(mod) == null and
+                !mod.decl_exports.contains(decl_index);
+            const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type)
                 llvm.DIFlags.NoReturn
             else
                 0;
+            const decl_di_ty = try o.lowerDebugType(decl.ty, .full);
             const subprogram = dib.createFunction(
                 di_file.?.toScope(),
-                decl.name,
+                mod.intern_pool.stringToSlice(decl.name),
                 llvm_func.getValueName(),
                 di_file.?,
                 line_number,
-                try o.lowerDebugType(decl.ty, .full),
+                decl_di_ty,
                 is_internal_linkage,
                 true, // is definition
                 line_number + func.lbrace_line, // scope line
                 llvm.DIFlags.StaticMember | noret_bit,
-                module.comp.bin_file.options.optimize_mode != .Debug,
+                mod.comp.bin_file.options.optimize_mode != .Debug,
                 null, // decl_subprogram
             );
             try dg.object.di_map.put(gpa, decl, subprogram.toNode());
@@ -1219,7 +1202,7 @@ pub const Object = struct {
             .func_inst_table = .{},
             .llvm_func = llvm_func,
             .blocks = .{},
-            .single_threaded = module.comp.bin_file.options.single_threaded,
+            .single_threaded = mod.comp.bin_file.options.single_threaded,
             .di_scope = di_scope,
             .di_file = di_file,
             .base_line = dg.decl.src_line,
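NOTE: the hunk above can test `fn_info.return_type == .noreturn_type` directly because, once types are interned, well-known types occupy fixed, named `InternPool.Index` values (the same trick as `pointee_ty.ip_index == .u8_type` earlier in this diff). A toy model of that encoding, with an invented `Index` enum:

    const std = @import("std");

    // Hypothetical mirror of the idea: the first intern-pool indices are
    // reserved for common types, so type tests become integer equality.
    const Index = enum(u32) {
        u8_type,
        noreturn_type,
        anyopaque_type,
        _, // all later values name runtime-interned types

        fn isNoReturn(i: Index) bool {
            return i == .noreturn_type;
        }
    };

    test "well-known types compare with ==" {
        try std.testing.expect(Index.isNoReturn(.noreturn_type));
        try std.testing.expect(!Index.isNoReturn(.u8_type));
    }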
@@ -1232,14 +1215,14 @@ pub const Object = struct {
         fg.genBody(air.getMainBody()) catch |err| switch (err) {
             error.CodegenFail => {
                 decl.analysis = .codegen_failure;
-                try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?);
+                try mod.failed_decls.put(mod.gpa, decl_index, dg.err_msg.?);
                 dg.err_msg = null;
                 return;
             },
             else => |e| return e,
         };

-        try o.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
+        try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
     }

     pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -1275,63 +1258,72 @@ pub const Object = struct {
     pub fn updateDeclExports(
         self: *Object,
-        module: *Module,
+        mod: *Module,
         decl_index: Module.Decl.Index,
         exports: []const *Module.Export,
     ) !void {
+        const gpa = mod.gpa;
         // If the module does not already have the function, we ignore this function call
         // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
         const llvm_global = self.decl_map.get(decl_index) orelse return;
-        const decl = module.declPtr(decl_index);
-        if (decl.isExtern()) {
-            const is_wasm_fn = module.getTarget().isWasm() and try decl.isFunction();
-            const mangle_name = is_wasm_fn and
-                decl.getExternFn().?.lib_name != null and
-                !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c");
-            const decl_name = if (mangle_name) name: {
-                const tmp = try std.fmt.allocPrintZ(module.gpa, "{s}|{s}", .{ decl.name, decl.getExternFn().?.lib_name.? });
-                break :name tmp.ptr;
-            } else decl.name;
-            defer if (mangle_name) module.gpa.free(std.mem.sliceTo(decl_name, 0));
+        const decl = mod.declPtr(decl_index);
+        if (decl.isExtern(mod)) {
+            var free_decl_name = false;
+            const decl_name = decl_name: {
+                const decl_name = mod.intern_pool.stringToSlice(decl.name);
+
+                if (mod.getTarget().isWasm() and try decl.isFunction(mod)) {
+                    if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| {
+                        if (!std.mem.eql(u8, lib_name, "c")) {
+                            free_decl_name = true;
+                            break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{
+                                decl_name, lib_name,
+                            });
+                        }
+                    }
+                }
+
+                break :decl_name decl_name;
+            };
+            defer if (free_decl_name) gpa.free(decl_name);

             llvm_global.setValueName(decl_name);
             if (self.getLlvmGlobal(decl_name)) |other_global| {
                 if (other_global != llvm_global) {
-                    log.debug("updateDeclExports isExtern()=true setValueName({s}) conflict", .{decl.name});
-                    try self.extern_collisions.put(module.gpa, decl_index, {});
+                    try self.extern_collisions.put(gpa, decl_index, {});
                 }
             }
             llvm_global.setUnnamedAddr(.False);
             llvm_global.setLinkage(.External);
-            if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+            if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
             if (self.di_map.get(decl)) |di_node| {
-                if (try decl.isFunction()) {
+                if (try decl.isFunction(mod)) {
                     const di_func = @ptrCast(*llvm.DISubprogram, di_node);
-                    const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name));
+                    const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
                     di_func.replaceLinkageName(linkage_name);
                 } else {
                     const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node);
-                    const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name));
+                    const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len);
                     di_global.replaceLinkageName(linkage_name);
                 }
             }
-            if (decl.val.castTag(.variable)) |variable| {
-                if (variable.data.is_threadlocal) {
+            if (decl.val.getVariable(mod)) |variable| {
+                if (variable.is_threadlocal) {
                     llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
                 } else {
                     llvm_global.setThreadLocalMode(.NotThreadLocal);
                 }
-                if (variable.data.is_weak_linkage) {
+                if (variable.is_weak_linkage) {
                     llvm_global.setLinkage(.ExternalWeak);
                 }
             }
         } else if (exports.len != 0) {
-            const exp_name = exports[0].options.name;
+            const exp_name = mod.intern_pool.stringToSlice(exports[0].opts.name);
             llvm_global.setValueName2(exp_name.ptr, exp_name.len);
             llvm_global.setUnnamedAddr(.False);
-            if (module.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
+            if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
             if (self.di_map.get(decl)) |di_node| {
-                if (try decl.isFunction()) {
+                if (try decl.isFunction(mod)) {
                     const di_func = @ptrCast(*llvm.DISubprogram, di_node);
                     const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
                     di_func.replaceLinkageName(linkage_name);
@@ -1341,37 +1333,34 @@ pub const Object = struct {
                     di_global.replaceLinkageName(linkage_name);
                 }
             }
-            switch (exports[0].options.linkage) {
+            switch (exports[0].opts.linkage) {
                 .Internal => unreachable,
                 .Strong => llvm_global.setLinkage(.External),
                 .Weak => llvm_global.setLinkage(.WeakODR),
                 .LinkOnce => llvm_global.setLinkage(.LinkOnceODR),
             }
-            switch (exports[0].options.visibility) {
+            switch (exports[0].opts.visibility) {
                 .default => llvm_global.setVisibility(.Default),
                 .hidden => llvm_global.setVisibility(.Hidden),
                 .protected => llvm_global.setVisibility(.Protected),
             }
-            if (exports[0].options.section) |section| {
-                const section_z = try module.gpa.dupeZ(u8, section);
-                defer module.gpa.free(section_z);
-                llvm_global.setSection(section_z);
+            if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| {
+                llvm_global.setSection(section);
             }
-            if (decl.val.castTag(.variable)) |variable| {
-                if (variable.data.is_threadlocal) {
+            if (decl.val.getVariable(mod)) |variable| {
+                if (variable.is_threadlocal) {
                     llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
                 }
            }

             // If a Decl is exported more than one time (which is rare),
             // we add aliases for all but the first export.
-            // TODO LLVM C API does not support deleting aliases. We need to
-            // patch it to support this or figure out how to wrap the C++ API ourselves.
+            // TODO LLVM C API does not support deleting aliases.
+            // The planned solution to this is https://github.com/ziglang/zig/issues/13265
             // Until then we iterate over existing aliases and make them point
             // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
             for (exports[1..]) |exp| {
-                const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name);
-                defer module.gpa.free(exp_name_z);
+                const exp_name_z = mod.intern_pool.stringToSlice(exp.opts.name);

                 if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
                     alias.setAliasee(llvm_global);
@@ -1385,15 +1374,14 @@ pub const Object = struct {
                 }
             }
         } else {
-            const fqn = try decl.getFullyQualifiedName(module);
-            defer module.gpa.free(fqn);
+            const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));

             llvm_global.setValueName2(fqn.ptr, fqn.len);
             llvm_global.setLinkage(.Internal);
-            if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+            if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
             llvm_global.setUnnamedAddr(.True);
-            if (decl.val.castTag(.variable)) |variable| {
-                const single_threaded = module.comp.bin_file.options.single_threaded;
-                if (variable.data.is_threadlocal and !single_threaded) {
+            if (decl.val.getVariable(mod)) |variable| {
+                const single_threaded = mod.comp.bin_file.options.single_threaded;
+                if (variable.is_threadlocal and !single_threaded) {
                     llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
                 } else {
                     llvm_global.setThreadLocalMode(.NotThreadLocal);
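NOTE: the `lowerDebugType` hunks that follow preserve an existing invariant worth calling out: the `getOrPut` result pointers are invalidated by any recursive `lowerDebugType` call that grows `di_type_map`, which is why each branch finishes with a fresh `put` and a "we can't use `gop` anymore" comment. A reduced, self-contained sketch of that discipline (the `lower` function here is illustrative only):

    const std = @import("std");

    var map: std.AutoArrayHashMapUnmanaged(u32, u32) = .{};

    // Memoized recursion over a map: after any recursive call that may insert,
    // re-`put` the result instead of writing through the stale `gop.value_ptr`.
    fn lower(gpa: std.mem.Allocator, key: u32) !u32 {
        const gop = try map.getOrPut(gpa, key);
        if (gop.found_existing) return gop.value_ptr.*;
        gop.value_ptr.* = 0; // placeholder; only safe before recursing
        errdefer _ = map.orderedRemove(key);

        // The recursive call may grow the map and invalidate `gop`.
        const child = if (key == 0) 1 else try lower(gpa, key - 1);

        const result = child * 2;
        try map.put(gpa, key, result); // do NOT use gop.value_ptr here
        return result;
    }

    test "memoization survives reentrant inserts" {
        const gpa = std.testing.allocator;
        defer map.deinit(gpa);
        try std.testing.expect((try lower(gpa, 3)) == 16);
    }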
@@ -1444,7 +1432,7 @@ pub const Object = struct {
         const gpa = o.gpa;
         // Be careful not to reference this `gop` variable after any recursive calls
         // to `lowerDebugType`.
-        const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .mod = o.module });
+        const gop = try o.di_type_map.getOrPut(gpa, ty.toIntern());
         if (gop.found_existing) {
             const annotated = gop.value_ptr.*;
             const di_type = annotated.toDIType();
@@ -1457,10 +1445,7 @@ pub const Object = struct {
             };
             return o.lowerDebugTypeImpl(entry, resolve, di_type);
         }
-        errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module }));
-        // The Type memory is ephemeral; since we want to store a longer-lived
-        // reference, we need to copy it here.
-        gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator());
+        errdefer assert(o.di_type_map.orderedRemove(ty.toIntern()));
         const entry: Object.DITypeMap.Entry = .{
             .key_ptr = gop.key_ptr,
             .value_ptr = gop.value_ptr,
@@ -1475,18 +1460,19 @@ pub const Object = struct {
         resolve: DebugResolveStatus,
         opt_fwd_decl: ?*llvm.DIType,
     ) Allocator.Error!*llvm.DIType {
-        const ty = gop.key_ptr.*;
+        const ty = gop.key_ptr.toType();
         const gpa = o.gpa;
         const target = o.target;
         const dib = o.di_builder.?;
-        switch (ty.zigTypeTag()) {
+        const mod = o.module;
+        switch (ty.zigTypeTag(mod)) {
             .Void, .NoReturn => {
                 const di_type = dib.createBasicType("void", 0, DW.ATE.signed);
                 gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
                 return di_type;
             },
             .Int => {
-                const info = ty.intInfo(target);
+                const info = ty.intInfo(mod);
                 assert(info.bits != 0);
                 const name = try ty.nameAlloc(gpa, o.module);
                 defer gpa.free(name);
@@ -1494,49 +1480,41 @@ pub const Object = struct {
                     .signed => DW.ATE.signed,
                     .unsigned => DW.ATE.unsigned,
                 };
-                const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+                const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
                 const di_type = dib.createBasicType(name, di_bits, dwarf_encoding);
                 gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
                 return di_type;
             },
             .Enum => {
-                const owner_decl_index = ty.getOwnerDecl();
+                const owner_decl_index = ty.getOwnerDecl(mod);
                 const owner_decl = o.module.declPtr(owner_decl_index);
-                if (!ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
                     // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
                     // means we can't use `gop` anymore.
-                    try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
+                    try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
                     return enum_di_ty;
                 }

-                const field_names = ty.enumFields().keys();
+                const ip = &mod.intern_pool;
+                const enum_type = ip.indexToKey(ty.toIntern()).enum_type;

-                const enumerators = try gpa.alloc(*llvm.DIEnumerator, field_names.len);
+                const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len);
                 defer gpa.free(enumerators);

-                var buf_field_index: Value.Payload.U32 = .{
-                    .base = .{ .tag = .enum_field_index },
-                    .data = undefined,
-                };
-                const field_index_val = Value.initPayload(&buf_field_index.base);
-
-                var buffer: Type.Payload.Bits = undefined;
-                const int_ty = ty.intTagType(&buffer);
-                const int_info = ty.intInfo(target);
+                const int_ty = enum_type.tag_ty.toType();
+                const int_info = ty.intInfo(mod);
                 assert(int_info.bits != 0);

-                for (field_names, 0..) |field_name, i| {
-                    const field_name_z = try gpa.dupeZ(u8, field_name);
-                    defer gpa.free(field_name_z);
-
-                    buf_field_index.data = @intCast(u32, i);
-                    var buf_u64: Value.Payload.U64 = undefined;
-                    const field_int_val = field_index_val.enumToInt(ty, &buf_u64);
+                for (enum_type.names, 0..) |field_name_ip, i| {
+                    const field_name_z = ip.stringToSlice(field_name_ip);

                     var bigint_space: Value.BigIntSpace = undefined;
-                    const bigint = field_int_val.toBigInt(&bigint_space, target);
+                    const bigint = if (enum_type.values.len != 0)
+                        enum_type.values[i].toValue().toBigInt(&bigint_space, mod)
+                    else
+                        std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();

                     if (bigint.limbs.len == 1) {
                         enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned);
@@ -1555,7 +1533,7 @@ pub const Object = struct {
                         @panic("TODO implement bigint debug enumerators to llvm int for 32-bit compiler builds");
                 }

-                const di_file = try o.getDIFile(gpa, owner_decl.src_namespace.file_scope);
+                const di_file = try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope);
                 const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace);

                 const name = try ty.nameAlloc(gpa, o.module);
@@ -1566,15 +1544,15 @@ pub const Object = struct {
                     name,
                     di_file,
                     owner_decl.src_node + 1,
-                    ty.abiSize(target) * 8,
-                    ty.abiAlignment(target) * 8,
+                    ty.abiSize(mod) * 8,
+                    ty.abiAlignment(mod) * 8,
                     enumerators.ptr,
                     @intCast(c_int, enumerators.len),
                     try o.lowerDebugType(int_ty, .full),
                     "",
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
+                try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty));
                 return enum_di_ty;
             },
             .Float => {
@@ -1593,49 +1571,40 @@ pub const Object = struct {
             },
             .Pointer => {
                 // Normalize everything that the debug info does not represent.
-                const ptr_info = ty.ptrInfo().data;
+                const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern());

-                if (ptr_info.sentinel != null or
-                    ptr_info.@"addrspace" != .generic or
-                    ptr_info.bit_offset != 0 or
-                    ptr_info.host_size != 0 or
-                    ptr_info.vector_index != .none or
-                    ptr_info.@"allowzero" or
-                    !ptr_info.mutable or
-                    ptr_info.@"volatile" or
-                    ptr_info.size == .Many or ptr_info.size == .C or
-                    !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime())
+                if (ptr_info.sentinel != .none or
+                    ptr_info.flags.address_space != .generic or
+                    ptr_info.packed_offset.bit_offset != 0 or
+                    ptr_info.packed_offset.host_size != 0 or
+                    ptr_info.flags.vector_index != .none or
+                    ptr_info.flags.is_allowzero or
+                    ptr_info.flags.is_const or
+                    ptr_info.flags.is_volatile or
+                    ptr_info.flags.size == .Many or ptr_info.flags.size == .C or
+                    !ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod))
                 {
-                    var payload: Type.Payload.Pointer = .{
-                        .data = .{
-                            .pointee_type = ptr_info.pointee_type,
-                            .sentinel = null,
-                            .@"align" = ptr_info.@"align",
-                            .@"addrspace" = .generic,
-                            .bit_offset = 0,
-                            .host_size = 0,
-                            .@"allowzero" = false,
-                            .mutable = true,
-                            .@"volatile" = false,
-                            .size = switch (ptr_info.size) {
+                    const bland_ptr_ty = try mod.ptrType(.{
+                        .child = if (!ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod))
+                            .anyopaque_type
+                        else
+                            ptr_info.child,
+                        .flags = .{
+                            .alignment = ptr_info.flags.alignment,
+                            .size = switch (ptr_info.flags.size) {
                                 .Many, .C, .One => .One,
                                 .Slice => .Slice,
                             },
                         },
-                    };
-                    if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) {
-                        payload.data.pointee_type = Type.anyopaque;
-                    }
-                    const bland_ptr_ty = Type.initPayload(&payload.base);
+                    });
                     const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
                     // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                    try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
+                    try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
                     return ptr_di_ty;
                 }

-                if (ty.isSlice()) {
-                    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                    const ptr_ty = ty.slicePtrFieldType(&buf);
+                if (ty.isSlice(mod)) {
+                    const ptr_ty = ty.slicePtrFieldType(mod);
                     const len_ty = Type.usize;

                     const name = try ty.nameAlloc(gpa, o.module);
@@ -1657,10 +1626,10 @@ pub const Object = struct {
                         break :blk fwd_decl;
                     };

-                    const ptr_size = ptr_ty.abiSize(target);
-                    const ptr_align = ptr_ty.abiAlignment(target);
-                    const len_size = len_ty.abiSize(target);
-                    const len_align = len_ty.abiAlignment(target);
+                    const ptr_size = ptr_ty.abiSize(mod);
+                    const ptr_align = ptr_ty.abiAlignment(mod);
+                    const len_size = len_ty.abiSize(mod);
+                    const len_align = len_ty.abiAlignment(mod);

                     var offset: u64 = 0;
                     offset += ptr_size;
@@ -1697,8 +1666,8 @@ pub const Object = struct {
                         name.ptr,
                         di_file,
                         line,
-                        ty.abiSize(target) * 8, // size in bits
-                        ty.abiAlignment(target) * 8, // align in bits
+                        ty.abiSize(mod) * 8, // size in bits
+                        ty.abiAlignment(mod) * 8, // align in bits
                         0, // flags
                         null, // derived from
                         &fields,
@@ -1709,65 +1678,65 @@ pub const Object = struct {
                     );
                     dib.replaceTemporary(fwd_decl, full_di_ty);
                     // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                    try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+                    try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
                     return full_di_ty;
                 }

-                const elem_di_ty = try o.lowerDebugType(ptr_info.pointee_type, .fwd);
+                const elem_di_ty = try o.lowerDebugType(ptr_info.child.toType(), .fwd);
                 const name = try ty.nameAlloc(gpa, o.module);
                 defer gpa.free(name);
                 const ptr_di_ty = dib.createPointerType(
                     elem_di_ty,
                     target.ptrBitWidth(),
-                    ty.ptrAlignment(target) * 8,
+                    ty.ptrAlignment(mod) * 8,
                     name,
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
+                try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(ptr_di_ty));
                 return ptr_di_ty;
             },
             .Opaque => {
-                if (ty.tag() == .anyopaque) {
+                if (ty.toIntern() == .anyopaque_type) {
                     const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed);
                     gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
                     return di_ty;
                 }
                 const name = try ty.nameAlloc(gpa, o.module);
                 defer gpa.free(name);
-                const owner_decl_index = ty.getOwnerDecl();
+                const owner_decl_index = ty.getOwnerDecl(mod);
                 const owner_decl = o.module.declPtr(owner_decl_index);
                 const opaque_di_ty = dib.createForwardDeclType(
                     DW.TAG.structure_type,
                     name,
                     try o.namespaceToDebugScope(owner_decl.src_namespace),
-                    try o.getDIFile(gpa, owner_decl.src_namespace.file_scope),
+                    try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope),
                     owner_decl.src_node + 1,
                 );
                 // The recursive call to `lowerDebugType` via `namespaceToDebugScope`
                 // means we can't use `gop` anymore.
-                try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .mod = o.module });
+                try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(opaque_di_ty));
                 return opaque_di_ty;
             },
             .Array => {
                 const array_di_ty = dib.createArrayType(
-                    ty.abiSize(target) * 8,
-                    ty.abiAlignment(target) * 8,
-                    try o.lowerDebugType(ty.childType(), .full),
-                    @intCast(c_int, ty.arrayLen()),
+                    ty.abiSize(mod) * 8,
+                    ty.abiAlignment(mod) * 8,
+                    try o.lowerDebugType(ty.childType(mod), .full),
+                    @intCast(c_int, ty.arrayLen(mod)),
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module });
+                try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
                 return array_di_ty;
             },
             .Vector => {
-                const elem_ty = ty.elemType2();
+                const elem_ty = ty.elemType2(mod);
                 // Vector elements cannot be padded since that would make
                 // @bitSizeOf(elem) * len > @bitSizeOf(vec).
                 // Neither gdb nor lldb seem to be able to display non-byte sized
                 // vectors properly.
-                const elem_di_type = switch (elem_ty.zigTypeTag()) {
+                const elem_di_type = switch (elem_ty.zigTypeTag(mod)) {
                     .Int => blk: {
-                        const info = elem_ty.intInfo(target);
+                        const info = elem_ty.intInfo(mod);
                         assert(info.bits != 0);
                         const name = try ty.nameAlloc(gpa, o.module);
                         defer gpa.free(name);
@@ -1778,34 +1747,33 @@ pub const Object = struct {
                         break :blk dib.createBasicType(name, info.bits, dwarf_encoding);
                     },
                     .Bool => dib.createBasicType("bool", 1, DW.ATE.boolean),
-                    else => try o.lowerDebugType(ty.childType(), .full),
+                    else => try o.lowerDebugType(ty.childType(mod), .full),
                 };

                 const vector_di_ty = dib.createVectorType(
-                    ty.abiSize(target) * 8,
-                    ty.abiAlignment(target) * 8,
+                    ty.abiSize(mod) * 8,
+                    ty.abiAlignment(mod) * 8,
                     elem_di_type,
-                    ty.vectorLen(),
+                    ty.vectorLen(mod),
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module });
+                try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(vector_di_ty));
                 return vector_di_ty;
             },
             .Optional => {
                 const name = try ty.nameAlloc(gpa, o.module);
                 defer gpa.free(name);
-                var buf: Type.Payload.ElemType = undefined;
-                const child_ty = ty.optionalChild(&buf);
-                if (!child_ty.hasRuntimeBitsIgnoreComptime()) {
+                const child_ty = ty.optionalChild(mod);
+                if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const di_bits = 8; // lldb cannot handle non-byte sized types
                     const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean);
                     gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
                     return di_ty;
                 }
-                if (ty.optionalReprIsPayload()) {
+                if (ty.optionalReprIsPayload(mod)) {
                     const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
                     // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                    try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
+                    try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
                     return ptr_di_ty;
                 }
@@ -1826,10 +1794,10 @@ pub const Object = struct {
                 };

                 const non_null_ty = Type.u8;
-                const payload_size = child_ty.abiSize(target);
-                const payload_align = child_ty.abiAlignment(target);
-                const non_null_size = non_null_ty.abiSize(target);
-                const non_null_align = non_null_ty.abiAlignment(target);
+                const payload_size = child_ty.abiSize(mod);
+                const payload_align = child_ty.abiAlignment(mod);
+                const non_null_size = non_null_ty.abiSize(mod);
+                const non_null_align = non_null_ty.abiAlignment(mod);

                 var offset: u64 = 0;
                 offset += payload_size;
@@ -1866,8 +1834,8 @@ pub const Object = struct {
                     name.ptr,
                     di_file,
                     line,
-                    ty.abiSize(target) * 8, // size in bits
-                    ty.abiAlignment(target) * 8, // align in bits
+                    ty.abiSize(mod) * 8, // size in bits
+                    ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &fields,
@@ -1878,15 +1846,15 @@ pub const Object = struct {
                 );
                 dib.replaceTemporary(fwd_decl, full_di_ty);
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+                try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
                 return full_di_ty;
             },
             .ErrorUnion => {
-                const payload_ty = ty.errorUnionPayload();
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                const payload_ty = ty.errorUnionPayload(mod);
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full);
                     // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                    try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module });
+                    try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(err_set_di_ty));
                     return err_set_di_ty;
                 }
                 const name = try ty.nameAlloc(gpa, o.module);
@@ -1907,10 +1875,10 @@ pub const Object = struct {
                     break :blk fwd_decl;
                 };

-                const error_size = Type.anyerror.abiSize(target);
-                const error_align = Type.anyerror.abiAlignment(target);
-                const payload_size = payload_ty.abiSize(target);
-                const payload_align = payload_ty.abiAlignment(target);
+                const error_size = Type.anyerror.abiSize(mod);
+                const error_align = Type.anyerror.abiAlignment(mod);
+                const payload_size = payload_ty.abiSize(mod);
+                const payload_align = payload_ty.abiAlignment(mod);

                 var error_index: u32 = undefined;
                 var payload_index: u32 = undefined;
@@ -1957,8 +1925,8 @@ pub const Object = struct {
                     name.ptr,
                     di_file,
                     line,
-                    ty.abiSize(target) * 8, // size in bits
-                    ty.abiAlignment(target) * 8, // align in bits
+                    ty.abiSize(mod) * 8, // size in bits
+                    ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &fields,
@@ -1969,7 +1937,7 @@ pub const Object = struct {
                 );
                 dib.replaceTemporary(fwd_decl, full_di_ty);
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+                try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
                 return full_di_ty;
             },
             .ErrorSet => {
@@ -1984,16 +1952,15 @@ pub const Object = struct {
                 const name = try ty.nameAlloc(gpa, o.module);
                 defer gpa.free(name);

-                if (ty.castTag(.@"struct")) |payload| {
-                    const struct_obj = payload.data;
+                if (mod.typeToStruct(ty)) |struct_obj| {
                     if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
                         assert(struct_obj.haveLayout());
-                        const info = struct_obj.backing_int_ty.intInfo(target);
+                        const info = struct_obj.backing_int_ty.intInfo(mod);
                         const dwarf_encoding: c_uint = switch (info.signedness) {
                             .signed => DW.ATE.signed,
                             .unsigned => DW.ATE.unsigned,
                         };
-                        const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+                        const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
                         const di_ty = dib.createBasicType(name, di_bits, dwarf_encoding);
                         gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
                         return di_ty;
@@ -2013,98 +1980,98 @@ pub const Object = struct {
                     break :blk fwd_decl;
                 };

-                if (ty.isSimpleTupleOrAnonStruct()) {
-                    const tuple = ty.tupleFields();
+                switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+                    .anon_struct_type => |tuple| {
+                        var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
+                        defer di_fields.deinit(gpa);

-                    var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
-                    defer di_fields.deinit(gpa);
+                        try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);

-                    try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
+                        comptime assert(struct_layout_version == 2);
+                        var offset: u64 = 0;

-                    comptime assert(struct_layout_version == 2);
-                    var offset: u64 = 0;
+                        for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+                            if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;

-                    for (tuple.types, 0..) |field_ty, i| {
-                        const field_val = tuple.values[i];
-                        if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+                            const field_size = field_ty.toType().abiSize(mod);
+                            const field_align = field_ty.toType().abiAlignment(mod);
+                            const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                            offset = field_offset + field_size;

-                        const field_size = field_ty.abiSize(target);
-                        const field_align = field_ty.abiAlignment(target);
-                        const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
-                        offset = field_offset + field_size;
+                            const field_name = if (tuple.names.len != 0)
+                                mod.intern_pool.stringToSlice(tuple.names[i])
+                            else
+                                try std.fmt.allocPrintZ(gpa, "{d}", .{i});
+                            defer if (tuple.names.len == 0) gpa.free(field_name);

-                        const field_name = if (ty.castTag(.anon_struct)) |payload|
-                            try gpa.dupeZ(u8, payload.data.names[i])
-                        else
-                            try std.fmt.allocPrintZ(gpa, "{d}", .{i});
-                        defer gpa.free(field_name);
+                            try di_fields.append(gpa, dib.createMemberType(
+                                fwd_decl.toScope(),
+                                field_name,
+                                null, // file
+                                0, // line
+                                field_size * 8, // size in bits
+                                field_align * 8, // align in bits
+                                field_offset * 8, // offset in bits
+                                0, // flags
+                                try o.lowerDebugType(field_ty.toType(), .full),
+                            ));
+                        }

-                        try di_fields.append(gpa, dib.createMemberType(
-                            fwd_decl.toScope(),
-                            field_name,
+                        const full_di_ty = dib.createStructType(
+                            compile_unit_scope,
+                            name.ptr,
                             null, // file
                             0, // line
-                            field_size * 8, // size in bits
-                            field_align * 8, // align in bits
-                            field_offset * 8, // offset in bits
+                            ty.abiSize(mod) * 8, // size in bits
+                            ty.abiAlignment(mod) * 8, // align in bits
                             0, // flags
-                            try o.lowerDebugType(field_ty, .full),
-                        ));
-                    }
+                            null, // derived from
+                            di_fields.items.ptr,
+                            @intCast(c_int, di_fields.items.len),
+                            0, // run time lang
+                            null, // vtable holder
+                            "", // unique id
+                        );
+                        dib.replaceTemporary(fwd_decl, full_di_ty);
+                        // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
+                        try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
+                        return full_di_ty;
+                    },
+                    .struct_type => |struct_type| s: {
+                        const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;

-                    const full_di_ty = dib.createStructType(
-                        compile_unit_scope,
-                        name.ptr,
-                        null, // file
-                        0, // line
-                        ty.abiSize(target) * 8, // size in bits
-                        ty.abiAlignment(target) * 8, // align in bits
-                        0, // flags
-                        null, // derived from
-                        di_fields.items.ptr,
-                        @intCast(c_int, di_fields.items.len),
-                        0, // run time lang
-                        null, // vtable holder
-                        "", // unique id
-                    );
-                    dib.replaceTemporary(fwd_decl, full_di_ty);
-                    // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
-                    try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
-                    return full_di_ty;
+                        if (!struct_obj.haveFieldTypes()) {
+                            // This can happen if a struct type makes it all the way to
+                            // flush() without ever being instantiated or referenced (even
+                            // via pointer). The only reason we are hearing about it now is
+                            // that it is being used as a namespace to put other debug types
+                            // into. Therefore we can satisfy this by making an empty namespace,
+                            // rather than changing the frontend to unnecessarily resolve the
+                            // struct field types.
+ const owner_decl_index = ty.getOwnerDecl(mod); + const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); + dib.replaceTemporary(fwd_decl, struct_di_ty); + // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` + // means we can't use `gop` anymore. + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty)); + return struct_di_ty; + } + }, + else => {}, } - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; - if (!struct_obj.haveFieldTypes()) { - // This can happen if a struct type makes it all the way to - // flush() without ever being instantiated or referenced (even - // via pointer). The only reason we are hearing about it now is - // that it is being used as a namespace to put other debug types - // into. Therefore we can satisfy this by making an empty namespace, - // rather than changing the frontend to unnecessarily resolve the - // struct field types. - const owner_decl_index = ty.getOwnerDecl(); - const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); - dib.replaceTemporary(fwd_decl, struct_di_ty); - // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` - // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); - return struct_di_ty; - } - } - - if (!ty.hasRuntimeBitsIgnoreComptime()) { - const owner_decl_index = ty.getOwnerDecl(); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { + const owner_decl_index = ty.getOwnerDecl(mod); const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. 
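Every branch in this function ends with a fresh `put` keyed by `ty.toIntern()` rather than writing through the `gop` obtained earlier; the recursive `lowerDebugType` calls can grow the map and invalidate that pointer. A small sketch of the hazard, using only std's hash map:

```zig
const std = @import("std");

test "getOrPut pointers do not survive further insertions" {
    const gpa = std.testing.allocator;
    var map = std.AutoHashMap(u32, u32).init(gpa);
    defer map.deinit();

    const gop = try map.getOrPut(1);
    gop.value_ptr.* = 10;

    // Simulates the recursive `lowerDebugType` call inserting more entries,
    // which may resize the backing storage.
    var i: u32 = 2;
    while (i < 100) : (i += 1) try map.put(i, i);

    // `gop.value_ptr` may now dangle; re-put (or re-fetch) by key instead.
    try map.put(1, 11);
    try std.testing.expectEqual(@as(u32, 11), map.get(1).?);
}
```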
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty)); return struct_di_ty; } - const fields = ty.structFields(); - const layout = ty.containerLayout(); + const fields = ty.structFields(mod); + const layout = ty.containerLayout(mod); var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; defer di_fields.deinit(gpa); @@ -2114,16 +2081,15 @@ pub const Object = struct { comptime assert(struct_layout_version == 2); var offset: u64 = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(); + var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_size = field.ty.abiSize(target); - const field_align = field.alignment(target, layout); + const field_size = field.ty.abiSize(mod); + const field_align = field.alignment(mod, layout); const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = field_offset + field_size; - const field_name = try gpa.dupeZ(u8, fields.keys()[field_and_index.index]); - defer gpa.free(field_name); + const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]); try di_fields.append(gpa, dib.createMemberType( fwd_decl.toScope(), @@ -2143,8 +2109,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from di_fields.items.ptr, @@ -2155,12 +2121,12 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty)); return full_di_ty; }, .Union => { const compile_unit_scope = o.di_compile_unit.?.toScope(); - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); @@ -2178,17 +2144,17 @@ pub const Object = struct { break :blk fwd_decl; }; - const union_obj = ty.cast(Type.Payload.Union).?.data; - if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime()) { + const union_obj = mod.typeToUnion(ty).?; + if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime(mod)) { const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, union_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. 
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty)); return union_di_ty; } - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { const tag_di_ty = try o.lowerDebugType(union_obj.tag_ty, .full); @@ -2198,8 +2164,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &di_fields, @@ -2211,7 +2177,7 @@ pub const Object = struct { dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty)); return full_di_ty; } @@ -2225,24 +2191,22 @@ pub const Object = struct { const field_name = kv.key_ptr.*; const field = kv.value_ptr.*; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_size = field.ty.abiSize(target); - const field_align = field.normalAlignment(target); - - const field_name_copy = try gpa.dupeZ(u8, field_name); - defer gpa.free(field_name_copy); + const field_size = field.ty.abiSize(mod); + const field_align = field.normalAlignment(mod); + const field_di_ty = try o.lowerDebugType(field.ty, .full); di_fields.appendAssumeCapacity(dib.createMemberType( fwd_decl.toScope(), - field_name_copy, + mod.intern_pool.stringToSlice(field_name), null, // file 0, // line field_size * 8, // size in bits field_align * 8, // align in bits 0, // offset in bits 0, // flags - try o.lowerDebugType(field.ty, .full), + field_di_ty, )); } @@ -2258,8 +2222,8 @@ pub const Object = struct { union_name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags di_fields.items.ptr, @intCast(c_int, di_fields.items.len), @@ -2270,7 +2234,7 @@ pub const Object = struct { if (layout.tag_size == 0) { dib.replaceTemporary(fwd_decl, union_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty)); return union_di_ty; } @@ -2319,8 +2283,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &full_di_fields, @@ -2331,53 +2295,42 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
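The `unionGetLayout` result above gates three cases: no payload at all, payload without tag, and the full tagged layout. A rough standalone sketch of the core rule, with payload size and alignment taken as the maxima over the fields (names are hypothetical; the real query also computes tag placement and padding):

```zig
const std = @import("std");

const UnionLayout = struct {
    payload_size: u64,
    payload_align: u64,
    tag_size: u64,
};

// Payload occupies the widest member; alignment is the strictest member's.
fn unionLayout(field_sizes: []const u64, field_aligns: []const u64, tag_size: u64) UnionLayout {
    var payload_size: u64 = 0;
    var payload_align: u64 = 1;
    for (field_sizes, field_aligns) |s, a| {
        payload_size = @max(payload_size, s);
        payload_align = @max(payload_align, a);
    }
    return .{ .payload_size = payload_size, .payload_align = payload_align, .tag_size = tag_size };
}

test "union payload is the widest field" {
    const layout = unionLayout(&[_]u64{ 4, 8, 1 }, &[_]u64{ 4, 8, 1 }, 1);
    try std.testing.expectEqual(@as(u64, 8), layout.payload_size);
    try std.testing.expectEqual(@as(u64, 8), layout.payload_align);
}
```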
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty)); return full_di_ty; }, .Fn => { - const fn_info = ty.fnInfo(); + const fn_info = mod.typeToFunc(ty).?; var param_di_types = std.ArrayList(*llvm.DIType).init(gpa); defer param_di_types.deinit(); // Return type goes first. - if (fn_info.return_type.hasRuntimeBitsIgnoreComptime()) { - const sret = firstParamSRet(fn_info, target); - const di_ret_ty = if (sret) Type.void else fn_info.return_type; + if (fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) { + const sret = firstParamSRet(fn_info, mod); + const di_ret_ty = if (sret) Type.void else fn_info.return_type.toType(); try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); if (sret) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = fn_info.return_type, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(fn_info.return_type.toType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } } else { try param_di_types.append(try o.lowerDebugType(Type.void, .full)); } - if (fn_info.return_type.isError() and + if (fn_info.return_type.toType().isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = o.getStackTraceType(), - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } - for (fn_info.param_types) |param_ty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + for (0..mod.typeToFunc(ty).?.param_types.len) |i| { + const param_ty = mod.typeToFunc(ty).?.param_types[i].toType(); + if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - if (isByRef(param_ty)) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = param_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + if (isByRef(param_ty, mod)) { + const ptr_ty = try mod.singleMutPtrType(param_ty); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } else { try param_di_types.append(try o.lowerDebugType(param_ty, .full)); @@ -2390,7 +2343,7 @@ pub const Object = struct { 0, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(fn_di_ty)); return fn_di_ty; }, .ComptimeInt => unreachable, @@ -2405,8 +2358,10 @@ pub const Object = struct { } } - fn namespaceToDebugScope(o: *Object, namespace: *const Module.Namespace) !*llvm.DIScope { - if (namespace.parent == null) { + fn namespaceToDebugScope(o: *Object, namespace_index: Module.Namespace.Index) !*llvm.DIScope { + const mod = o.module; + const namespace = mod.namespacePtr(namespace_index); + if (namespace.parent == .none) { const di_file = try o.getDIFile(o.gpa, namespace.file_scope); return di_file.toScope(); } @@ -2418,12 +2373,14 @@ pub const Object = struct { /// Assertion `!isa(Scope) && "shouldn't make a namespace scope for a type"' /// when targeting CodeView (Windows). 
    fn makeEmptyNamespaceDIType(o: *Object, decl_index: Module.Decl.Index) !*llvm.DIType {
-        const decl = o.module.declPtr(decl_index);
+        const mod = o.module;
+        const decl = mod.declPtr(decl_index);
         const fields: [0]*llvm.DIType = .{};
+        const di_scope = try o.namespaceToDebugScope(decl.src_namespace);
         return o.di_builder.?.createStructType(
-            try o.namespaceToDebugScope(decl.src_namespace),
-            decl.name, // TODO use fully qualified name
-            try o.getDIFile(o.gpa, decl.src_namespace.file_scope),
+            di_scope,
+            mod.intern_pool.stringToSlice(decl.name), // TODO use fully qualified name
+            try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope),
             decl.src_line + 1,
             0, // size in bits
             0, // align in bits
@@ -2437,28 +2394,28 @@ pub const Object = struct {
         );
     }

-    fn getStackTraceType(o: *Object) Type {
+    fn getStackTraceType(o: *Object) Allocator.Error!Type {
         const mod = o.module;

         const std_pkg = mod.main_pkg.table.get("std").?;
         const std_file = (mod.importPkg(std_pkg) catch unreachable).file;

-        const builtin_str: []const u8 = "builtin";
-        const std_namespace = mod.declPtr(std_file.root_decl.unwrap().?).src_namespace;
+        const builtin_str = try mod.intern_pool.getOrPutString(mod.gpa, "builtin");
+        const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace);
         const builtin_decl = std_namespace.decls
             .getKeyAdapted(builtin_str, Module.DeclAdapter{ .mod = mod }).?;

-        const stack_trace_str: []const u8 = "StackTrace";
+        const stack_trace_str = try mod.intern_pool.getOrPutString(mod.gpa, "StackTrace");
-        // buffer is only used for int_type, `builtin` is a struct.
-        const builtin_ty = mod.declPtr(builtin_decl).val.toType(undefined);
-        const builtin_namespace = builtin_ty.getNamespace().?;
+        const builtin_ty = mod.declPtr(builtin_decl).val.toType();
+        const builtin_namespace = builtin_ty.getNamespace(mod).?;
         const stack_trace_decl_index = builtin_namespace.decls
             .getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .mod = mod }).?;
         const stack_trace_decl = mod.declPtr(stack_trace_decl_index);

         // Sema should have ensured that StackTrace was analyzed.
        assert(stack_trace_decl.has_tv);
-        return stack_trace_decl.val.toType(undefined);
+        return stack_trace_decl.val.toType();
    }
};
@@ -2474,7 +2431,8 @@ pub const DeclGen = struct {
    fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
        @setCold(true);
        assert(self.err_msg == null);
-        const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl);
+        const mod = self.module;
+        const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl, mod);
        self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args);
        return error.CodegenFail;
    }
@@ -2484,31 +2442,27 @@ pub const DeclGen = struct {
    }

    fn genDecl(dg: *DeclGen) !void {
+        const mod = dg.module;
        const decl = dg.decl;
        const decl_index = dg.decl_index;
        assert(decl.has_tv);
-        log.debug("gen: {s} type: {}, value: {}", .{
-            decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(),
-        });
-        assert(decl.val.tag() != .function);
-        if (decl.val.castTag(.extern_fn)) |extern_fn| {
-            _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl);
+        if (decl.val.getExternFunc(mod)) |extern_func| {
+            _ = try dg.resolveLlvmFunction(extern_func.decl);
        } else {
-            const target = dg.module.getTarget();
+            const target = mod.getTarget();
            var global = try dg.resolveGlobalDecl(decl_index);
-            global.setAlignment(decl.getAlignment(target));
-            if (decl.@"linksection") |section| global.setSection(section);
+            global.setAlignment(decl.getAlignment(mod));
+            if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| global.setSection(s);
            assert(decl.has_tv);
-            const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
-                const variable = payload.data;
+            const init_val = if (decl.val.getVariable(mod)) |variable| init_val: {
                break :init_val variable.init;
            } else init_val: {
                global.setGlobalConstant(.True);
-                break :init_val decl.val;
+                break :init_val decl.val.toIntern();
            };
-            if (init_val.tag() != .unreachable_value) {
-                const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val });
+            if (init_val != .none) {
+                const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() });
                if (global.globalGetValueType() == llvm_init.typeOf()) {
                    global.setInitializer(llvm_init);
                } else {
@@ -2533,7 +2487,8 @@ pub const DeclGen = struct {
                    new_global.setLinkage(global.getLinkage());
                    new_global.setUnnamedAddr(global.getUnnamedAddress());
                    new_global.setAlignment(global.getAlignment());
-                    if (decl.@"linksection") |section| new_global.setSection(section);
+                    if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
+                        new_global.setSection(s);
                    new_global.setInitializer(llvm_init);

                    // TODO: How should this work when the address space of a global changed?
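Throughout this hunk, names and link sections flow through intern-pool handles (`getOrPutString`, `stringToSlice`, `stringToSliceUnwrap`) instead of owned C strings, which is what removes the `dupeZ`/`free` pairs deleted above. A toy interner showing the handle-instead-of-slice pattern (hypothetical, and far simpler than the compiler's actual `InternPool`):

```zig
const std = @import("std");

const Interner = struct {
    map: std.StringHashMap(u32),
    names: std.ArrayList([]const u8),

    fn init(gpa: std.mem.Allocator) Interner {
        return .{
            .map = std.StringHashMap(u32).init(gpa),
            .names = std.ArrayList([]const u8).init(gpa),
        };
    }
    fn deinit(self: *Interner) void {
        self.map.deinit();
        self.names.deinit();
    }
    // Same string always yields the same small handle.
    fn getOrPutString(self: *Interner, s: []const u8) !u32 {
        const gop = try self.map.getOrPut(s);
        if (!gop.found_existing) {
            gop.value_ptr.* = @intCast(u32, self.names.items.len);
            try self.names.append(s);
        }
        return gop.value_ptr.*;
    }
    fn stringToSlice(self: *Interner, index: u32) []const u8 {
        return self.names.items[index];
    }
};

test "interned handles compare by value" {
    var it = Interner.init(std.testing.allocator);
    defer it.deinit();
    const a = try it.getOrPutString("StackTrace");
    const b = try it.getOrPutString("StackTrace");
    try std.testing.expect(a == b);
    try std.testing.expectEqualStrings("StackTrace", it.stringToSlice(a));
}
```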
global.replaceAllUsesWith(new_global); @@ -2545,13 +2500,13 @@ pub const DeclGen = struct { } if (dg.object.di_builder) |dib| { - const di_file = try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope); + const di_file = try dg.object.getDIFile(dg.gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; const is_internal_linkage = !dg.module.decl_exports.contains(decl_index); const di_global = dib.createGlobalVariableExpression( di_file.toScope(), - decl.name, + mod.intern_pool.stringToSlice(decl.name), global.getValueName(), di_file, line_number, @@ -2560,7 +2515,7 @@ pub const DeclGen = struct { ); try dg.object.di_map.put(dg.gpa, dg.decl, di_global.getVariable().toNode()); - if (!is_internal_linkage or decl.isExtern()) global.attachMetaData(di_global); + if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global); } } } @@ -2569,36 +2524,35 @@ pub const DeclGen = struct { /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*llvm.Value { - const decl = dg.module.declPtr(decl_index); + const mod = dg.module; + const decl = mod.declPtr(decl_index); const zig_fn_type = decl.ty; const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index); if (gop.found_existing) return gop.value_ptr.*; assert(decl.has_tv); - const fn_info = zig_fn_type.fnInfo(); - const target = dg.module.getTarget(); - const sret = firstParamSRet(fn_info, target); + const fn_info = mod.typeToFunc(zig_fn_type).?; + const target = mod.getTarget(); + const sret = firstParamSRet(fn_info, mod); const fn_type = try dg.lowerType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(dg.module); - defer dg.gpa.free(fqn); + const fqn = try decl.getFullyQualifiedName(mod); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); + const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(mod.intern_pool.stringToSlice(fqn), fn_type, llvm_addrspace); gop.value_ptr.* = llvm_fn; - const is_extern = decl.isExtern(); + const is_extern = decl.isExtern(mod); if (!is_extern) { llvm_fn.setLinkage(.Internal); llvm_fn.setUnnamedAddr(.True); } else { - if (dg.module.getTarget().isWasm()) { - dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); - if (decl.getExternFn().?.lib_name) |lib_name| { - const module_name = std.mem.sliceTo(lib_name, 0); - if (!std.mem.eql(u8, module_name, "c")) { - dg.addFnAttrString(llvm_fn, "wasm-import-module", module_name); + if (target.isWasm()) { + dg.addFnAttrString(llvm_fn, "wasm-import-name", mod.intern_pool.stringToSlice(decl.name)); + if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); } } } @@ -2608,12 +2562,12 @@ pub const DeclGen = struct { dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 dg.addArgAttr(llvm_fn, 0, "noalias"); - const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type); + const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type.toType()); llvm_fn.addSretAttr(raw_llvm_ret_ty); } - const err_return_tracing = fn_info.return_type.isError() and - dg.module.comp.bin_file.options.error_return_tracing; + const err_return_tracing = 
fn_info.return_type.toType().isError(mod) and + mod.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { dg.addArgAttr(llvm_fn, @boolToInt(sret), "nonnull"); @@ -2635,14 +2589,14 @@ pub const DeclGen = struct { }, } - if (fn_info.alignment != 0) { - llvm_fn.setAlignment(fn_info.alignment); + if (fn_info.alignment.toByteUnitsOptional()) |a| { + llvm_fn.setAlignment(@intCast(c_uint, a)); } // Function attributes that are independent of analysis results of the function body. dg.addCommonFnAttributes(llvm_fn); - if (fn_info.return_type.isNoReturn()) { + if (fn_info.return_type == .noreturn_type) { dg.addFnAttr(llvm_fn, "noreturn"); } @@ -2655,15 +2609,15 @@ pub const DeclGen = struct { while (it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; - if (!isByRef(param_ty)) { + const param_ty = fn_info.param_types[param_index].toType(); + if (!isByRef(param_ty, mod)) { dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = fn_info.param_types[it.zig_index - 1]; - const param_llvm_ty = try dg.lowerType(param_ty); - const alignment = param_ty.abiAlignment(target); + const param_llvm_ty = try dg.lowerType(param_ty.toType()); + const alignment = param_ty.toType().abiAlignment(mod); dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { @@ -2735,35 +2689,35 @@ pub const DeclGen = struct { if (gop.found_existing) return gop.value_ptr.*; errdefer assert(dg.object.decl_map.remove(decl_index)); - const decl = dg.module.declPtr(decl_index); - const fqn = try decl.getFullyQualifiedName(dg.module); - defer dg.gpa.free(fqn); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + const fqn = try decl.getFullyQualifiedName(mod); - const target = dg.module.getTarget(); + const target = mod.getTarget(); const llvm_type = try dg.lowerType(decl.ty); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace( llvm_type, - fqn, + mod.intern_pool.stringToSlice(fqn), llvm_actual_addrspace, ); gop.value_ptr.* = llvm_global; // This is needed for declarations created by `@extern`. 
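The `byval`/`byref` split in the parameter loop above decides whether an argument travels directly or behind a pointer. A hedged sketch of the shape of that decision; the real predicate (`isByRef`) depends on type category and target details, and the two-pointer threshold below is purely illustrative:

```zig
const std = @import("std");

// Illustrative only: scalars go by value; aggregates larger than two
// pointer-sized words go behind a pointer.
fn passByRef(abi_size: u64, is_aggregate: bool, ptr_size: u64) bool {
    if (!is_aggregate) return false;
    return abi_size > 2 * ptr_size;
}

test "large aggregates go by reference" {
    try std.testing.expect(!passByRef(8, false, 8)); // u64: by value
    try std.testing.expect(!passByRef(16, true, 8)); // small struct: by value
    try std.testing.expect(passByRef(64, true, 8)); // big struct: by pointer
}
```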
- if (decl.isExtern()) { - llvm_global.setValueName(decl.name); + if (decl.isExtern(mod)) { + llvm_global.setValueName(mod.intern_pool.stringToSlice(decl.name)); llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); - if (decl.val.castTag(.variable)) |variable| { - const single_threaded = dg.module.comp.bin_file.options.single_threaded; - if (variable.data.is_threadlocal and !single_threaded) { + if (decl.val.getVariable(mod)) |variable| { + const single_threaded = mod.comp.bin_file.options.single_threaded; + if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); + if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); } } else { llvm_global.setLinkage(.Internal); @@ -2784,12 +2738,13 @@ pub const DeclGen = struct { fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type { const llvm_ty = try lowerTypeInner(dg, t); + const mod = dg.module; if (std.debug.runtime_safety and false) check: { - if (t.zigTypeTag() == .Opaque) break :check; - if (!t.hasRuntimeBits()) break :check; + if (t.zigTypeTag(mod) == .Opaque) break :check; + if (!t.hasRuntimeBits(mod)) break :check; if (!llvm_ty.isSized().toBool()) break :check; - const zig_size = t.abiSize(dg.module.getTarget()); + const zig_size = t.abiSize(mod); const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty); if (llvm_size != zig_size) { log.err("when lowering {}, Zig ABI size = {d} but LLVM ABI size = {d}", .{ @@ -2802,18 +2757,18 @@ pub const DeclGen = struct { fn lowerTypeInner(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type { const gpa = dg.gpa; - const target = dg.module.getTarget(); - switch (t.zigTypeTag()) { + const mod = dg.module; + const target = mod.getTarget(); + switch (t.zigTypeTag(mod)) { .Void, .NoReturn => return dg.context.voidType(), .Int => { - const info = t.intInfo(target); + const info = t.intInfo(mod); assert(info.bits != 0); return dg.context.intType(info.bits); }, .Enum => { - var buffer: Type.Payload.Bits = undefined; - const int_ty = t.intTagType(&buffer); - const bit_count = int_ty.intInfo(target).bits; + const int_ty = t.intTagType(mod); + const bit_count = int_ty.intInfo(mod).bits; assert(bit_count != 0); return dg.context.intType(bit_count); }, @@ -2827,9 +2782,8 @@ pub const DeclGen = struct { }, .Bool => return dg.context.intType(1), .Pointer => { - if (t.isSlice()) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_type = t.slicePtrFieldType(&buf); + if (t.isSlice(mod)) { + const ptr_type = t.slicePtrFieldType(mod); const fields: [2]*llvm.Type = .{ try dg.lowerType(ptr_type), @@ -2837,49 +2791,41 @@ pub const DeclGen = struct { }; return dg.context.structType(&fields, fields.len, .False); } - const ptr_info = t.ptrInfo().data; + const ptr_info = t.ptrInfo(mod); const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target); return dg.context.pointerType(llvm_addrspace); }, - .Opaque => switch (t.tag()) { - .@"opaque" => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); - if (gop.found_existing) return gop.value_ptr.*; + .Opaque => { + if (t.toIntern() == .anyopaque_type) return dg.context.intType(8); - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. 
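The `runtime_safety and false` check above (currently disabled) cross-validates Zig's computed ABI size against LLVM's `abiSizeOfType` for the lowered type. The invariant it guards, demonstrated with Zig's own C-ABI layout on a typical 64-bit target:

```zig
const std = @import("std");

test "abi size matches the summed, aligned field layout" {
    const S = extern struct { a: u8, b: u32, c: u16 };
    // a at 0, b aligned up to 4, c at 8; total rounds up to align(4) = 12.
    try std.testing.expectEqual(@as(usize, 4), @offsetOf(S, "b"));
    try std.testing.expectEqual(@as(usize, 8), @offsetOf(S, "c"));
    try std.testing.expectEqual(@as(usize, 12), @sizeOf(S));
}
```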
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); + if (gop.found_existing) return gop.value_ptr.*; - const opaque_obj = t.castTag(.@"opaque").?.data; - const name = try opaque_obj.getFullyQualifiedName(dg.module); - defer gpa.free(name); + const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type; + const name = mod.intern_pool.stringToSlice(try mod.opaqueFullyQualifiedName(opaque_type)); - const llvm_struct_ty = dg.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - return llvm_struct_ty; - }, - .anyopaque => return dg.context.intType(8), - else => unreachable, + const llvm_struct_ty = dg.context.structCreateNamed(name); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + return llvm_struct_ty; }, .Array => { - const elem_ty = t.childType(); - assert(elem_ty.onePossibleValue() == null); + const elem_ty = t.childType(mod); + if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); - const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); + const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); }, .Vector => { - const elem_type = try dg.lowerType(t.childType()); - return elem_type.vectorType(t.vectorLen()); + const elem_type = try dg.lowerType(t.childType(mod)); + return elem_type.vectorType(t.vectorLen(mod)); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&buf); - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { + const child_ty = t.optionalChild(mod); + if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.context.intType(8); } const payload_llvm_ty = try dg.lowerType(child_ty); - if (t.optionalReprIsPayload()) { + if (t.optionalReprIsPayload(mod)) { return payload_llvm_ty; } @@ -2887,8 +2833,8 @@ pub const DeclGen = struct { var fields_buf: [3]*llvm.Type = .{ payload_llvm_ty, dg.context.intType(8), undefined, }; - const offset = child_ty.abiSize(target) + 1; - const abi_size = t.abiSize(target); + const offset = child_ty.abiSize(mod) + 1; + const abi_size = t.abiSize(mod); const padding = @intCast(c_uint, abi_size - offset); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2897,18 +2843,18 @@ pub const DeclGen = struct { return dg.context.structType(&fields_buf, 3, .False); }, .ErrorUnion => { - const payload_ty = t.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const payload_ty = t.errorUnionPayload(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try dg.lowerType(Type.anyerror); } const llvm_error_type = try dg.lowerType(Type.anyerror); const llvm_payload_type = try dg.lowerType(payload_ty); - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); - const payload_size = payload_ty.abiSize(target); - const error_size = Type.anyerror.abiSize(target); + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiSize(mod); var fields_buf: [3]*llvm.Type = undefined; if (error_align > payload_align) { @@ -2941,66 +2887,64 @@ pub const DeclGen = struct { }, .ErrorSet => return dg.context.intType(16), .Struct => { - const gop 
= try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); + const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) { + .anon_struct_type => |tuple| { + const llvm_struct_ty = dg.context.structCreateNamed(""); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - if (t.isSimpleTupleOrAnonStruct()) { - const tuple = t.tupleFields(); - const llvm_struct_ty = dg.context.structCreateNamed(""); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; + defer llvm_field_types.deinit(gpa); - var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); + try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); - try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; + for (tuple.types, tuple.values) |field_ty, field_val| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; - for (tuple.types, 0..) |field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - const field_align = field_ty.abiAlignment(target); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } + const field_llvm_ty = try dg.lowerType(field_ty.toType()); + try llvm_field_types.append(gpa, field_llvm_ty); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); + offset += field_ty.toType().abiSize(mod); } - const field_llvm_ty = try dg.lowerType(field_ty); - try llvm_field_types.append(gpa, field_llvm_ty); - - offset += field_ty.abiSize(target); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } } - } - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), - .False, - ); + llvm_struct_ty.structSetBody( + 
llvm_field_types.items.ptr, + @intCast(c_uint, llvm_field_types.items.len), + .False, + ); - return llvm_struct_ty; - } + return llvm_struct_ty; + }, + .struct_type => |struct_type| struct_type, + else => unreachable, + }; - const struct_obj = t.castTag(.@"struct").?.data; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -3009,8 +2953,7 @@ pub const DeclGen = struct { return int_llvm_ty; } - const name = try struct_obj.getFullyQualifiedName(dg.module); - defer gpa.free(name); + const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); const llvm_struct_ty = dg.context.structCreateNamed(name); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls @@ -3027,11 +2970,11 @@ pub const DeclGen = struct { var big_align: u32 = 1; var any_underaligned_fields = false; - var it = struct_obj.runtimeFieldIterator(); + var it = struct_obj.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, struct_obj.layout); - const field_ty_align = field.ty.abiAlignment(target); + const field_align = field.alignment(mod, struct_obj.layout); + const field_ty_align = field.ty.abiAlignment(mod); any_underaligned_fields = any_underaligned_fields or field_align < field_ty_align; big_align = @max(big_align, field_align); @@ -3046,7 +2989,7 @@ pub const DeclGen = struct { const field_llvm_ty = try dg.lowerType(field.ty); try llvm_field_types.append(gpa, field_llvm_ty); - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } { const prev_offset = offset; @@ -3067,18 +3010,14 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Union => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. 
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - - const layout = t.unionGetLayout(target); - const union_obj = t.cast(Type.Payload.Union).?.data; + const layout = t.unionGetLayout(mod); + const union_obj = mod.typeToUnion(t).?; if (union_obj.layout == .Packed) { - const bitsize = @intCast(c_uint, t.bitSize(target)); + const bitsize = @intCast(c_uint, t.bitSize(mod)); const int_llvm_ty = dg.context.intType(bitsize); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; @@ -3090,8 +3029,7 @@ pub const DeclGen = struct { return enum_tag_llvm_ty; } - const name = try union_obj.getFullyQualifiedName(dg.module); - defer gpa.free(name); + const name = mod.intern_pool.stringToSlice(try union_obj.getFullyQualifiedName(mod)); const llvm_union_ty = dg.context.structCreateNamed(name); gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls @@ -3155,25 +3093,21 @@ pub const DeclGen = struct { } fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type { - const target = dg.module.getTarget(); - const fn_info = fn_ty.fnInfo(); + const mod = dg.module; + const fn_info = mod.typeToFunc(fn_ty).?; const llvm_ret_ty = try lowerFnRetTy(dg, fn_info); var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa); defer llvm_params.deinit(); - if (firstParamSRet(fn_info, target)) { + if (firstParamSRet(fn_info, mod)) { try llvm_params.append(dg.context.pointerType(0)); } - if (fn_info.return_type.isError() and - dg.module.comp.bin_file.options.error_return_tracing) + if (fn_info.return_type.toType().isError(mod) and + mod.comp.bin_file.options.error_return_tracing) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = dg.object.getStackTraceType(), - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(try dg.object.getStackTraceType()); try llvm_params.append(try dg.lowerType(ptr_ty)); } @@ -3181,25 +3115,23 @@ pub const DeclGen = struct { while (it.next()) |lowering| switch (lowering) { .no_bits => continue, .byval => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); try llvm_params.append(try dg.lowerType(param_ty)); }, .byref, .byref_mut => { try llvm_params.append(dg.context.pointerType(0)); }, .abi_sized_int => { - const param_ty = fn_info.param_types[it.zig_index - 1]; - const abi_size = @intCast(c_uint, param_ty.abiSize(target)); + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); try llvm_params.append(dg.context.intType(abi_size * 8)); }, .slice => { - const param_ty = fn_info.param_types[it.zig_index - 1]; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - var opt_buf: Type.Payload.ElemType = undefined; - const ptr_ty = if (param_ty.zigTypeTag() == .Optional) - param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf) + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) + param_ty.optionalChild(mod).slicePtrFieldType(mod) else - param_ty.slicePtrFieldType(&buf); + param_ty.slicePtrFieldType(mod); const ptr_llvm_ty = try dg.lowerType(ptr_ty); const len_llvm_ty = try dg.lowerType(Type.usize); @@ -3214,8 +3146,8 @@ pub const DeclGen = struct { try llvm_params.append(dg.context.intType(16)); }, .float_array => |count| { - const param_ty = fn_info.param_types[it.zig_index - 1]; - const float_ty = try 
dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty).?); + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); const field_count = @intCast(c_uint, count); const arr_ty = float_ty.arrayType(field_count); try llvm_params.append(arr_ty); @@ -3239,11 +3171,12 @@ pub const DeclGen = struct { /// being a zero bit type, but it should still be lowered as an i8 in such case. /// There are other similar cases handled here as well. fn lowerPtrElemTy(dg: *DeclGen, elem_ty: Type) Allocator.Error!*llvm.Type { - const lower_elem_ty = switch (elem_ty.zigTypeTag()) { + const mod = dg.module; + const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, - .Fn => !elem_ty.fnInfo().is_generic, - .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(), - else => elem_ty.hasRuntimeBitsIgnoreComptime(), + .Fn => !mod.typeToFunc(elem_ty).?.is_generic, + .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), + else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; const llvm_elem_ty = if (lower_elem_ty) try dg.lowerType(elem_ty) @@ -3254,374 +3187,105 @@ pub const DeclGen = struct { } fn lowerValue(dg: *DeclGen, arg_tv: TypedValue) Error!*llvm.Value { + const mod = dg.module; + const target = mod.getTarget(); var tv = arg_tv; - if (tv.val.castTag(.runtime_value)) |rt| { - tv.val = rt.data; + switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { + .runtime_value => |rt| tv.val = rt.val.toValue(), + else => {}, } - if (tv.val.isUndef()) { + if (tv.val.isUndefDeep(mod)) { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } - const target = dg.module.getTarget(); - switch (tv.ty.zigTypeTag()) { - .Bool => { - const llvm_type = try dg.lowerType(tv.ty); - return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); - }, - // TODO this duplicates code with Pointer but they should share the handling - // of the tv.val.tag() and then Int should do extra constPtrToInt on top - .Int => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - else => { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, target); - const int_info = tv.ty.intInfo(target); - assert(int_info.bits != 0); - const llvm_type = dg.context.intType(int_info.bits); + const val_key = mod.intern_pool.indexToKey(tv.val.toIntern()); + switch (val_key) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; - }, - }, - .Enum => { - var int_buffer: Value.Payload.U64 = undefined; - const int_val = tv.enumToInt(&int_buffer); - - var bigint_space: Value.BigIntSpace = undefined; - const bigint = 
int_val.toBigInt(&bigint_space, target); - - const int_info = tv.ty.intInfo(target); - const llvm_type = dg.context.intType(int_info.bits); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; - }, - .Float => { - const llvm_ty = try dg.lowerType(tv.ty); - switch (tv.ty.floatBits(target)) { - 16 => { - const repr = @bitCast(u16, tv.val.toFloat(f16)); - const llvm_i16 = dg.context.intType(16); - const int = llvm_i16.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 32 => { - const repr = @bitCast(u32, tv.val.toFloat(f32)); - const llvm_i32 = dg.context.intType(32); - const int = llvm_i32.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 64 => { - const repr = @bitCast(u64, tv.val.toFloat(f64)); - const llvm_i64 = dg.context.intType(64); - const int = llvm_i64.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 80 => { - const float = tv.val.toFloat(f80); - const repr = std.math.break_f80(float); - const llvm_i80 = dg.context.intType(80); - var x = llvm_i80.constInt(repr.exp, .False); - x = x.constShl(llvm_i80.constInt(64, .False)); - x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); - if (backendSupportsF80(target)) { - return x.constBitCast(llvm_ty); - } else { - return x; - } - }, - 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128)); - // LLVM seems to require that the lower half of the f128 be placed first - // in the buffer. 
- if (native_endian == .Big) { - std.mem.swap(u64, &buf[0], &buf[1]); - } - const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); - return int.constBitCast(llvm_ty); - }, - else => unreachable, - } - }, - .Pointer => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - .variable => { - const decl_index = tv.val.castTag(.variable).?.data.owner_decl; - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const val = try dg.resolveGlobalDecl(decl_index); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - return addrspace_casted_ptr; - }, - .slice => { - const slice = tv.val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const fields: [2]*llvm.Value = .{ - try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(&buf), - .val = slice.ptr, - }), - try dg.lowerValue(.{ - .ty = Type.usize, - .val = slice.len, - }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False); - return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); - }, - .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo().data.bit_offset % 8 == 0); - }, - .null_value, .zero => { + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => { const llvm_type = try dg.lowerType(tv.ty); - return llvm_type.constNull(); + return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); }, - .opt_payload => { - const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo().data.bit_offset % 8 == 0); - }, - else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ - tv.ty.fmtDebug(), tag, - }), }, - .Array => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel()), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ); - }, - .str_lit => { - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel()) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(target)); - if (byte == 0 and bytes.len > 0) { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .False, // Yes, null terminate. - ); - } - var array = std.ArrayList(u8).init(dg.gpa); - defer array.deinit(); - try array.ensureUnusedCapacity(bytes.len + 1); - array.appendSliceAssumeCapacity(bytes); - array.appendAssumeCapacity(byte); - return dg.context.constString( - array.items.ptr, - @intCast(c_uint, array.items.len), - .True, // Don't null terminate. 
- ); - } else { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .True, // Don't null terminate. `bytes` has the sentinel, if any. - ); - } - }, - .aggregate => { - const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.elemType(); - const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel()); - const llvm_elems = try gpa.alloc(*llvm.Value, len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals[0..len], 0..) |elem_val, i| { - llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - .repeated => { - const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const sentinel = tv.ty.sentinel(); - const len = @intCast(usize, tv.ty.arrayLen()); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); - - var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); - } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } - - if (sentinel) |sent| { - llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); - } - - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - .empty_array_sentinel => { - const elem_ty = tv.ty.elemType(); - const sent_val = tv.ty.sentinel().?; - const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); - const llvm_elems: [1]*llvm.Value = .{sentinel}; - const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); - if (need_unnamed) { - return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); - } - }, - else => unreachable, - }, - .Optional => { - comptime assert(optional_layout_version == 3); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = tv.ty.optionalChild(&buf); - - const llvm_i8 = dg.context.intType(8); - const is_pl = !tv.val.isNull(); - const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - return non_null_bit; - } - const llvm_ty = try dg.lowerType(tv.ty); - if (tv.ty.optionalReprIsPayload()) { - if (tv.val.castTag(.opt_payload)) |payload| { - return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }); - } else if (is_pl) { - return dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }); - } else { - return llvm_ty.constNull(); - } - } - assert(payload_ty.zigTypeTag() != .Fn); - - const llvm_field_count = llvm_ty.countStructElementTypes(); - var fields_buf: [3]*llvm.Value = undefined; - fields_buf[0] = try dg.lowerValue(.{ - .ty = payload_ty, - .val = if 
(tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef), - }); - fields_buf[1] = non_null_bit; - if (llvm_field_count > 2) { - assert(llvm_field_count == 3); - fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); - } - return dg.context.constStruct(&fields_buf, llvm_field_count, .False); - }, - .Fn => { - const fn_decl_index = switch (tv.val.tag()) { - .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl, - .function => tv.val.castTag(.function).?.data.owner_decl, + .variable, + .enum_literal, + .empty_enum_value, + => unreachable, // non-runtime values + .extern_func, .func => { + const fn_decl_index = switch (val_key) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, else => unreachable, }; const fn_decl = dg.module.declPtr(fn_decl_index); - dg.module.markDeclAlive(fn_decl); + try dg.module.markDeclAlive(fn_decl); return dg.resolveLlvmFunction(fn_decl_index); }, - .ErrorSet => { - const llvm_ty = try dg.lowerType(Type.anyerror); - switch (tv.val.tag()) { - .@"error" => { - const err_name = tv.val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - return llvm_ty.constInt(kv.value, .False); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. - return llvm_ty.constNull(); - }, - } + .int => { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = tv.val.toBigInt(&bigint_space, mod); + return lowerBigInt(dg, tv.ty, bigint); }, - .ErrorUnion => { - const payload_type = tv.ty.errorUnionPayload(); - const is_pl = tv.val.errorUnionIsPayload(); - - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + .err => |err| { + const llvm_ty = try dg.lowerType(Type.anyerror); + const int = try mod.getErrorValue(err.name); + return llvm_ty.constInt(int, .False); + }, + .error_union => |error_union| { + const err_tv: TypedValue = switch (error_union.val) { + .err_name => |err_name| .{ + .ty = tv.ty.errorUnionSet(mod), + .val = (try mod.intern(.{ .err = .{ + .ty = tv.ty.errorUnionSet(mod).toIntern(), + .name = err_name, + } })).toValue(), + }, + .payload => .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, + }; + const payload_type = tv.ty.errorUnionPayload(mod); + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
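The error union lowering above orders the error code and the payload so that the member with stricter alignment comes first, then pads out to the ABI size; `anyerror` itself lowers to a 16-bit integer in this backend. A minimal sketch of the ordering rule (enum and function names are hypothetical):

```zig
const std = @import("std");

const Order = enum { error_first, payload_first };

// Mirrors the `if (error_align > payload_align)` branch in the lowering.
fn errorUnionOrder(error_align: u64, payload_align: u64) Order {
    return if (error_align > payload_align) .error_first else .payload_first;
}

test "error union member order follows alignment" {
    // u8 payload (align 1) vs. a 2-byte error code: error goes first.
    try std.testing.expectEqual(Order.error_first, errorUnionOrder(2, 1));
    // u64 payload (align 8): payload goes first.
    try std.testing.expectEqual(Order.payload_first, errorUnionOrder(2, 8));
}
```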
- const err_val = if (!is_pl) tv.val else Value.initTag(.zero); - return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); + return dg.lowerValue(err_tv); } - const payload_align = payload_type.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - const llvm_error_value = try dg.lowerValue(.{ - .ty = Type.anyerror, - .val = if (is_pl) Value.initTag(.zero) else tv.val, - }); + const payload_align = payload_type.abiAlignment(mod); + const error_align = err_tv.ty.abiAlignment(mod); + const llvm_error_value = try dg.lowerValue(err_tv); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), + .val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), + .payload => |payload| payload, + }.toValue(), }); var fields_buf: [3]*llvm.Value = undefined; @@ -3642,13 +3306,235 @@ pub const DeclGen = struct { return dg.context.constStruct(&fields_buf, llvm_field_count, .False); } }, - .Struct => { - const llvm_struct_ty = try dg.lowerType(tv.ty); - const field_vals = tv.val.castTag(.aggregate).?.data; - const gpa = dg.gpa; + .enum_tag => { + const int_val = try tv.enumToInt(mod); + + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int_val.toBigInt(&bigint_space, mod); + + const int_info = tv.ty.intInfo(mod); + const llvm_type = dg.context.intType(int_info.bits); + + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); + }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); + } + return unsigned_val; + }, + .float => { + const llvm_ty = try dg.lowerType(tv.ty); + switch (tv.ty.floatBits(target)) { + 16 => { + const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); + const llvm_i16 = dg.context.intType(16); + const int = llvm_i16.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 32 => { + const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); + const llvm_i32 = dg.context.intType(32); + const int = llvm_i32.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 64 => { + const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); + const llvm_i64 = dg.context.intType(64); + const int = llvm_i64.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 80 => { + const float = tv.val.toFloat(f80, mod); + const repr = std.math.break_f80(float); + const llvm_i80 = dg.context.intType(80); + var x = llvm_i80.constInt(repr.exp, .False); + x = x.constShl(llvm_i80.constInt(64, .False)); + x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); + if (backendSupportsF80(target)) { + return x.constBitCast(llvm_ty); + } else { + return x; + } + }, + 128 => { + var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); + // LLVM seems to require that the lower half of the f128 be placed first + // in the buffer. 
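Float constants are emitted above as integer bit patterns and bitcast to the LLVM float type; f80 in particular is assembled from a 16-bit exponent part shifted left by 64 and OR'd with the 64-bit fraction. The same round trips in plain Zig:

```zig
const std = @import("std");

test "floats lower as their raw bit patterns" {
    // f32 1.5 = sign 0, biased exponent 127, mantissa 0b100... -> 0x3FC00000.
    try std.testing.expectEqual(@as(u32, 0x3FC00000), @bitCast(u32, @as(f32, 1.5)));
    // f80 1.0: biased exponent 0x3FFF in the high 16 bits, explicit integer
    // bit set in the 64-bit fraction; hence the shl-by-64 in the code above.
    const exp: u80 = 0x3FFF;
    const fraction: u80 = 0x8000000000000000;
    try std.testing.expectEqual((exp << 64) | fraction, @bitCast(u80, @as(f80, 1.0)));
}
```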
+ if (native_endian == .Big) { + std.mem.swap(u64, &buf[0], &buf[1]); + } + const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); + return int.constBitCast(llvm_ty); + }, + else => unreachable, + } + }, + .ptr => |ptr| { + const ptr_tv: TypedValue = switch (ptr.len) { + .none => tv, + else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, + }; + const llvm_ptr_val = switch (ptr.addr) { + .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), + .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), + .int => |int| try dg.lowerIntAsPtr(int.toValue()), + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0), + .comptime_field => unreachable, + }; + switch (ptr.len) { + .none => return llvm_ptr_val, + else => { + const fields: [2]*llvm.Value = .{ + llvm_ptr_val, + try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + } + }, + .opt => |opt| { + comptime assert(optional_layout_version == 3); + const payload_ty = tv.ty.optionalChild(mod); + + const llvm_i8 = dg.context.intType(8); + const non_null_bit = switch (opt.val) { + .none => llvm_i8.constNull(), + else => llvm_i8.constInt(1, .False), + }; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return non_null_bit; + } + const llvm_ty = try dg.lowerType(tv.ty); + if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { + .none => llvm_ty.constNull(), + else => |payload| dg.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }), + }; + assert(payload_ty.zigTypeTag(mod) != .Fn); + + const llvm_field_count = llvm_ty.countStructElementTypes(); + var fields_buf: [3]*llvm.Value = undefined; + fields_buf[0] = try dg.lowerValue(.{ + .ty = payload_ty, + .val = switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + else => |payload| payload, + }.toValue(), + }); + fields_buf[1] = non_null_bit; + if (llvm_field_count > 2) { + assert(llvm_field_count == 3); + fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + } + return dg.context.constStruct(&fields_buf, llvm_field_count, .False); + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { + .array_type => switch (aggregate.storage) { + .bytes => |bytes| return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + .True, // Don't null terminate. Bytes has the sentinel, if any. + ), + .elems => |elem_vals| { + const elem_ty = tv.ty.childType(mod); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); + defer gpa.free(llvm_elems); + var need_unnamed = false; + for (elem_vals, 0..) 
|elem_val, i| { + llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); + } + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .repeated_elem => |val| { + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); + const len_including_sent = len + @boolToInt(sentinel != null); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); + defer gpa.free(llvm_elems); + + var need_unnamed = false; + if (len != 0) { + for (llvm_elems[0..len]) |*elem| { + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); + } + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); + } + + if (sentinel) |sent| { + llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + } + + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + }, + .vector_type => |vector_type| { + const elem_ty = vector_type.child.toType(); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len); + defer dg.gpa.free(llvm_elems); + const llvm_i8 = dg.context.intType(8); + for (llvm_elems, 0..) |*llvm_elem, i| { + llvm_elem.* = switch (aggregate.storage) { + .bytes => |bytes| llvm_i8.constInt(bytes[i], .False), + .elems => |elems| try dg.lowerValue(.{ + .ty = elem_ty, + .val = elems[i].toValue(), + }), + .repeated_elem => |elem| try dg.lowerValue(.{ + .ty = elem_ty, + .val = elem.toValue(), + }), + }; + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + .anon_struct_type => |tuple| { + const gpa = dg.gpa; - if (tv.ty.isSimpleTupleOrAnonStruct()) { - const tuple = tv.ty.tupleFields(); var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; defer llvm_fields.deinit(gpa); @@ -3659,11 +3545,11 @@ pub const DeclGen = struct { var big_align: u32 = 0; var need_unnamed = false; - for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value) continue; - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + for (tuple.types, tuple.values, 0..) 
|field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -3677,15 +3563,113 @@ pub const DeclGen = struct { } const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty, - .val = field_vals[i], + .ty = field_ty.toType(), + .val = try tv.val.fieldValue(mod, i), }); - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val); + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); llvm_fields.appendAssumeCapacity(field_llvm_val); - offset += field_ty.abiSize(target); + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + } + + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + const llvm_struct_ty = try dg.lowerType(tv.ty); + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const llvm_struct_ty = try dg.lowerType(tv.ty); + const gpa = dg.gpa; + + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); + const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); + const fields = struct_obj.fields.values(); + comptime assert(Type.packed_struct_layout_version == 2); + var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_bits: u16 = 0; + for (fields, 0..) |field, i| { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const non_int_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, i), + }); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const small_int_ty = dg.context.intType(ty_bit_size); + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) + non_int_val.constPtrToInt(small_int_ty) + else + non_int_val.constBitCast(small_int_ty); + const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + // If the field is as large as the entire packed struct, this + // zext would go from, e.g. i16 to i16. This is legal with + // constZExtOrBitCast but not legal with constZExt. 
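+                        // Packing is LSB-first: each field is widened to the backing
+                        // integer type, shifted left past the bits already emitted, and
+                        // OR'd into the running value.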
+ const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const shifted = extended_int_val.constShl(shift_rhs); + running_int = running_int.constOr(shifted); + running_bits += ty_bit_size; + } + return running_int; + } + + const llvm_field_count = llvm_struct_ty.countStructElementTypes(); + var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); + defer llvm_fields.deinit(gpa); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; + + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + + const field_llvm_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, field_and_index.index), + }); + + need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field.ty.abiSize(mod); } { const prev_offset = offset; @@ -3709,127 +3693,39 @@ pub const DeclGen = struct { @intCast(c_uint, llvm_fields.items.len), ); } - } - - const struct_obj = tv.ty.castTag(.@"struct").?.data; - - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(target); - const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); - const fields = struct_obj.fields.values(); - comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); - var running_bits: u16 = 0; - for (field_vals, 0..) |field_val, i| { - const field = fields[i]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; - - const non_int_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = field_val, - }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); - const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. 
- const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); - running_bits += ty_bit_size; - } - return running_int; - } - - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; - - var it = struct_obj.runtimeFieldIterator(); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(target, struct_obj.layout); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - - const field_llvm_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = field_vals[field_and_index.index], - }); - - need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); - - offset += field.ty.abiSize(target); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } - - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } + }, + else => unreachable, }, - .Union => { + .un => { const llvm_union_ty = try dg.lowerType(tv.ty); - const tag_and_val = tv.val.castTag(.@"union").?.data; + const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) { + .none => tv.val.castTag(.@"union").?.data, + else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { + .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() }, + else => unreachable, + }, + }; - const layout = tv.ty.unionGetLayout(target); + const layout = tv.ty.unionGetLayout(mod); if (layout.payload_size == 0) { return lowerValue(dg, .{ - .ty = tv.ty.unionTagTypeSafety().?, + .ty = tv.ty.unionTagTypeSafety(mod).?, .val = tag_and_val.tag, }); } - const union_obj = tv.ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(tv.ty).?; const field_index = tv.ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?; assert(union_obj.haveFieldTypes()); const field_ty = union_obj.fields.values()[field_index].ty; if (union_obj.layout == .Packed) { - if (!field_ty.hasRuntimeBits()) + if (!field_ty.hasRuntimeBits(mod)) return llvm_union_ty.constNull(); const non_int_val = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); - const ty_bit_size = @intCast(u16, field_ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field_ty.bitSize(mod)); const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field_ty.isPtrAtRuntime()) + const 
small_int_val = if (field_ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) else non_int_val.constBitCast(small_int_ty); @@ -3842,13 +3738,13 @@ pub const DeclGen = struct { // must pointer cast to the expected type before accessing the union. var need_unnamed: bool = layout.most_aligned_field != field_index; const payload = p: { - if (!field_ty.hasRuntimeBitsIgnoreComptime()) { + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { const padding_len = @intCast(c_uint, layout.payload_size); break :p dg.context.intType(8).arrayType(padding_len).getUndef(); } const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field); - const field_size = field_ty.abiSize(target); + const field_size = field_ty.abiSize(mod); if (field_size == layout.payload_size) { break :p field; } @@ -3868,7 +3764,7 @@ pub const DeclGen = struct { } } const llvm_tag_value = try lowerValue(dg, .{ - .ty = tv.ty.unionTagTypeSafety().?, + .ty = tv.ty.unionTagTypeSafety(mod).?, .val = tag_and_val.tag, }); var fields: [3]*llvm.Value = undefined; @@ -3888,109 +3784,47 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields_len); } }, - .Vector => switch (tv.val.tag()) { - .bytes => { - // Note, sentinel is not stored even if the type has a sentinel. - const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); - assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - - const elem_ty = tv.ty.elemType(); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - var byte_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = bytes[i], - }; - - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = Value.initPayload(&byte_payload.base), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .aggregate => { - // Note, sentinel is not stored even if the type has a sentinel. - // The value includes the sentinel in those cases. - const elem_vals = tv.val.castTag(.aggregate).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); - assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len); - const elem_ty = tv.ty.elemType(); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .repeated => { - // Note, sentinel is not stored even if the type has a sentinel. 
- const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const len = @intCast(usize, tv.ty.arrayLen()); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .str_lit => { - // Note, sentinel is not stored - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen()); - assert(vector_len == bytes.len); - - const elem_ty = tv.ty.elemType(); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - var byte_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = bytes[i], - }; - - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = Value.initPayload(&byte_payload.base), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - else => unreachable, - }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), + .memoized_call => unreachable, } } + fn lowerIntAsPtr(dg: *DeclGen, val: Value) Error!*llvm.Value { + switch (dg.module.intern_pool.indexToKey(val.toIntern())) { + .undef => return dg.context.pointerType(0).getUndef(), + .int => { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_space, dg.module); + const llvm_int = lowerBigInt(dg, Type.usize, bigint); + return llvm_int.constIntToPtr(dg.context.pointerType(0)); + }, + else => unreachable, + } + } + + fn lowerBigInt(dg: *DeclGen, ty: Type, bigint: std.math.big.int.Const) *llvm.Value { + const mod = dg.module; + const int_info = ty.intInfo(mod); + assert(int_info.bits != 0); + const llvm_type = dg.context.intType(int_info.bits); + + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); + }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); + } + return unsigned_val; + } + const ParentPtr = struct { ty: Type, llvm_ptr: *llvm.Value, @@ -4001,57 +3835,86 @@ pub const DeclGen = struct { ptr_val: Value, decl_index: Module.Decl.Index, ) Error!*llvm.Value { - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = decl.ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + try mod.markDeclAlive(decl); + const ptr_ty = try mod.singleMutPtrType(decl.ty); return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); } fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { - const target = dg.module.getTarget(); - switch (ptr_val.tag()) { - 
.decl_ref_mut => { - const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .decl_ref => { - const decl = ptr_val.castTag(.decl_ref).?.data; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .variable => { - const decl = ptr_val.castTag(.variable).?.data.owner_decl; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .int_i64 => { - const int = ptr_val.castTag(.int_i64).?.data; - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False); - return llvm_int.constIntToPtr(dg.context.pointerType(0)); - }, - .int_u64 => { - const int = ptr_val.castTag(.int_u64).?.data; - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(int, .False); - return llvm_int.constIntToPtr(dg.context.pointerType(0)); - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned); - const parent_ty = field_ptr.container_ty; + const mod = dg.module; + const target = mod.getTarget(); + return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) { + .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), + .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), + .int => |int| dg.lowerIntAsPtr(int.toValue()), + .eu_payload => |eu_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(eu_ptr.toValue(), true); - const field_index = @intCast(u32, field_ptr.field_index); + const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); + const payload_ty = eu_ty.errorUnionPayload(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + // In this case, we represent pointer to error union the same as pointer + // to the payload. + return parent_llvm_ptr; + } + + const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; const llvm_u32 = dg.context.intType(32); - switch (parent_ty.zigTypeTag()) { + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(payload_offset, .False), + }; + const eu_llvm_ty = try dg.lowerType(eu_ty); + return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .opt_payload => |opt_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(opt_ptr.toValue(), true); + + const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); + const payload_ty = opt_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or + payload_ty.optionalReprIsPayload(mod)) + { + // In this case, we represent pointer to optional the same as pointer + // to the payload. 
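+                    // For a pointer-like optional such as `?*T`, the null state is
+                    // encoded in the pointer value itself, so the optional and its
+                    // payload share an address and no GEP is needed.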
+ return parent_llvm_ptr; + } + + const llvm_u32 = dg.context.intType(32); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(0, .False), + }; + const opt_llvm_ty = try dg.lowerType(opt_ty); + return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .comptime_field => unreachable, + .elem => |elem_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.base.toValue(), true); + + const llvm_usize = try dg.lowerType(Type.usize); + const indices: [1]*llvm.Value = .{ + llvm_usize.constInt(elem_ptr.index, .False), + }; + const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + const elem_llvm_ty = try dg.lowerType(elem_ty); + return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .field => |field_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); + const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + + const field_index = @intCast(u32, field_ptr.index); + const llvm_u32 = dg.context.intType(32); + switch (parent_ty.zigTypeTag(mod)) { .Union => { - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { return parent_llvm_ptr; } - const layout = parent_ty.unionGetLayout(target); + const layout = parent_ty.unionGetLayout(mod); if (layout.payload_size == 0) { // In this case a pointer to the union and a pointer to any // (void) payload is the same. @@ -4069,16 +3932,16 @@ pub const DeclGen = struct { return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .Struct => { - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { if (!byte_aligned) return parent_llvm_ptr; const llvm_usize = dg.context.intType(target.ptrBitWidth()); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); // count bits of fields before this one const prev_bits = b: { var b: usize = 0; - for (parent_ty.structFields().values()[0..field_index]) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue; - b += @intCast(usize, field.ty.bitSize(target)); + for (parent_ty.structFields(mod).values()[0..field_index]) |field| { + if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + b += @intCast(usize, field.ty.bitSize(mod)); } break :b b; }; @@ -4088,23 +3951,21 @@ pub const DeclGen = struct { return field_addr.constIntToPtr(final_llvm_ty); } - var ty_buf: Type.Payload.Pointer = undefined; - const parent_llvm_ty = try dg.lowerType(parent_ty); - if (llvmFieldIndex(parent_ty, field_index, target, &ty_buf)) |llvm_field_index| { + if (llvmField(parent_ty, field_index, mod)) |llvm_field| { const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field_index, .False), + llvm_u32.constInt(llvm_field.index, .False), }; return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); } else { - const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime()), .False); + const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); const indices: [1]*llvm.Value = .{llvm_index}; return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); } }, .Pointer => { - assert(parent_ty.isSlice()); + assert(parent_ty.isSlice(mod)); const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(field_index, .False), @@ -4115,61 
+3976,7 @@ pub const DeclGen = struct { else => unreachable, } }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, true); - - const llvm_usize = try dg.lowerType(Type.usize); - const indices: [1]*llvm.Value = .{ - llvm_usize.constInt(elem_ptr.index, .False), - }; - const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty); - return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .opt_payload_ptr => { - const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true); - var buf: Type.Payload.ElemType = undefined; - - const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or - payload_ty.optionalReprIsPayload()) - { - // In this case, we represent pointer to optional the same as pointer - // to the payload. - return parent_llvm_ptr; - } - - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(0, .False), - }; - const opt_llvm_ty = try dg.lowerType(opt_payload_ptr.container_ty); - return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .eu_payload_ptr => { - const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true); - - const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - // In this case, we represent pointer to error union the same as pointer - // to the payload. - return parent_llvm_ptr; - } - - const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(payload_offset, .False), - }; - const eu_llvm_ty = try dg.lowerType(eu_payload_ptr.container_ty); - return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - else => unreachable, - } + }; } fn lowerDeclRefValue( @@ -4177,57 +3984,39 @@ pub const DeclGen = struct { tv: TypedValue, decl_index: Module.Decl.Index, ) Error!*llvm.Value { - if (tv.ty.isSlice()) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = tv.ty.slicePtrFieldType(&buf); - var slice_len: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = tv.val.sliceLen(self.module), - }; - const fields: [2]*llvm.Value = .{ - try self.lowerValue(.{ - .ty = ptr_ty, - .val = tv.val, - }), - try self.lowerValue(.{ - .ty = Type.usize, - .val = Value.initPayload(&slice_len.base), - }), - }; - return self.context.constStruct(&fields, fields.len, .False); - } + const mod = self.module; // In the case of something like: // fn foo() void {} // const bar = foo; // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. 
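        // The checks below follow that alias chain: whenever the decl's value is
        // a function whose owner_decl differs, we lower a reference to the owner
        // instead, so every alias collapses onto a single LLVM function.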
- const decl = self.module.declPtr(decl_index); - if (decl.val.castTag(.function)) |func| { - if (func.data.owner_decl != decl_index) { - return self.lowerDeclRefValue(tv, func.data.owner_decl); + const decl = mod.declPtr(decl_index); + if (decl.val.getFunction(mod)) |func| { + if (func.owner_decl != decl_index) { + return self.lowerDeclRefValue(tv, func.owner_decl); } - } else if (decl.val.castTag(.extern_fn)) |func| { - if (func.data.owner_decl != decl_index) { - return self.lowerDeclRefValue(tv, func.data.owner_decl); + } else if (decl.val.getExternFunc(mod)) |func| { + if (func.decl != decl_index) { + return self.lowerDeclRefValue(tv, func.decl); } } - const is_fn_body = decl.ty.zigTypeTag() == .Fn; - if ((!is_fn_body and !decl.ty.hasRuntimeBits()) or - (is_fn_body and decl.ty.fnInfo().is_generic)) + const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; + if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or + (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) { return self.lowerPtrToVoid(tv.ty); } - self.module.markDeclAlive(decl); + try mod.markDeclAlive(decl); const llvm_decl_val = if (is_fn_body) try self.resolveLlvmFunction(decl_index) else try self.resolveGlobalDecl(decl_index); - const target = self.module.getTarget(); + const target = mod.getTarget(); const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: { @@ -4236,7 +4025,7 @@ pub const DeclGen = struct { } else llvm_decl_val; const llvm_type = try self.lowerType(tv.ty); - if (tv.ty.zigTypeTag() == .Int) { + if (tv.ty.zigTypeTag(mod) == .Int) { return llvm_val.constPtrToInt(llvm_type); } else { return llvm_val.constBitCast(llvm_type); @@ -4244,7 +4033,8 @@ pub const DeclGen = struct { } fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*llvm.Value { - const alignment = ptr_ty.ptrInfo().data.@"align"; + const mod = dg.module; + const alignment = ptr_ty.ptrInfo(mod).@"align"; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. // The value cannot be undefined, because we use the `nonnull` annotation @@ -4338,21 +4128,20 @@ pub const DeclGen = struct { /// RMW exchange of floating-point values is bitcasted to same-sized integer /// types to work around a LLVM deficiency when targeting ARM/AArch64. 
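    /// Returns the ABI-sized integer type to perform the operation on
    /// instead, or null if `ty` can already be used directly.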
fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*llvm.Type { - const target = dg.module.getTarget(); - var buffer: Type.Payload.Bits = undefined; - const int_ty = switch (ty.zigTypeTag()) { + const mod = dg.module; + const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, - .Enum => ty.intTagType(&buffer), + .Enum => ty.intTagType(mod), .Float => { if (!is_rmw_xchg) return null; - return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8)); + return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8)); }, .Bool => return dg.context.intType(8), else => return null, }; - const bit_count = int_ty.intInfo(target).bits; + const bit_count = int_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) { - return dg.context.intType(@intCast(c_uint, int_ty.abiSize(target) * 8)); + return dg.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8)); } else { return null; } @@ -4363,18 +4152,18 @@ pub const DeclGen = struct { llvm_fn: *llvm.Value, param_ty: Type, param_index: u32, - fn_info: Type.Payload.Function.Data, + fn_info: InternPool.Key.FuncType, llvm_arg_i: u32, ) void { - const target = dg.module.getTarget(); - if (param_ty.isPtrAtRuntime()) { - const ptr_info = param_ty.ptrInfo().data; + const mod = dg.module; + if (param_ty.isPtrAtRuntime(mod)) { + const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias"); } } - if (!param_ty.isPtrLikeOptional() and !ptr_info.@"allowzero") { + if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.@"allowzero") { dg.addArgAttr(llvm_fn, llvm_arg_i, "nonnull"); } if (!ptr_info.mutable) { @@ -4383,13 +4172,10 @@ pub const DeclGen = struct { if (ptr_info.@"align" != 0) { dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", ptr_info.@"align"); } else { - const elem_align = @max( - ptr_info.pointee_type.abiAlignment(target), - 1, - ); + const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1); dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align); } - } else if (ccAbiPromoteInt(fn_info.cc, target, param_ty)) |s| switch (s) { + } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) { .signed => dg.addArgAttr(llvm_fn, llvm_arg_i, "signext"), .unsigned => dg.addArgAttr(llvm_fn, llvm_arg_i, "zeroext"), }; @@ -4490,21 +4276,23 @@ pub const FuncGen = struct { const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst); if (gop.found_existing) return gop.value_ptr.*; + const mod = self.dg.module; const llvm_val = try self.resolveValue(.{ - .ty = self.air.typeOf(inst), - .val = self.air.value(inst).?, + .ty = self.typeOf(inst), + .val = (try self.air.value(inst, mod)).?, }); gop.value_ptr.* = llvm_val; return llvm_val; } fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value { + const mod = self.dg.module; const llvm_val = try self.dg.lowerValue(tv); - if (!isByRef(tv.ty)) return llvm_val; + if (!isByRef(tv.ty, mod)) return llvm_val; // We have an LLVM value but we need to create a global constant and // set the value as its initializer, and then return a pointer to the global. 
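        // The global is private, constant, and unnamed_addr, which allows LLVM
        // to merge identical constants and rename them freely.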
- const target = self.dg.module.getTarget(); + const target = mod.getTarget(); const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target); const global = self.dg.object.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace); @@ -4512,7 +4300,7 @@ pub const FuncGen = struct { global.setLinkage(.Private); global.setGlobalConstant(.True); global.setUnnamedAddr(.True); - global.setAlignment(tv.ty.abiAlignment(target)); + global.setAlignment(tv.ty.abiAlignment(mod)); const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace)) else @@ -4521,11 +4309,12 @@ pub const FuncGen = struct { } fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { + const mod = self.dg.module; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) |inst, i| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; - } const opt_value: ?*llvm.Value = switch (air_tags[inst]) { // zig fmt: off @@ -4742,8 +4531,8 @@ pub const FuncGen = struct { .vector_store_elem => try self.airVectorStoreElem(inst), - .constant => unreachable, - .const_ty => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, + .unreach => self.airUnreach(inst), .dbg_stmt => self.airDbgStmt(inst), .dbg_inline_begin => try self.airDbgInlineBegin(inst), @@ -4774,29 +4563,30 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const callee_ty = self.air.typeOf(pl_op.operand); - const zig_fn_ty = switch (callee_ty.zigTypeTag()) { + const mod = self.dg.module; + const callee_ty = self.typeOf(pl_op.operand); + const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(), + .Pointer => callee_ty.childType(mod), else => unreachable, }; - const fn_info = zig_fn_ty.fnInfo(); - const return_type = fn_info.return_type; + const fn_info = mod.typeToFunc(zig_fn_ty).?; + const return_type = fn_info.return_type.toType(); const llvm_fn = try self.resolveInst(pl_op.operand); - const target = self.dg.module.getTarget(); - const sret = firstParamSRet(fn_info, target); + const target = mod.getTarget(); + const sret = firstParamSRet(fn_info, mod); var llvm_args = std.ArrayList(*llvm.Value).init(self.gpa); defer llvm_args.deinit(); const ret_ptr = if (!sret) null else blk: { const llvm_ret_ty = try self.dg.lowerType(return_type); - const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(target)); + const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod)); try llvm_args.append(ret_ptr); break :blk ret_ptr; }; - const err_return_tracing = fn_info.return_type.isError() and + const err_return_tracing = return_type.isError(mod) and self.dg.module.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { try llvm_args.append(self.err_ret_trace.?); @@ -4807,11 +4597,11 @@ pub const FuncGen = struct { .no_bits => continue, .byval => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const llvm_param_ty = try 
self.dg.lowerType(param_ty); - if (isByRef(param_ty)) { - const alignment = param_ty.abiAlignment(target); + if (isByRef(param_ty, mod)) { + const alignment = param_ty.abiAlignment(mod); const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); @@ -4821,12 +4611,12 @@ pub const FuncGen = struct { }, .byref => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { try llvm_args.append(llvm_arg); } else { - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); const param_llvm_ty = llvm_arg.typeOf(); const arg_ptr = self.buildAlloca(param_llvm_ty, alignment); const store_inst = self.builder.buildStore(llvm_arg, arg_ptr); @@ -4836,13 +4626,13 @@ pub const FuncGen = struct { }, .byref_mut => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); const param_llvm_ty = try self.dg.lowerType(param_ty); const arg_ptr = self.buildAlloca(param_llvm_ty, alignment); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); @@ -4857,13 +4647,13 @@ pub const FuncGen = struct { }, .abi_sized_int => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); - const abi_size = @intCast(c_uint, param_ty.abiSize(target)); + const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); const int_llvm_ty = self.context.intType(abi_size * 8); - if (isByRef(param_ty)) { - const alignment = param_ty.abiAlignment(target); + if (isByRef(param_ty, mod)) { + const alignment = param_ty.abiAlignment(mod); const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); @@ -4871,7 +4661,7 @@ pub const FuncGen = struct { // LLVM does not allow bitcasting structs so we must allocate // a local, store as one type, and then load as another type. 
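                        // The alloca is aligned to the max of both types' alignments so
                        // the same bytes can be stored as the Zig type and reloaded as
                        // the integer type.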
const alignment = @max( - param_ty.abiAlignment(target), + param_ty.abiAlignment(mod), self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty), ); const int_ptr = self.buildAlloca(int_llvm_ty, alignment); @@ -4893,14 +4683,14 @@ pub const FuncGen = struct { }, .multiple_llvm_types => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len]; const llvm_arg = try self.resolveInst(arg); - const is_by_ref = isByRef(param_ty); + const is_by_ref = isByRef(param_ty, mod); const arg_ptr = if (is_by_ref) llvm_arg else p: { const p = self.buildAlloca(llvm_arg.typeOf(), null); const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(param_ty.abiAlignment(target)); + store_inst.setAlignment(param_ty.abiAlignment(mod)); break :p p; }; @@ -4922,19 +4712,19 @@ pub const FuncGen = struct { }, .float_array => |count| { const arg = args[it.zig_index - 1]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); - if (!isByRef(arg_ty)) { + if (!isByRef(arg_ty, mod)) { const p = self.buildAlloca(llvm_arg.typeOf(), null); const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(arg_ty.abiAlignment(target)); + store_inst.setAlignment(arg_ty.abiAlignment(mod)); llvm_arg = store_inst; } - const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?); + const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?); const array_llvm_ty = float_ty.arrayType(count); - const alignment = arg_ty.abiAlignment(target); + const alignment = arg_ty.abiAlignment(mod); const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); @@ -4942,17 +4732,17 @@ pub const FuncGen = struct { .i32_array, .i64_array => |arr_len| { const elem_size: u8 = if (lowering == .i32_array) 32 else 64; const arg = args[it.zig_index - 1]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); - if (!isByRef(arg_ty)) { + if (!isByRef(arg_ty, mod)) { const p = self.buildAlloca(llvm_arg.typeOf(), null); const store_inst = self.builder.buildStore(llvm_arg, p); - store_inst.setAlignment(arg_ty.abiAlignment(target)); + store_inst.setAlignment(arg_ty.abiAlignment(mod)); llvm_arg = store_inst; } const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len); - const alignment = arg_ty.abiAlignment(target); + const alignment = arg_ty.abiAlignment(mod); const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, ""); load_inst.setAlignment(alignment); try llvm_args.append(load_inst); @@ -4969,7 +4759,7 @@ pub const FuncGen = struct { "", ); - if (callee_ty.zigTypeTag() == .Pointer) { + if (callee_ty.zigTypeTag(mod) == .Pointer) { // Add argument attributes for function pointer calls. 
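            // For direct calls these attributes live on the function declaration; a
            // call through a pointer has no declaration to consult, so (presumably
            // for that reason) they are re-applied to the call instruction itself.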
it = iterateParamTypes(self.dg, fn_info); it.llvm_index += @boolToInt(sret); @@ -4977,16 +4767,16 @@ pub const FuncGen = struct { while (it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; - if (!isByRef(param_ty)) { + const param_ty = fn_info.param_types[param_index].toType(); + if (!isByRef(param_ty, mod)) { self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); const param_llvm_ty = try self.dg.lowerType(param_ty); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { @@ -5004,8 +4794,8 @@ pub const FuncGen = struct { .slice => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; - const ptr_info = param_ty.ptrInfo().data; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const ptr_info = param_ty.ptrInfo(mod); const llvm_arg_i = it.llvm_index - 2; if (math.cast(u5, it.zig_index - 1)) |i| { @@ -5013,7 +4803,7 @@ pub const FuncGen = struct { self.dg.addArgAttr(call, llvm_arg_i, "noalias"); } } - if (param_ty.zigTypeTag() != .Optional) { + if (param_ty.zigTypeTag(mod) != .Optional) { self.dg.addArgAttr(call, llvm_arg_i, "nonnull"); } if (!ptr_info.mutable) { @@ -5022,18 +4812,18 @@ pub const FuncGen = struct { if (ptr_info.@"align" != 0) { self.dg.addArgAttrInt(call, llvm_arg_i, "align", ptr_info.@"align"); } else { - const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1); + const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1); self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align); } }, }; } - if (return_type.isNoReturn() and attr != .AlwaysTail) { + if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) { return null; } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) { + if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { return null; } @@ -5041,12 +4831,12 @@ pub const FuncGen = struct { if (ret_ptr) |rp| { call.setCallSret(llvm_ret_ty); - if (isByRef(return_type)) { + if (isByRef(return_type, mod)) { return rp; } else { // our by-ref status disagrees with sret so we must load. const loaded = self.builder.buildLoad(llvm_ret_ty, rp, ""); - loaded.setAlignment(return_type.abiAlignment(target)); + loaded.setAlignment(return_type.abiAlignment(mod)); return loaded; } } @@ -5061,7 +4851,7 @@ pub const FuncGen = struct { const rp = self.buildAlloca(llvm_ret_ty, alignment); const store_inst = self.builder.buildStore(call, rp); store_inst.setAlignment(alignment); - if (isByRef(return_type)) { + if (isByRef(return_type, mod)) { return rp; } else { const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, ""); @@ -5070,10 +4860,10 @@ pub const FuncGen = struct { } } - if (isByRef(return_type)) { + if (isByRef(return_type, mod)) { // our by-ref status disagrees with sret so we must allocate, store, // and return the allocation pointer. 
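            // That is: the C ABI returns this type in registers, but the rest of
            // codegen expects values of this type to be handled through a pointer.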
- const alignment = return_type.abiAlignment(target); + const alignment = return_type.abiAlignment(mod); const rp = self.buildAlloca(llvm_ret_ty, alignment); const store_inst = self.builder.buildStore(call, rp); store_inst.setAlignment(alignment); @@ -5084,22 +4874,19 @@ pub const FuncGen = struct { } fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ret_ty = self.air.typeOf(un_op); + const ret_ty = self.typeOf(un_op); if (self.ret_ptr) |ret_ptr| { const operand = try self.resolveInst(un_op); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(ret_ptr, ptr_ty, operand, .NotAtomic); _ = self.builder.buildRetVoid(); return null; } - const fn_info = self.dg.decl.ty.fnInfo(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - if (fn_info.return_type.isError()) { + const fn_info = mod.typeToFunc(self.dg.decl.ty).?; + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (fn_info.return_type.toType().isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. @@ -5113,10 +4900,9 @@ pub const FuncGen = struct { const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); const operand = try self.resolveInst(un_op); - const target = self.dg.module.getTarget(); - const alignment = ret_ty.abiAlignment(target); + const alignment = ret_ty.abiAlignment(mod); - if (isByRef(ret_ty)) { + if (isByRef(ret_ty, mod)) { // operand is a pointer however self.ret_ptr is null so that means // we need to return a value. const load_inst = self.builder.buildLoad(abi_ret_ty, operand, ""); @@ -5141,12 +4927,13 @@ pub const FuncGen = struct { } fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ptr_ty = self.air.typeOf(un_op); - const ret_ty = ptr_ty.childType(); - const fn_info = self.dg.decl.ty.fnInfo(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - if (fn_info.return_type.isError()) { + const ptr_ty = self.typeOf(un_op); + const ret_ty = ptr_ty.childType(mod); + const fn_info = mod.typeToFunc(self.dg.decl.ty).?; + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (fn_info.return_type.toType().isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. 
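// At the language level, the coercion those zero error codes enable looks
// like this (a minimal sketch; the names are illustrative only):
//
//     fn neverFails() error{}!u32 {
//         return 7;
//     }
//     // A pointer to an error{}-returning function may coerce to one
//     // returning anyerror, so both must share a return convention:
//     const f: *const fn () anyerror!u32 = &neverFails;
//
// `neverFails` must therefore still materialize an (always-zero) error code.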
@@ -5162,10 +4949,9 @@ pub const FuncGen = struct { return null; } const ptr = try self.resolveInst(un_op); - const target = self.dg.module.getTarget(); const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); const loaded = self.builder.buildLoad(abi_ret_ty, ptr, ""); - loaded.setAlignment(ret_ty.abiAlignment(target)); + loaded.setAlignment(ret_ty.abiAlignment(mod)); _ = self.builder.buildRet(loaded); return null; } @@ -5184,9 +4970,9 @@ pub const FuncGen = struct { const src_list = try self.resolveInst(ty_op.operand); const va_list_ty = self.air.getRefType(ty_op.ty); const llvm_va_list_ty = try self.dg.lowerType(va_list_ty); + const mod = self.dg.module; - const target = self.dg.module.getTarget(); - const result_alignment = va_list_ty.abiAlignment(target); + const result_alignment = va_list_ty.abiAlignment(mod); const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment); const llvm_fn_name = "llvm.va_copy"; @@ -5202,7 +4988,7 @@ pub const FuncGen = struct { const args: [2]*llvm.Value = .{ dest_list, src_list }; _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); - if (isByRef(va_list_ty)) { + if (isByRef(va_list_ty, mod)) { return dest_list; } else { const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, ""); @@ -5227,11 +5013,11 @@ pub const FuncGen = struct { } fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const va_list_ty = self.air.typeOfIndex(inst); + const mod = self.dg.module; + const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try self.dg.lowerType(va_list_ty); - const target = self.dg.module.getTarget(); - const result_alignment = va_list_ty.abiAlignment(target); + const result_alignment = va_list_ty.abiAlignment(mod); const list = self.buildAlloca(llvm_va_list_ty, result_alignment); const llvm_fn_name = "llvm.va_start"; @@ -5243,7 +5029,7 @@ pub const FuncGen = struct { const args: [1]*llvm.Value = .{list}; _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, ""); - if (isByRef(va_list_ty)) { + if (isByRef(va_list_ty, mod)) { return list; } else { const loaded = self.builder.buildLoad(llvm_va_list_ty, list, ""); @@ -5258,7 +5044,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const operand_ty = self.air.typeOf(bin_op.lhs); + const operand_ty = self.typeOf(bin_op.lhs); return self.cmp(lhs, rhs, operand_ty, op); } @@ -5271,7 +5057,7 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const vec_ty = self.air.typeOf(extra.lhs); + const vec_ty = self.typeOf(extra.lhs); const cmp_op = extra.compareOperator(); return self.cmp(lhs, rhs, vec_ty, cmp_op); @@ -5292,23 +5078,21 @@ pub const FuncGen = struct { operand_ty: Type, op: math.CompareOperator, ) Allocator.Error!*llvm.Value { - var int_buffer: Type.Payload.Bits = undefined; - var opt_buffer: Type.Payload.ElemType = undefined; - - const scalar_ty = operand_ty.scalarType(); - const int_ty = switch (scalar_ty.zigTypeTag()) { - .Enum => scalar_ty.intTagType(&int_buffer), + const mod = self.dg.module; + const scalar_ty = operand_ty.scalarType(mod); + const int_ty = switch (scalar_ty.zigTypeTag(mod)) { + .Enum => scalar_ty.intTagType(mod), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { - const payload_ty = operand_ty.optionalChild(&opt_buffer); - if 
(!payload_ty.hasRuntimeBitsIgnoreComptime() or - operand_ty.optionalReprIsPayload()) + const payload_ty = operand_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or + operand_ty.optionalReprIsPayload(mod)) { break :blk operand_ty; } // We need to emit instructions to check for equality/inequality // of optionals that are not pointers. - const is_by_ref = isByRef(scalar_ty); + const is_by_ref = isByRef(scalar_ty, mod); const opt_llvm_ty = try self.dg.lowerType(scalar_ty); const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref); const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref); @@ -5375,7 +5159,7 @@ pub const FuncGen = struct { .Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }), else => unreachable, }; - const is_signed = int_ty.isSignedInt(); + const is_signed = int_ty.isSignedInt(mod); const operation: llvm.IntPredicate = switch (op) { .eq => .EQ, .neq => .NE, @@ -5388,13 +5172,14 @@ pub const FuncGen = struct { } fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const parent_bb = self.context.createBasicBlock("Block"); - if (inst_ty.isNoReturn()) { + if (inst_ty.isNoReturn(mod)) { try self.genBody(body); return null; } @@ -5414,8 +5199,8 @@ pub const FuncGen = struct { self.builder.positionBuilderAtEnd(parent_bb); // Create a phi node only if the block returns a value. - const is_body = inst_ty.zigTypeTag() == .Fn; - if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null; + const is_body = inst_ty.zigTypeTag(mod) == .Fn; + if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; const raw_llvm_ty = try self.dg.lowerType(inst_ty); @@ -5424,7 +5209,7 @@ pub const FuncGen = struct { // a pointer to it. LLVM IR allows the call instruction to use function bodies instead // of function pointers, however the phi makes it a runtime value and therefore // the LLVM type has to be wrapped in a pointer. - if (is_body or isByRef(inst_ty)) { + if (is_body or isByRef(inst_ty, mod)) { break :ty self.context.pointerType(0); } break :ty raw_llvm_ty; @@ -5444,8 +5229,9 @@ pub const FuncGen = struct { const block = self.blocks.get(branch.block_inst).?; // Add the values to the lists only if the break provides a value. 
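        // Each such break contributes a (predecessor block, value) pair, and
        // those pairs are what the enclosing block's phi node is built from.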
- const operand_ty = self.air.typeOf(branch.operand); - if (operand_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.zigTypeTag() == .Fn) { + const operand_ty = self.typeOf(branch.operand); + const mod = self.dg.module; + if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) { const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the @@ -5481,24 +5267,26 @@ pub const FuncGen = struct { } fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const inst = body_tail[0]; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const err_union = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); - const payload_ty = self.air.typeOfIndex(inst); - const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false; + const err_union_ty = self.typeOf(pl_op.operand); + const payload_ty = self.typeOfIndex(inst); + const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(mod); const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused); } @@ -5512,12 +5300,12 @@ pub const FuncGen = struct { can_elide_load: bool, is_unused: bool, ) !?*llvm.Value { - const payload_ty = err_union_ty.errorUnionPayload(); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(); - const target = fg.dg.module.getTarget(); + const mod = fg.dg.module; + const payload_ty = err_union_ty.errorUnionPayload(mod); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const is_err = err: { const err_set_ty = try fg.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); @@ -5529,8 +5317,8 @@ pub const FuncGen = struct { err_union; break :err fg.builder.buildICmp(.NE, loaded, zero, ""); } - const err_field_index = errUnionErrorOffset(payload_ty, target); - if (operand_is_ptr or isByRef(err_union_ty)) { + const err_field_index = errUnionErrorOffset(payload_ty, mod); + if (operand_is_ptr or isByRef(err_union_ty, mod)) { const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, ""); // TODO add alignment to this load const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, ""); @@ -5555,30 +5343,31 @@ pub const FuncGen = struct { if (!payload_has_bits) { return if (operand_is_ptr) err_union else null; } - const offset = errUnionPayloadOffset(payload_ty, target); + const offset = errUnionPayloadOffset(payload_ty, mod); if (operand_is_ptr) { 
return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, ""); - } else if (isByRef(err_union_ty)) { + } else if (isByRef(err_union_ty, mod)) { const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, ""); - if (isByRef(payload_ty)) { + if (isByRef(payload_ty, mod)) { if (can_elide_load) return payload_ptr; - return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false); + return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false); } const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, ""); - load_inst.setAlignment(payload_ty.abiAlignment(target)); + load_inst.setAlignment(payload_ty.abiAlignment(mod)); return load_inst; } return fg.builder.buildExtractValue(err_union, offset, ""); } fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const else_block = self.context.appendBasicBlock(self.llvm_func, "Else"); - const target = self.dg.module.getTarget(); + const target = mod.getTarget(); const llvm_usize = self.context.intType(target.ptrBitWidth()); const cond_int = if (cond.typeOf().getTypeKind() == .Pointer) self.builder.buildPtrToInt(cond, llvm_usize, "") @@ -5623,6 +5412,7 @@ pub const FuncGen = struct { } fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; @@ -5638,21 +5428,22 @@ pub const FuncGen = struct { // would have been emitted already. Also the main loop in genBody can // be while(true) instead of for(body), which will eliminate 1 branch on // a hot path. 
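// ---------------------------------------------------------------------------
// [Editor's illustrative aside — not part of the patch.] airLoop above emits
// a basic block that ends by branching back to its own start; per the comment
// here, that back-edge is only emitted when the body's final instruction is
// not noreturn (a body ending in `break`/`return`/`unreachable` has already
// produced its own terminator). The source-level shape being lowered, as a
// runnable sketch with invented names:
const std = @import("std");

fn sumBelow(n: u32) u32 {
    var total: u32 = 0;
    var i: u32 = 0;
    while (i < n) : (i += 1) {
        total += i; // airLoop supplies the branch back to the loop header
    }
    return total;
}

test "loop body repeats via the loop back-edge" {
    try std.testing.expectEqual(@as(u32, 10), sumBelow(5)); // 0+1+2+3+4
}
// ---------------------------------------------------------------------------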
- if (body.len == 0 or !self.air.typeOfIndex(body[body.len - 1]).isNoReturn()) { + if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) { _ = self.builder.buildBr(loop_block); } return null; } fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); - const array_ty = operand_ty.childType(); + const operand_ty = self.typeOf(ty_op.operand); + const array_ty = operand_ty.childType(mod); const llvm_usize = try self.dg.lowerType(Type.usize); - const len = llvm_usize.constInt(array_ty.arrayLen(), .False); - const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); + const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False); + const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); - if (!array_ty.hasRuntimeBitsIgnoreComptime()) { + if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, ""); return self.builder.buildInsertValue(partial, len, 1, ""); } @@ -5666,30 +5457,31 @@ pub const FuncGen = struct { } fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const operand_scalar_ty = operand_ty.scalarType(); + const operand_ty = self.typeOf(ty_op.operand); + const operand_scalar_ty = operand_ty.scalarType(mod); - const dest_ty = self.air.typeOfIndex(inst); - const dest_scalar_ty = dest_ty.scalarType(); + const dest_ty = self.typeOfIndex(inst); + const dest_scalar_ty = dest_ty.scalarType(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); - const target = self.dg.module.getTarget(); + const target = mod.getTarget(); if (intrinsicsAllowed(dest_scalar_ty, target)) { - if (operand_scalar_ty.isSignedInt()) { + if (operand_scalar_ty.isSignedInt(mod)) { return self.builder.buildSIToFP(operand, dest_llvm_ty, ""); } else { return self.builder.buildUIToFP(operand, dest_llvm_ty, ""); } } - const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target)); + const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod)); const rt_int_bits = compilerRtIntBits(operand_bits); const rt_int_ty = self.context.intType(rt_int_bits); var extended = e: { - if (operand_scalar_ty.isSignedInt()) { + if (operand_scalar_ty.isSignedInt(mod)) { break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, ""); } else { break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, ""); @@ -5698,7 +5490,7 @@ pub const FuncGen = struct { const dest_bits = dest_scalar_ty.floatBits(target); const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits); const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits); - const sign_prefix = if (operand_scalar_ty.isSignedInt()) "" else "un"; + const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else "un"; var fn_name_buf: [64]u8 = undefined; const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{ sign_prefix, @@ -5724,27 +5516,28 @@ pub const FuncGen = struct { fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); - const target = self.dg.module.getTarget(); + const mod = self.dg.module; + const 
target = mod.getTarget(); const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const operand_scalar_ty = operand_ty.scalarType(); + const operand_ty = self.typeOf(ty_op.operand); + const operand_scalar_ty = operand_ty.scalarType(mod); - const dest_ty = self.air.typeOfIndex(inst); - const dest_scalar_ty = dest_ty.scalarType(); + const dest_ty = self.typeOfIndex(inst); + const dest_scalar_ty = dest_ty.scalarType(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); if (intrinsicsAllowed(operand_scalar_ty, target)) { // TODO set fast math flag - if (dest_scalar_ty.isSignedInt()) { + if (dest_scalar_ty.isSignedInt(mod)) { return self.builder.buildFPToSI(operand, dest_llvm_ty, ""); } else { return self.builder.buildFPToUI(operand, dest_llvm_ty, ""); } } - const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(target))); + const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod))); const ret_ty = self.context.intType(rt_int_bits); const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: { // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard @@ -5756,7 +5549,7 @@ pub const FuncGen = struct { const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits); const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits); - const sign_prefix = if (dest_scalar_ty.isSignedInt()) "" else "uns"; + const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns"; var fn_name_buf: [64]u8 = undefined; const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{ @@ -5778,7 +5571,8 @@ pub const FuncGen = struct { } fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { - if (ty.isSlice()) { + const mod = fg.dg.module; + if (ty.isSlice(mod)) { return fg.builder.buildExtractValue(ptr, 0, ""); } else { return ptr; @@ -5786,22 +5580,23 @@ pub const FuncGen = struct { } fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { - const target = fg.dg.module.getTarget(); + const mod = fg.dg.module; + const target = mod.getTarget(); const llvm_usize_ty = fg.context.intType(target.ptrBitWidth()); - switch (ty.ptrSize()) { + switch (ty.ptrSize(mod)) { .Slice => { const len = fg.builder.buildExtractValue(ptr, 1, ""); - const elem_ty = ty.childType(); - const abi_size = elem_ty.abiSize(target); + const elem_ty = ty.childType(mod); + const abi_size = elem_ty.abiSize(mod); if (abi_size == 1) return len; const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False); return fg.builder.buildMul(len, abi_size_llvm_val, ""); }, .One => { - const array_ty = ty.childType(); - const elem_ty = array_ty.childType(); - const abi_size = elem_ty.abiSize(target); - return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False); + const array_ty = ty.childType(mod); + const elem_ty = array_ty.childType(mod); + const abi_size = elem_ty.abiSize(mod); + return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False); }, .Many, .C => unreachable, } @@ -5814,67 +5609,69 @@ pub const FuncGen = struct { } fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); - const slice_ptr_ty = self.air.typeOf(ty_op.operand); - const 
slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType()); + const slice_ptr_ty = self.typeOf(ty_op.operand); + const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType(mod)); return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); } fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const elem_ty = slice_ty.childType(); + const elem_ty = slice_ty.childType(mod); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); - if (isByRef(elem_ty)) { + if (isByRef(elem_ty, mod)) { if (self.canElideLoad(body_tail)) return ptr; - const target = self.dg.module.getTarget(); - return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false); + return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false); } return self.load(ptr, slice_ty); } fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType()); + const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType(mod)); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); } fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const array_ty = self.air.typeOf(bin_op.lhs); + const array_ty = self.typeOf(bin_op.lhs); const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try self.dg.lowerType(array_ty); - const elem_ty = array_ty.childType(); - if (isByRef(array_ty)) { + const elem_ty = array_ty.childType(mod); + if (isByRef(array_ty, mod)) { const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; - if (isByRef(elem_ty)) { + if (isByRef(elem_ty, mod)) { const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, ""); if (canElideLoad(self, body_tail)) return elem_ptr; - const target = self.dg.module.getTarget(); - return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(target), false); + return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false); } else { const lhs_index = Air.refToIndex(bin_op.lhs).?; const elem_llvm_ty = try self.dg.lowerType(elem_ty); @@ -5901,15 +5698,16 @@ pub const FuncGen = struct { } fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const inst = body_tail[0]; const bin_op = 
self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const ptr_ty = self.typeOf(bin_op.lhs); + const elem_ty = ptr_ty.childType(mod); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); // TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch - const ptr = if (ptr_ty.isSinglePointer()) ptr: { + const ptr = if (ptr_ty.isSinglePointer(mod)) ptr: { // If this is a single-item pointer to an array, we need another index in the GEP. const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); @@ -5917,32 +5715,32 @@ pub const FuncGen = struct { const indices: [1]*llvm.Value = .{rhs}; break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); }; - if (isByRef(elem_ty)) { + if (isByRef(elem_ty, mod)) { if (self.canElideLoad(body_tail)) return ptr; - const target = self.dg.module.getTarget(); - return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false); + return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false); } return self.load(ptr, ptr_ty); } fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); + const ptr_ty = self.typeOf(bin_op.lhs); + const elem_ty = ptr_ty.childType(mod); + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const elem_ptr = self.air.getRefType(ty_pl.ty); - if (elem_ptr.ptrInfo().data.vector_index != .none) return base_ptr; + if (elem_ptr.ptrInfo(mod).vector_index != .none) return base_ptr; const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); - if (ptr_ty.isSinglePointer()) { + if (ptr_ty.isSinglePointer(mod)) { // If this is a single-item pointer to an array, we need another index in the GEP. 
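// ---------------------------------------------------------------------------
// [Editor's illustrative aside — not part of the patch.] Concretely, for a
// single-item pointer to an array (`*[N]T`) the GEP needs a leading constant
// zero to step through the pointer before indexing the array:
//
//     %elem = getelementptr inbounds [N x T], ptr %base, i32 0, i64 %rhs
//
// while a many-item pointer (`[*]T`) indexes its element type directly:
//
//     %elem = getelementptr inbounds T, ptr %base, i64 %rhs
//
// That is the `isSinglePointer(mod)` distinction drawn in airPtrElemVal and
// airPtrElemPtr here.
// ---------------------------------------------------------------------------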
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); @@ -5956,7 +5754,7 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; const struct_ptr = try self.resolveInst(struct_field.struct_operand); - const struct_ptr_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ptr_ty = self.typeOf(struct_field.struct_operand); return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, struct_field.field_index); } @@ -5967,41 +5765,41 @@ pub const FuncGen = struct { ) !?*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolveInst(ty_op.operand); - const struct_ptr_ty = self.air.typeOf(ty_op.operand); + const struct_ptr_ty = self.typeOf(ty_op.operand); return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index); } fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const inst = body_tail[0]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ty = self.typeOf(struct_field.struct_operand); const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) { + const field_ty = struct_ty.structFieldType(field_index, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { return null; } - const target = self.dg.module.getTarget(); - if (!isByRef(struct_ty)) { - assert(!isByRef(field_ty)); - switch (struct_ty.zigTypeTag()) { - .Struct => switch (struct_ty.containerLayout()) { + if (!isByRef(struct_ty, mod)) { + assert(!isByRef(field_ty, mod)); + switch (struct_ty.zigTypeTag(mod)) { + .Struct => switch (struct_ty.containerLayout(mod)) { .Packed => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - const bit_offset = struct_obj.packedFieldBitOffset(target, field_index); + const struct_obj = mod.typeToStruct(struct_ty).?; + const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index); const containing_int = struct_llvm_val; const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try self.dg.lowerType(field_ty); - if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(target)); + if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { + const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); - } else if (field_ty.isPtrAtRuntime()) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(target)); + } else if (field_ty.isPtrAtRuntime(mod)) { + const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, 
elem_llvm_ty, ""); @@ -6009,22 +5807,21 @@ pub const FuncGen = struct { return self.builder.buildTrunc(shifted_value, elem_llvm_ty, ""); }, else => { - var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?; + const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index; return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, ""); }, }, .Union => { - assert(struct_ty.containerLayout() == .Packed); + assert(struct_ty.containerLayout(mod) == .Packed); const containing_int = struct_llvm_val; const elem_llvm_ty = try self.dg.lowerType(field_ty); - if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(target)); + if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) { + const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); - } else if (field_ty.isPtrAtRuntime()) { - const elem_bits = @intCast(c_uint, field_ty.bitSize(target)); + } else if (field_ty.isPtrAtRuntime(mod)) { + const elem_bits = @intCast(c_uint, field_ty.bitSize(mod)); const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); @@ -6035,30 +5832,35 @@ pub const FuncGen = struct { } } - switch (struct_ty.zigTypeTag()) { + switch (struct_ty.zigTypeTag(mod)) { .Struct => { - assert(struct_ty.containerLayout() != .Packed); - var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?; + assert(struct_ty.containerLayout(mod) != .Packed); + const llvm_field = llvmField(struct_ty, field_index, mod).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, ""); - const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); - if (isByRef(field_ty)) { + const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); + const field_ptr_ty = try mod.ptrType(.{ + .child = llvm_field.ty.toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), + }, + }); + if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; - return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(target), false); + assert(llvm_field.alignment != 0); + return self.loadByRef(field_ptr, field_ty, llvm_field.alignment, false); } else { return self.load(field_ptr, field_ptr_ty); } }, .Union => { const union_llvm_ty = try self.dg.lowerType(struct_ty); - const layout = struct_ty.unionGetLayout(target); + const layout = struct_ty.unionGetLayout(mod); const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, ""); const llvm_field_ty = try self.dg.lowerType(field_ty); - if (isByRef(field_ty)) { + if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; @@ -6072,14 +5874,15 @@ pub const FuncGen = struct { } fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = 
self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const field_ptr = try self.resolveInst(extra.field_ptr); const target = self.dg.module.getTarget(); - const parent_ty = self.air.getRefType(ty_pl.ty).childType(); - const field_offset = parent_ty.structFieldOffset(extra.field_index, target); + const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod); + const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); if (field_offset == 0) { @@ -6120,12 +5923,13 @@ pub const FuncGen = struct { fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const dib = self.dg.object.di_builder orelse return null; - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; - const func = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.dg.module; + const func = mod.funcPtr(ty_fn.func); const decl_index = func.owner_decl; - const decl = self.dg.module.declPtr(decl_index); - const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope); + const decl = mod.declPtr(decl_index); + const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; const line_number = decl.src_line + 1; const cur_debug_location = self.builder.getCurrentDebugLocation2(); @@ -6136,22 +5940,37 @@ pub const FuncGen = struct { .base_line = self.base_line, }); - const fqn = try decl.getFullyQualifiedName(self.dg.module); - defer self.gpa.free(fqn); + const fqn = try decl.getFullyQualifiedName(mod); - const is_internal_linkage = !self.dg.module.decl_exports.contains(decl_index); + const is_internal_linkage = !mod.decl_exports.contains(decl_index); + const fn_ty = try mod.funcType(.{ + .param_types = &.{}, + .return_type = .void_type, + .alignment = .none, + .noalias_bits = 0, + .comptime_bits = 0, + .cc = .Unspecified, + .is_var_args = false, + .is_generic = false, + .is_noinline = false, + .align_is_generic = false, + .cc_is_generic = false, + .section_is_generic = false, + .addrspace_is_generic = false, + }); + const fn_di_ty = try self.dg.object.lowerDebugType(fn_ty, .full); const subprogram = dib.createFunction( di_file.toScope(), - decl.name, - fqn, + mod.intern_pool.stringToSlice(decl.name), + mod.intern_pool.stringToSlice(fqn), di_file, line_number, - try self.dg.object.lowerDebugType(Type.initTag(.fn_void_no_args), .full), + fn_di_ty, is_internal_linkage, true, // is definition line_number + func.lbrace_line, // scope line llvm.DIFlags.StaticMember, - self.dg.module.comp.bin_file.options.optimize_mode != .Debug, + mod.comp.bin_file.options.optimize_mode != .Debug, null, // decl_subprogram ); @@ -6163,12 +5982,12 @@ pub const FuncGen = struct { fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { if (self.dg.object.di_builder == null) return null; - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; - const func = self.air.values[ty_pl.payload].castTag(.function).?.data; const mod = self.dg.module; + const func = mod.funcPtr(ty_fn.func); const decl = mod.declPtr(func.owner_decl); - const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope); + const di_file = try self.dg.object.getDIFile(self.gpa, 
mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; const old = self.dbg_inlined.pop(); self.di_scope = old.scope; @@ -6192,18 +6011,19 @@ pub const FuncGen = struct { } fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const dib = self.dg.object.di_builder orelse return null; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); - const ptr_ty = self.air.typeOf(pl_op.operand); + const ptr_ty = self.typeOf(pl_op.operand); const di_local_var = dib.createAutoVariable( self.di_scope.?, name.ptr, self.di_file.?, self.prev_dbg_line, - try self.dg.object.lowerDebugType(ptr_ty.childType(), .full), + try self.dg.object.lowerDebugType(ptr_ty.childType(mod), .full), true, // always preserve 0, // flags ); @@ -6221,7 +6041,7 @@ pub const FuncGen = struct { const dib = self.dg.object.di_builder orelse return null; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); - const operand_ty = self.air.typeOf(pl_op.operand); + const operand_ty = self.typeOf(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); if (needDbgVarWorkaround(self.dg)) { @@ -6243,10 +6063,11 @@ pub const FuncGen = struct { null; const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at); const insert_block = self.builder.getInsertBlock(); - if (isByRef(operand_ty)) { + const mod = self.dg.module; + if (isByRef(operand_ty, mod)) { _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block); } else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) { - const alignment = operand_ty.abiAlignment(self.dg.module.getTarget()); + const alignment = operand_ty.abiAlignment(mod); const alloca = self.buildAlloca(operand.typeOf(), alignment); const store_inst = self.builder.buildStore(operand, alloca); store_inst.setAlignment(alignment); @@ -6294,7 +6115,8 @@ pub const FuncGen = struct { // This stores whether we need to add an elementtype attribute and // if so, the element type itself. const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count); - const target = self.dg.module.getTarget(); + const mod = self.dg.module; + const target = mod.getTarget(); var llvm_ret_i: usize = 0; var llvm_param_i: usize = 0; @@ -6321,9 +6143,9 @@ pub const FuncGen = struct { llvm_ret_indirect[i] = (output != .none) and constraintAllowsMemory(constraint); if (output != .none) { const output_inst = try self.resolveInst(output); - const output_ty = self.air.typeOf(output); - assert(output_ty.zigTypeTag() == .Pointer); - const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType()); + const output_ty = self.typeOf(output); + assert(output_ty.zigTypeTag(mod) == .Pointer); + const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType(mod)); if (llvm_ret_indirect[i]) { // Pass the result by reference as an indirect output (e.g. 
"=*m") @@ -6339,7 +6161,7 @@ pub const FuncGen = struct { llvm_ret_i += 1; } } else { - const ret_ty = self.air.typeOfIndex(inst); + const ret_ty = self.typeOfIndex(inst); llvm_ret_types[llvm_ret_i] = try self.dg.lowerType(ret_ty); llvm_ret_i += 1; } @@ -6374,15 +6196,15 @@ pub const FuncGen = struct { extra_i += (constraint.len + name.len + (2 + 3)) / 4; const arg_llvm_value = try self.resolveInst(input); - const arg_ty = self.air.typeOf(input); + const arg_ty = self.typeOf(input); var llvm_elem_ty: ?*llvm.Type = null; - if (isByRef(arg_ty)) { + if (isByRef(arg_ty, mod)) { llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty); if (constraintAllowsMemory(constraint)) { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); } else { - const alignment = arg_ty.abiAlignment(target); + const alignment = arg_ty.abiAlignment(mod); const arg_llvm_ty = try self.dg.lowerType(arg_ty); const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, ""); load_inst.setAlignment(alignment); @@ -6394,7 +6216,7 @@ pub const FuncGen = struct { llvm_param_values[llvm_param_i] = arg_llvm_value; llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf(); } else { - const alignment = arg_ty.abiAlignment(target); + const alignment = arg_ty.abiAlignment(mod); const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment); const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr); store_inst.setAlignment(alignment); @@ -6424,7 +6246,7 @@ pub const FuncGen = struct { // an elementtype() attribute. if (constraint[0] == '*') { llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse - try self.dg.lowerPtrElemTy(arg_ty.childType()); + try self.dg.lowerPtrElemTy(arg_ty.childType(mod)); } else { llvm_param_attrs[llvm_param_i] = null; } @@ -6596,10 +6418,10 @@ pub const FuncGen = struct { if (output != .none) { const output_ptr = try self.resolveInst(output); - const output_ptr_ty = self.air.typeOf(output); + const output_ptr_ty = self.typeOf(output); const store_inst = self.builder.buildStore(output_value, output_ptr); - store_inst.setAlignment(output_ptr_ty.ptrAlignment(target)); + store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod)); } else { ret_val = output_value; } @@ -6615,22 +6437,21 @@ pub const FuncGen = struct { operand_is_ptr: bool, pred: llvm.IntPredicate, ) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); - const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const operand_ty = self.typeOf(un_op); + const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const optional_llvm_ty = try self.dg.lowerType(optional_ty); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); - if (optional_ty.optionalReprIsPayload()) { + const payload_ty = optional_ty.optionalChild(mod); + if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) self.builder.buildLoad(optional_llvm_ty, operand, "") else operand; - if (payload_ty.isSlice()) { + if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); - var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf)); + const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(mod)); return self.builder.buildICmp(pred, 
slice_ptr, ptr_ty.constNull(), ""); } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); @@ -6638,7 +6459,7 @@ pub const FuncGen = struct { comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) self.builder.buildLoad(optional_llvm_ty, operand, "") else @@ -6647,7 +6468,7 @@ pub const FuncGen = struct { return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), ""); } - const is_by_ref = operand_is_ptr or isByRef(optional_ty); + const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod); const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref); if (pred == .EQ) { return self.builder.buildNot(non_null_bit, ""); @@ -6662,15 +6483,16 @@ pub const FuncGen = struct { op: llvm.IntPredicate, operand_is_ptr: bool, ) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - const payload_ty = err_union_ty.errorUnionPayload(); + const operand_ty = self.typeOf(un_op); + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; + const payload_ty = err_union_ty.errorUnionPayload(mod); const err_set_ty = try self.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const llvm_i1 = self.context.intType(1); switch (op) { .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 @@ -6679,7 +6501,7 @@ pub const FuncGen = struct { } } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const loaded = if (operand_is_ptr) self.builder.buildLoad(try self.dg.lowerType(err_union_ty), operand, "") else @@ -6687,10 +6509,9 @@ pub const FuncGen = struct { return self.builder.buildICmp(op, loaded, zero, ""); } - const target = self.dg.module.getTarget(); - const err_field_index = errUnionErrorOffset(payload_ty, target); + const err_field_index = errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty)) { + if (operand_is_ptr or isByRef(err_union_ty, mod)) { const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, ""); const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, ""); @@ -6702,17 +6523,17 @@ pub const FuncGen = struct { } fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const optional_ty = self.typeOf(ty_op.operand).childType(mod); + const payload_ty = optional_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. 
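// ---------------------------------------------------------------------------
// [Editor's illustrative aside — not part of the patch.] The optional hunks
// here all follow optional layout version 3, as the `comptime assert`s above
// note: when `optionalReprIsPayload(mod)` holds (e.g. for `?*T`), the
// optional *is* its payload and null encodes "none", so a payload pointer is
// simply the optional pointer. Otherwise the optional lowers to a struct of
// `{ payload, i8 non_null }`, with the flag read and written as an i8 0/1 at
// field index 1 — exactly what airOptionalPayloadPtrSet stores below.
// ---------------------------------------------------------------------------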
return operand; } - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { // The payload and the optional are the same value. return operand; } @@ -6723,18 +6544,18 @@ pub const FuncGen = struct { fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { comptime assert(optional_layout_version == 3); + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const optional_ty = self.typeOf(ty_op.operand).childType(mod); + const payload_ty = optional_ty.optionalChild(mod); const non_null_bit = self.context.intType(8).constInt(1, .False); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. _ = self.builder.buildStore(non_null_bit, operand); return operand; } - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { // The payload and the optional are the same value. // Setting to non-null will be done when the payload is set. return operand; @@ -6754,20 +6575,21 @@ pub const FuncGen = struct { } fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand); - const payload_ty = self.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null; + const optional_ty = self.typeOf(ty_op.operand); + const payload_ty = self.typeOfIndex(inst); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { // Payload value is the same as the optional value. 
return operand; } const opt_llvm_ty = try self.dg.lowerType(optional_ty); - const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false; + const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load); } @@ -6776,32 +6598,32 @@ pub const FuncGen = struct { body_tail: []const Air.Inst.Index, operand_is_ptr: bool, ) !?*llvm.Value { + const mod = self.dg.module; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - const result_ty = self.air.typeOfIndex(inst); - const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty; - const target = self.dg.module.getTarget(); + const operand_ty = self.typeOf(ty_op.operand); + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; + const result_ty = self.typeOfIndex(inst); + const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return if (operand_is_ptr) operand else null; } - const offset = errUnionPayloadOffset(payload_ty, target); + const offset = errUnionPayloadOffset(payload_ty, mod); const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); if (operand_is_ptr) { return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); - } else if (isByRef(err_union_ty)) { + } else if (isByRef(err_union_ty, mod)) { const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); - if (isByRef(payload_ty)) { + if (isByRef(payload_ty, mod)) { if (self.canElideLoad(body_tail)) return payload_ptr; - return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false); + return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false); } const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, ""); - load_inst.setAlignment(payload_ty.abiAlignment(target)); + load_inst.setAlignment(payload_ty.abiAlignment(mod)); return load_inst; } return self.builder.buildExtractValue(operand, offset, ""); @@ -6812,11 +6634,12 @@ pub const FuncGen = struct { inst: Air.Inst.Index, operand_is_ptr: bool, ) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + const operand_ty = self.typeOf(ty_op.operand); + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { return operand; @@ -6827,16 +6650,15 @@ pub const FuncGen = struct { const err_set_llvm_ty = try self.dg.lowerType(Type.anyerror); - const payload_ty = err_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const payload_ty = err_union_ty.errorUnionPayload(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (!operand_is_ptr) return operand; return 
self.builder.buildLoad(err_set_llvm_ty, operand, ""); } - const target = self.dg.module.getTarget(); - const offset = errUnionErrorOffset(payload_ty, target); + const offset = errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty)) { + if (operand_is_ptr or isByRef(err_union_ty, mod)) { const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, ""); @@ -6846,30 +6668,30 @@ pub const FuncGen = struct { } fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const err_union_ty = self.air.typeOf(ty_op.operand).childType(); + const err_union_ty = self.typeOf(ty_op.operand).childType(mod); - const payload_ty = err_union_ty.errorUnionPayload(); - const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const payload_ty = err_union_ty.errorUnionPayload(mod); + const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) }); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); return operand; } - const target = self.dg.module.getTarget(); const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); { - const error_offset = errUnionErrorOffset(payload_ty, target); + const error_offset = errUnionErrorOffset(payload_ty, mod); // First set the non-error value. const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, ""); const store_inst = self.builder.buildStore(non_error_val, non_null_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); } // Then return the payload pointer (only if it is used). 
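// ---------------------------------------------------------------------------
// [Editor's illustrative aside — not part of the patch.] Note the value-side
// counterpart of the `mod` threading in this hunk: the tag-based `Value.zero`
// becomes `try mod.intValue(Type.err_int, 0)`, i.e. constants are now
// interned through the Module/InternPool instead of being built from static
// value tags. The emitted sequence is unchanged: store the "no error" code
// into the error field first, then hand back a pointer to the payload slot.
// ---------------------------------------------------------------------------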
if (self.liveness.isUnused(inst)) return null; - const payload_offset = errUnionPayloadOffset(payload_ty, target); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, ""); } @@ -6885,42 +6707,41 @@ pub const FuncGen = struct { } fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const target = self.dg.module.getTarget(); - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; //const struct_ty = try self.resolveInst(ty_pl.ty); const struct_ty = self.air.getRefType(ty_pl.ty); const field_index = ty_pl.payload; - var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?; + const mod = self.dg.module; + const llvm_field = llvmField(struct_ty, field_index, mod).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, ""); - const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); + const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); + const field_ptr_ty = try mod.ptrType(.{ + .child = llvm_field.ty.toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), + }, + }); return self.load(field_ptr, field_ptr_ty); } fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); const non_null_bit = self.context.intType(8).constInt(1, .False); comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return non_null_bit; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload()) { + const optional_ty = self.typeOfIndex(inst); + if (optional_ty.optionalReprIsPayload(mod)) { return operand; } const llvm_optional_ty = try self.dg.lowerType(optional_ty); - if (isByRef(optional_ty)) { - const target = self.dg.module.getTarget(); - const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(target)); + if (isByRef(optional_ty, mod)) { + const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, ""); _ = self.builder.buildStore(non_null_bit, non_null_ptr); @@ -6931,30 +6752,26 @@ pub const FuncGen = struct { } fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_un_ty = self.air.typeOfIndex(inst); + const err_un_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); - const payload_ty = 
self.air.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const payload_ty = self.typeOf(ty_op.operand); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull(); const err_un_llvm_ty = try self.dg.lowerType(err_un_ty); - const target = self.dg.module.getTarget(); - const payload_offset = errUnionPayloadOffset(payload_ty, target); - const error_offset = errUnionErrorOffset(payload_ty, target); - if (isByRef(err_un_ty)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target)); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); + const error_offset = errUnionErrorOffset(payload_ty, mod); + if (isByRef(err_un_ty, mod)) { + const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); const store_inst = self.builder.buildStore(ok_err_code, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); return result_ptr; } @@ -6964,29 +6781,25 @@ pub const FuncGen = struct { } fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_un_ty = self.air.typeOfIndex(inst); - const payload_ty = err_un_ty.errorUnionPayload(); + const err_un_ty = self.typeOfIndex(inst); + const payload_ty = err_un_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } const err_un_llvm_ty = try self.dg.lowerType(err_un_ty); - const target = self.dg.module.getTarget(); - const payload_offset = errUnionPayloadOffset(payload_ty, target); - const error_offset = errUnionErrorOffset(payload_ty, target); - if (isByRef(err_un_ty)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target)); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); + const error_offset = errUnionErrorOffset(payload_ty, mod); + if (isByRef(err_un_ty, mod)) { + const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); const store_inst = self.builder.buildStore(operand, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = payload_ptr; _ = payload_ptr_ty; @@ -7021,20 +6834,20 @@ pub const FuncGen = struct { } fn airVectorStoreElem(self: 
*FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const data = self.air.instructions.items(.data)[inst].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; const vector_ptr = try self.resolveInst(data.vector_ptr); - const vector_ptr_ty = self.air.typeOf(data.vector_ptr); + const vector_ptr_ty = self.typeOf(data.vector_ptr); const index = try self.resolveInst(extra.lhs); const operand = try self.resolveInst(extra.rhs); const loaded_vector = blk: { - const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType()); + const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod)); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); - const target = self.dg.module.getTarget(); - load_inst.setAlignment(vector_ptr_ty.ptrAlignment(target)); - load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); + load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); + load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod))); break :blk load_inst; }; const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, ""); @@ -7043,24 +6856,26 @@ pub const FuncGen = struct { } fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(); + const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildSMin(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, ""); return self.builder.buildUMin(lhs, rhs, ""); } fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(); + const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildSMax(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, ""); return self.builder.buildUMax(lhs, rhs, ""); } @@ -7069,7 +6884,7 @@ pub const FuncGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const llvm_slice_ty = try self.dg.lowerType(inst_ty); // In case of slicing a global, the result type looks something like `{ i8*, i64 }` @@ -7081,14 +6896,15 @@ pub const FuncGen = struct { fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const inst_ty = self.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) 
return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, ""); return self.builder.buildNUWAdd(lhs, rhs, ""); } @@ -7103,14 +6919,15 @@ pub const FuncGen = struct { } fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const inst_ty = self.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); - if (scalar_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, ""); return self.builder.buildUAddSat(lhs, rhs, ""); } @@ -7118,14 +6935,15 @@ pub const FuncGen = struct { fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const inst_ty = self.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, ""); return self.builder.buildNUWSub(lhs, rhs, ""); } @@ -7140,28 +6958,30 @@ pub const FuncGen = struct { } fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const inst_ty = self.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); - if (scalar_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, ""); return self.builder.buildUSubSat(lhs, rhs, ""); } fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const inst_ty = self.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, ""); return self.builder.buildNUWMul(lhs, rhs, ""); } @@ -7176,14 +6996,15 @@ pub const FuncGen = struct { } fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + 
@@ -7176,14 +6996,15 @@ pub const FuncGen = struct {
     }
 
     fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const inst_ty = self.typeOfIndex(inst);
+        const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
-        if (scalar_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, "");
         return self.builder.buildUMulFixSat(lhs, rhs, "");
     }
 
@@ -7193,7 +7014,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
 
         return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
     }
 
@@ -7201,39 +7022,40 @@ pub const FuncGen = struct {
 
     fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const inst_ty = self.typeOfIndex(inst);
+        const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) {
             const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
             return self.buildFloatOp(.trunc, inst_ty, 1, .{result});
         }
-        if (scalar_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, "");
         return self.builder.buildUDiv(lhs, rhs, "");
     }
 
     fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const inst_ty = self.typeOfIndex(inst);
+        const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) {
             const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
             return self.buildFloatOp(.floor, inst_ty, 1, .{result});
         }
-        if (scalar_ty.isSignedInt()) {
-            const target = self.dg.module.getTarget();
+        if (scalar_ty.isSignedInt(mod)) {
             const inst_llvm_ty = try self.dg.lowerType(inst_ty);
-            const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1;
-            const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: {
-                const vec_len = inst_ty.vectorLen();
+            const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
+            const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
+                const vec_len = inst_ty.vectorLen(mod);
                 const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
 
                 const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
@@ -7258,40 +7080,43 @@ pub const FuncGen = struct {
 
     fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const inst_ty = self.typeOfIndex(inst);
+        const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, "");
         return self.builder.buildExactUDiv(lhs, rhs, "");
     }
 
     fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
-        const scalar_ty = inst_ty.scalarType();
+        const inst_ty = self.typeOfIndex(inst);
+        const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
-        if (scalar_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, "");
+        if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, "");
         return self.builder.buildURem(lhs, rhs, "");
     }
 
     fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const inst_llvm_ty = try self.dg.lowerType(inst_ty);
-        const scalar_ty = inst_ty.scalarType();
+        const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) {
             const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
@@ -7301,11 +7126,10 @@ pub const FuncGen = struct {
             const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero });
             return self.builder.buildSelect(ltz, c, a, "");
         }
-        if (scalar_ty.isSignedInt()) {
-            const target = self.dg.module.getTarget();
-            const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1;
-            const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: {
-                const vec_len = inst_ty.vectorLen();
+        if (scalar_ty.isSignedInt(mod)) {
+            const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1;
+            const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: {
+                const vec_len = inst_ty.vectorLen(mod);
                 const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
 
                 const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
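Worth spelling out the arithmetic behind the `bit_size_minus_one` sign-mask code in airDivFloor and airMod above: for signed integers, floored division equals truncated division minus one exactly when the remainder is nonzero and the operands have opposite signs, and an arithmetic shift of `lhs ^ rhs` by `bits - 1` yields -1 precisely in that case. A plain-Zig sketch of the identity (illustrative only, not the emitted LLVM IR):

const std = @import("std");

fn divFloor(a: i32, b: i32) i32 {
    const div = @divTrunc(a, b);
    const rem = @rem(a, b);
    const sign_mask = (a ^ b) >> 31; // -1 when signs differ, 0 otherwise
    return div + (if (rem != 0) sign_mask else 0);
}

pub fn main() void {
    // -7 / 2 truncates to -3; the correction takes it down to the floor, -4.
    std.debug.print("{} {}\n", .{ divFloor(-7, 2), divFloor(7, 2) });
}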
@@ -7328,13 +7152,14 @@ pub const FuncGen = struct {
     }
 
     fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr = try self.resolveInst(bin_op.lhs);
         const offset = try self.resolveInst(bin_op.rhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
-        switch (ptr_ty.ptrSize()) {
+        const ptr_ty = self.typeOf(bin_op.lhs);
+        const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod));
+        switch (ptr_ty.ptrSize(mod)) {
             .One => {
                 // It's a pointer to an array, so according to LLVM we need an extra GEP index.
                 const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset };
@@ -7353,14 +7178,15 @@ pub const FuncGen = struct {
     }
 
     fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr = try self.resolveInst(bin_op.lhs);
         const offset = try self.resolveInst(bin_op.rhs);
         const negative_offset = self.builder.buildNeg(offset, "");
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
-        switch (ptr_ty.ptrSize()) {
+        const ptr_ty = self.typeOf(bin_op.lhs);
+        const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod));
+        switch (ptr_ty.ptrSize(mod)) {
             .One => {
                 // It's a pointer to an array, so according to LLVM we need an extra GEP index.
                 const indices: [2]*llvm.Value = .{
@@ -7386,36 +7212,33 @@ pub const FuncGen = struct {
         signed_intrinsic: []const u8,
         unsigned_intrinsic: []const u8,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
 
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const scalar_ty = lhs_ty.scalarType();
-        const dest_ty = self.air.typeOfIndex(inst);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const scalar_ty = lhs_ty.scalarType(mod);
+        const dest_ty = self.typeOfIndex(inst);
 
-        const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic;
+        const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
 
         const llvm_lhs_ty = try self.dg.lowerType(lhs_ty);
         const llvm_dest_ty = try self.dg.lowerType(dest_ty);
 
-        const tg = self.dg.module.getTarget();
-
         const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
         const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");
 
         const result = self.builder.buildExtractValue(result_struct, 0, "");
         const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
 
-        var ty_buf: Type.Payload.Pointer = undefined;
-        const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?;
-        const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?;
+        const result_index = llvmField(dest_ty, 0, mod).?.index;
+        const overflow_index = llvmField(dest_ty, 1, mod).?.index;
 
-        if (isByRef(dest_ty)) {
-            const target = self.dg.module.getTarget();
-            const result_alignment = dest_ty.abiAlignment(target);
+        if (isByRef(dest_ty, mod)) {
+            const result_alignment = dest_ty.abiAlignment(mod);
             const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
             {
                 const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
@@ -7486,8 +7309,9 @@ pub const FuncGen = struct {
         ty: Type,
         params: [2]*llvm.Value,
     ) !*llvm.Value {
+        const mod = self.dg.module;
         const target = self.dg.module.getTarget();
-        const scalar_ty = ty.scalarType();
+        const scalar_ty = ty.scalarType(mod);
         const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
 
         if (intrinsicsAllowed(scalar_ty, target)) {
@@ -7531,8 +7355,8 @@ pub const FuncGen = struct {
                 .gte => .SGE,
             };
 
-            if (ty.zigTypeTag() == .Vector) {
-                const vec_len = ty.vectorLen();
+            if (ty.zigTypeTag(mod) == .Vector) {
+                const vec_len = ty.vectorLen(mod);
                 const vector_result_ty = llvm_i32.vectorType(vec_len);
 
                 var result = vector_result_ty.getUndef();
@@ -7587,8 +7411,9 @@ pub const FuncGen = struct {
        comptime params_len: usize,
         params: [params_len]*llvm.Value,
     ) !*llvm.Value {
-        const target = self.dg.module.getTarget();
-        const scalar_ty = ty.scalarType();
+        const mod = self.dg.module;
+        const target = mod.getTarget();
+        const scalar_ty = ty.scalarType(mod);
         const llvm_ty = try self.dg.lowerType(ty);
         const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
 
@@ -7615,9 +7440,9 @@ pub const FuncGen = struct {
             const one = int_llvm_ty.constInt(1, .False);
             const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
             const sign_mask = one.constShl(shift_amt);
-            const result = if (ty.zigTypeTag() == .Vector) blk: {
-                const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, "");
-                const cast_ty = int_llvm_ty.vectorType(ty.vectorLen());
+            const result = if (ty.zigTypeTag(mod) == .Vector) blk: {
+                const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, "");
+                const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod));
                 const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, "");
                 break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, "");
             } else blk: {
@@ -7662,9 +7487,9 @@ pub const FuncGen = struct {
             .libc => |fn_name| b: {
                 const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty };
                 const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty);
-                if (ty.zigTypeTag() == .Vector) {
+                if (ty.zigTypeTag(mod) == .Vector) {
                     const result = llvm_ty.getUndef();
-                    return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen());
+                    return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod));
                 }
 
                 break :b libc_fn;
@@ -7681,47 +7506,44 @@ pub const FuncGen = struct {
         const mulend2 = try self.resolveInst(extra.rhs);
         const addend = try self.resolveInst(pl_op.operand);
 
-        const ty = self.air.typeOfIndex(inst);
+        const ty = self.typeOfIndex(inst);
         return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend });
     }
 
     fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
 
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
-        const lhs_scalar_ty = lhs_ty.scalarType();
-        const rhs_scalar_ty = rhs_ty.scalarType();
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
+        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const llvm_dest_ty = try self.dg.lowerType(dest_ty);
 
-        const tg = self.dg.module.getTarget();
-
-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
             self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
         else
             rhs;
 
         const result = self.builder.buildShl(lhs, casted_rhs, "");
-        const reconstructed = if (lhs_scalar_ty.isSignedInt())
+        const reconstructed = if (lhs_scalar_ty.isSignedInt(mod))
             self.builder.buildAShr(result, casted_rhs, "")
         else
             self.builder.buildLShr(result, casted_rhs, "");
 
         const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");
 
-        var ty_buf: Type.Payload.Pointer = undefined;
-        const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?;
-        const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?;
+        const result_index = llvmField(dest_ty, 0, mod).?.index;
+        const overflow_index = llvmField(dest_ty, 1, mod).?.index;
 
-        if (isByRef(dest_ty)) {
-            const target = self.dg.module.getTarget();
-            const result_alignment = dest_ty.abiAlignment(target);
+        if (isByRef(dest_ty, mod)) {
+            const result_alignment = dest_ty.abiAlignment(mod);
             const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment);
             {
                 const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, "");
@@ -7763,40 +7585,38 @@ pub const FuncGen = struct {
     }
 
     fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
-        const lhs_scalar_ty = lhs_ty.scalarType();
-        const rhs_scalar_ty = rhs_ty.scalarType();
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
+        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
-        const tg = self.dg.module.getTarget();
-
-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
             self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
         else
             rhs;
-        if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, "");
+        if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, "");
         return self.builder.buildNUWShl(lhs, casted_rhs, "");
     }
 
     fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
-        const lhs_type = self.air.typeOf(bin_op.lhs);
-        const rhs_type = self.air.typeOf(bin_op.rhs);
-        const lhs_scalar_ty = lhs_type.scalarType();
-        const rhs_scalar_ty = rhs_type.scalarType();
+        const lhs_type = self.typeOf(bin_op.lhs);
+        const rhs_type = self.typeOf(bin_op.rhs);
+        const lhs_scalar_ty = lhs_type.scalarType(mod);
+        const rhs_scalar_ty = rhs_type.scalarType(mod);
 
-        const tg = self.dg.module.getTarget();
-
-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
             self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "")
         else
             rhs;
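The reconstruct-and-compare overflow check in airShlWithOverflow above has a compact scalar reading: shift left, shift back with the matching right shift (arithmetic for signed, logical for unsigned), and any bits lost off the top show up as an inequality. A small stand-alone Zig sketch of the same idea (the helper is hypothetical):

const std = @import("std");

fn shlOverflows(lhs: u8, amt: u3) bool {
    const shifted = lhs << amt;
    return (shifted >> amt) != lhs; // lost bits make the round trip lossy
}

pub fn main() void {
    std.debug.print("{} {}\n", .{
        shlOverflows(0b1000_0000, 1), // top bit falls off: true
        shlOverflows(0b0010_0000, 1), // round trip intact: false
    });
}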
@@ -7804,24 +7624,24 @@ pub const FuncGen = struct {
     }
 
     fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
-        const lhs_scalar_ty = lhs_ty.scalarType();
-        const rhs_scalar_ty = rhs_ty.scalarType();
-        const tg = self.dg.module.getTarget();
-        const lhs_bits = lhs_scalar_ty.bitSize(tg);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
+        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const rhs_scalar_ty = rhs_ty.scalarType(mod);
+        const lhs_bits = lhs_scalar_ty.bitSize(mod);
 
-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_bits)
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits)
             self.builder.buildZExt(rhs, lhs.typeOf(), "")
         else
             rhs;
 
-        const result = if (lhs_scalar_ty.isSignedInt())
+        const result = if (lhs_scalar_ty.isSignedInt(mod))
             self.builder.buildSShlSat(lhs, casted_rhs, "")
         else
             self.builder.buildUShlSat(lhs, casted_rhs, "");
@@ -7834,8 +7654,8 @@ pub const FuncGen = struct {
         const lhs_scalar_llvm_ty = try self.dg.lowerType(lhs_scalar_ty);
         const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False);
         const lhs_max = lhs_scalar_llvm_ty.constAllOnes();
-        if (rhs_ty.zigTypeTag() == .Vector) {
-            const vec_len = rhs_ty.vectorLen();
+        if (rhs_ty.zigTypeTag(mod) == .Vector) {
+            const vec_len = rhs_ty.vectorLen(mod);
             const bits_vec = self.builder.buildVectorSplat(vec_len, bits, "");
             const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, "");
             const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, "");
@@ -7847,23 +7667,22 @@ pub const FuncGen = struct {
     }
 
     fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
-        const lhs_scalar_ty = lhs_ty.scalarType();
-        const rhs_scalar_ty = rhs_ty.scalarType();
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
+        const lhs_scalar_ty = lhs_ty.scalarType(mod);
+        const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
-        const tg = self.dg.module.getTarget();
-
-        const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg))
+        const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
             self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "")
         else
             rhs;
-        const is_signed_int = lhs_scalar_ty.isSignedInt();
+        const is_signed_int = lhs_scalar_ty.isSignedInt(mod);
 
         if (is_exact) {
             if (is_signed_int) {
@@ -7881,14 +7700,14 @@ pub const FuncGen = struct {
     }
 
     fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const dest_ty = self.air.typeOfIndex(inst);
-        const dest_info = dest_ty.intInfo(target);
+        const dest_ty = self.typeOfIndex(inst);
+        const dest_info = dest_ty.intInfo(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const operand_info = operand_ty.intInfo(target);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const operand_info = operand_ty.intInfo(mod);
 
         if (operand_info.bits < dest_info.bits) {
             switch (operand_info.signedness) {
@@ -7905,16 +7724,17 @@ pub const FuncGen = struct {
     }
 
     fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+        const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
         return self.builder.buildTrunc(operand, dest_llvm_ty, "");
     }
 
     fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const dest_ty = self.air.typeOfIndex(inst);
-        const target = self.dg.module.getTarget();
+        const operand_ty = self.typeOf(ty_op.operand);
+        const dest_ty = self.typeOfIndex(inst);
+        const target = mod.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);
@@ -7939,11 +7759,12 @@ pub const FuncGen = struct {
     }
 
     fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const dest_ty = self.air.typeOfIndex(inst);
-        const target = self.dg.module.getTarget();
+        const operand_ty = self.typeOf(ty_op.operand);
+        const dest_ty = self.typeOfIndex(inst);
+        const target = mod.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);
@@ -7970,25 +7791,25 @@ pub const FuncGen = struct {
 
     fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty);
-        const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+        const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
         return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, "");
     }
 
     fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const inst_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
 
         return self.bitCast(operand, operand_ty, inst_ty);
     }
 
     fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value {
-        const operand_is_ref = isByRef(operand_ty);
-        const result_is_ref = isByRef(inst_ty);
+        const mod = self.dg.module;
+        const operand_is_ref = isByRef(operand_ty, mod);
+        const result_is_ref = isByRef(inst_ty, mod);
         const llvm_dest_ty = try self.dg.lowerType(inst_ty);
-        const target = self.dg.module.getTarget();
 
         if (operand_is_ref and result_is_ref) {
             // They are both pointers, so just return the same opaque pointer :)
@@ -8001,27 +7822,27 @@ pub const FuncGen = struct {
             return self.builder.buildZExtOrBitCast(operand, llvm_dest_ty, "");
         }
 
-        if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) {
+        if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) {
            return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
         }
 
-        if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
-            const elem_ty = operand_ty.childType();
+        if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
+            const elem_ty = operand_ty.childType(mod);
             if (!result_is_ref) {
                 return self.dg.todo("implement bitcast vector to non-ref array", .{});
             }
             const array_ptr = self.buildAlloca(llvm_dest_ty, null);
-            const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+            const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
             if (bitcast_ok) {
                 const llvm_store = self.builder.buildStore(operand, array_ptr);
-                llvm_store.setAlignment(inst_ty.abiAlignment(target));
+                llvm_store.setAlignment(inst_ty.abiAlignment(mod));
             } else {
                 // If the ABI size of the element type is not evenly divisible by size in bits;
                 // a simple bitcast will not work, and we fall back to extractelement.
                 const llvm_usize = try self.dg.lowerType(Type.usize);
                 const llvm_u32 = self.context.intType(32);
                 const zero = llvm_usize.constNull();
-                const vector_len = operand_ty.arrayLen();
+                const vector_len = operand_ty.arrayLen(mod);
                 var i: u64 = 0;
                 while (i < vector_len) : (i += 1) {
                     const index_usize = llvm_usize.constInt(i, .False);
@@ -8033,19 +7854,19 @@ pub const FuncGen = struct {
                 }
             }
             return array_ptr;
-        } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
-            const elem_ty = operand_ty.childType();
+        } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
+            const elem_ty = operand_ty.childType(mod);
             const llvm_vector_ty = try self.dg.lowerType(inst_ty);
             if (!operand_is_ref) {
                 return self.dg.todo("implement bitcast non-ref array to vector", .{});
             }
 
-            const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+            const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
             if (bitcast_ok) {
                 const vector = self.builder.buildLoad(llvm_vector_ty, operand, "");
                 // The array is aligned to the element's alignment, while the vector might have a completely
                 // different alignment. This means we need to enforce the alignment of this load.
-                vector.setAlignment(elem_ty.abiAlignment(target));
+                vector.setAlignment(elem_ty.abiAlignment(mod));
                 return vector;
             } else {
                 // If the ABI size of the element type is not evenly divisible by size in bits;
@@ -8055,7 +7876,7 @@ pub const FuncGen = struct {
                 const llvm_usize = try self.dg.lowerType(Type.usize);
                 const llvm_u32 = self.context.intType(32);
                 const zero = llvm_usize.constNull();
-                const vector_len = operand_ty.arrayLen();
+                const vector_len = operand_ty.arrayLen(mod);
                 var vector = llvm_vector_ty.getUndef();
                 var i: u64 = 0;
                 while (i < vector_len) : (i += 1) {
@@ -8073,12 +7894,12 @@ pub const FuncGen = struct {
 
         if (operand_is_ref) {
             const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, "");
-            load_inst.setAlignment(operand_ty.abiAlignment(target));
+            load_inst.setAlignment(operand_ty.abiAlignment(mod));
             return load_inst;
         }
 
         if (result_is_ref) {
-            const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+            const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
             const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
             const store_inst = self.builder.buildStore(operand, result_ptr);
             store_inst.setAlignment(alignment);
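The `bitcast_ok` predicate above is the crux of both the vector-to-array and array-to-vector paths: a flat load/store reinterpretation is sound only when an element's bit size exactly fills its ABI size; otherwise each element carries padding bits that a flat copy would smear across its neighbors, so the backend falls back to per-element moves. A small illustration (the sizes shown are typical; exact ABI sizes are target-dependent):

const std = @import("std");

pub fn main() void {
    // u8 fills its ABI slot: 8 bits in 1 byte -> direct bitcast is fine.
    std.debug.print("u8:  {} bits in {} bytes\n", .{ @bitSizeOf(u8), @sizeOf(u8) });
    // u24 does not: 24 bits in a 4-byte slot -> fall back to per-element moves.
    std.debug.print("u24: {} bits in {} bytes\n", .{ @bitSizeOf(u24), @sizeOf(u24) });
}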
@@ -8089,7 +7910,7 @@ pub const FuncGen = struct {
         // Both our operand and our result are values, not pointers,
         // but LLVM won't let us bitcast struct values.
         // Therefore, we store operand to alloca, then load for result.
-        const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+        const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
         const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
         const store_inst = self.builder.buildStore(operand, result_ptr);
         store_inst.setAlignment(alignment);
@@ -8108,22 +7929,23 @@ pub const FuncGen = struct {
     }
 
     fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const arg_val = self.args[self.arg_index];
         self.arg_index += 1;
 
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         if (self.dg.object.di_builder) |dib| {
             if (needDbgVarWorkaround(self.dg)) {
                 return arg_val;
             }
 
             const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
-            const func = self.dg.decl.getFunction().?;
-            const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
+            const func = self.dg.decl.getOwnedFunction(mod).?;
+            const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
             const lbrace_col = func.lbrace_column + 1;
             const di_local_var = dib.createParameterVariable(
                 self.di_scope.?,
-                func.getParamName(self.dg.module, src_index).ptr, // TODO test 0 bit args
+                func.getParamName(mod, src_index).ptr, // TODO test 0 bit args
                 self.di_file.?,
                 lbrace_line,
                 try self.dg.object.lowerDebugType(inst_ty, .full),
@@ -8134,10 +7956,10 @@ pub const FuncGen = struct {
             const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null);
             const insert_block = self.builder.getInsertBlock();
-            if (isByRef(inst_ty)) {
+            if (isByRef(inst_ty, mod)) {
                 _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block);
             } else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
-                const alignment = inst_ty.abiAlignment(self.dg.module.getTarget());
+                const alignment = inst_ty.abiAlignment(mod);
                 const alloca = self.buildAlloca(arg_val.typeOf(), alignment);
                 const store_inst = self.builder.buildStore(arg_val, alloca);
                 store_inst.setAlignment(alignment);
@@ -8151,24 +7973,24 @@ pub const FuncGen = struct {
     }
 
     fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const ptr_ty = self.air.typeOfIndex(inst);
-        const pointee_type = ptr_ty.childType();
-        if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+        const mod = self.dg.module;
+        const ptr_ty = self.typeOfIndex(inst);
+        const pointee_type = ptr_ty.childType(mod);
+        if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
 
         const pointee_llvm_ty = try self.dg.lowerType(pointee_type);
-        const target = self.dg.module.getTarget();
-        const alignment = ptr_ty.ptrAlignment(target);
+        const alignment = ptr_ty.ptrAlignment(mod);
         return self.buildAlloca(pointee_llvm_ty, alignment);
     }
 
     fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const ptr_ty = self.air.typeOfIndex(inst);
-        const ret_ty = ptr_ty.childType();
-        if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+        const mod = self.dg.module;
+        const ptr_ty = self.typeOfIndex(inst);
+        const ret_ty = ptr_ty.childType(mod);
+        if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
         if (self.ret_ptr) |ret_ptr| return ret_ptr;
         const ret_llvm_ty = try self.dg.lowerType(ret_ty);
-        const target = self.dg.module.getTarget();
-        return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(target));
+        return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod));
     }
 
     /// Use this instead of builder.buildAlloca, because this function makes sure to
@@ -8178,12 +8000,13 @@ pub const FuncGen = struct {
     }
 
     fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_ptr = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const operand_ty = ptr_ty.childType();
+        const ptr_ty = self.typeOf(bin_op.lhs);
+        const operand_ty = ptr_ty.childType(mod);
 
-        const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+        const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
         if (val_is_undef) {
             // Even if safety is disabled, we still emit a memset to undefined since it conveys
             // extra information to LLVM. However, safety makes the difference between using
             // 0xaa or actual undefined for the fill byte.
             const u8_llvm_ty = self.context.intType(8);
             const fill_byte = if (safety)
                 u8_llvm_ty.constInt(0xaa, .False)
             else
                 u8_llvm_ty.getUndef();
-            const target = self.dg.module.getTarget();
-            const operand_size = operand_ty.abiSize(target);
+            const operand_size = operand_ty.abiSize(mod);
             const usize_llvm_ty = try self.dg.lowerType(Type.usize);
             const len = usize_llvm_ty.constInt(operand_size, .False);
-            const dest_ptr_align = ptr_ty.ptrAlignment(target);
-            _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
-            if (safety and self.dg.module.comp.bin_file.options.valgrind) {
+            const dest_ptr_align = ptr_ty.ptrAlignment(mod);
+            _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod));
+            if (safety and mod.comp.bin_file.options.valgrind) {
                 self.valgrindMarkUndef(dest_ptr, len);
             }
             return null;
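As the airStore path above shows, a store of an undefined value still lowers to a memset so LLVM learns the bytes are meaningless; safety only chooses between a recognizable 0xaa pattern and a true undefined fill. A plain-Zig stand-in for the emitted behavior (the helper name is hypothetical):

const std = @import("std");

fn fillUndef(dest: []u8, safety: bool) void {
    if (safety) {
        for (dest) |*b| b.* = 0xaa; // same 0xaa fill byte the diff uses
    }
    // With safety off, the bytes are deliberately left unspecified.
}

pub fn main() void {
    var buf: [4]u8 = undefined;
    fillUndef(&buf, true);
    std.debug.print("{any}\n", .{buf}); // prints { 170, 170, 170, 170 }
}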
@@ -8217,8 +8039,10 @@ pub const FuncGen = struct {
     ///
     /// The first instruction of `body_tail` is the one whose copy we want to elide.
     fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
+        const mod = fg.dg.module;
+        const ip = &mod.intern_pool;
         for (body_tail[1..]) |body_inst| {
-            switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0])) {
+            switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) {
                 .none => continue,
                 .write, .noret, .complex => return false,
                 .tomb => return true,
@@ -8230,14 +8054,15 @@ pub const FuncGen = struct {
     }
 
     fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = fg.dg.module;
         const inst = body_tail[0];
         const ty_op = fg.air.instructions.items(.data)[inst].ty_op;
-        const ptr_ty = fg.air.typeOf(ty_op.operand);
-        const ptr_info = ptr_ty.ptrInfo().data;
+        const ptr_ty = fg.typeOf(ty_op.operand);
+        const ptr_info = ptr_ty.ptrInfo(mod);
         const ptr = try fg.resolveInst(ty_op.operand);
 
         elide: {
-            if (!isByRef(ptr_info.pointee_type)) break :elide;
+            if (!isByRef(ptr_info.pointee_type, mod)) break :elide;
             if (!canElideLoad(fg, body_tail)) break :elide;
             return ptr;
         }
@@ -8261,8 +8086,9 @@ pub const FuncGen = struct {
 
     fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         _ = inst;
+        const mod = self.dg.module;
         const llvm_usize = try self.dg.lowerType(Type.usize);
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
         if (!target_util.supportsReturnAddress(target)) {
             // https://github.com/ziglang/zig/issues/11946
             return llvm_usize.constNull();
@@ -8301,16 +8127,17 @@ pub const FuncGen = struct {
     }
 
     fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
         const ptr = try self.resolveInst(extra.ptr);
         var expected_value = try self.resolveInst(extra.expected_value);
         var new_value = try self.resolveInst(extra.new_value);
-        const operand_ty = self.air.typeOf(extra.ptr).elemType();
+        const operand_ty = self.typeOf(extra.ptr).childType(mod);
         const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
         if (opt_abi_ty) |abi_ty| {
             // operand needs widening and truncating
-            if (operand_ty.isSignedInt()) {
+            if (operand_ty.isSignedInt(mod)) {
                 expected_value = self.builder.buildSExt(expected_value, abi_ty, "");
                 new_value = self.builder.buildSExt(new_value, abi_ty, "");
             } else {
@@ -8328,7 +8155,7 @@ pub const FuncGen = struct {
         );
         result.setWeak(llvm.Bool.fromBool(is_weak));
 
-        const optional_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOfIndex(inst);
 
         var payload = self.builder.buildExtractValue(result, 0, "");
         if (opt_abi_ty != null) {
@@ -8336,7 +8163,7 @@ pub const FuncGen = struct {
         }
         const success_bit = self.builder.buildExtractValue(result, 1, "");
 
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, "");
         }
 
@@ -8347,13 +8174,14 @@ pub const FuncGen = struct {
     }
 
     fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
         const ptr = try self.resolveInst(pl_op.operand);
-        const ptr_ty = self.air.typeOf(pl_op.operand);
-        const operand_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(pl_op.operand);
+        const operand_ty = ptr_ty.childType(mod);
         const operand = try self.resolveInst(extra.operand);
-        const is_signed_int = operand_ty.isSignedInt();
+        const is_signed_int = operand_ty.isSignedInt(mod);
         const is_float = operand_ty.isRuntimeFloat();
         const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float);
         const ordering = toLlvmAtomicOrdering(extra.ordering());
@@ -8402,17 +8230,17 @@ pub const FuncGen = struct {
     }
 
     fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
         const ptr = try self.resolveInst(atomic_load.ptr);
-        const ptr_ty = self.air.typeOf(atomic_load.ptr);
-        const ptr_info = ptr_ty.ptrInfo().data;
+        const ptr_ty = self.typeOf(atomic_load.ptr);
+        const ptr_info = ptr_ty.ptrInfo(mod);
         const elem_ty = ptr_info.pointee_type;
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime())
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
             return null;
         const ordering = toLlvmAtomicOrdering(atomic_load.order);
         const opt_abi_llvm_ty = self.dg.getAtomicAbiType(elem_ty, false);
-        const target = self.dg.module.getTarget();
-        const ptr_alignment = ptr_info.alignment(target);
+        const ptr_alignment = ptr_info.alignment(mod);
         const ptr_volatile = llvm.Bool.fromBool(ptr_info.@"volatile");
         const elem_llvm_ty = try self.dg.lowerType(elem_ty);
 
@@ -8436,17 +8264,18 @@ pub const FuncGen = struct {
         inst: Air.Inst.Index,
         ordering: llvm.AtomicOrdering,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const operand_ty = ptr_ty.childType();
-        if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null;
+        const ptr_ty = self.typeOf(bin_op.lhs);
+        const operand_ty = ptr_ty.childType(mod);
+        if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null;
         const ptr = try self.resolveInst(bin_op.lhs);
         var element = try self.resolveInst(bin_op.rhs);
         const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
 
         if (opt_abi_ty) |abi_ty| {
             // operand needs widening
-            if (operand_ty.isSignedInt()) {
+            if (operand_ty.isSignedInt(mod)) {
                 element = self.builder.buildSExt(element, abi_ty, "");
             } else {
                 element = self.builder.buildZExt(element, abi_ty, "");
             }
         }
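The widening in the atomic paths above exists because not every Zig integer width has a native atomic operation; getAtomicAbiType picks a wider ABI integer, and the extension must match signedness so the value's meaning survives the round trip. A sketch of why the sign choice matters — the types are illustrative, and the two-argument `@bitCast` form matches the compiler version this diff targets:

const std = @import("std");

pub fn main() void {
    const v: i3 = -2;
    const sext: i8 = v; // sign-extending keeps the value: still -2
    const zext: u8 = @bitCast(u3, v); // zero-extending the raw bits yields 6
    std.debug.print("{} {} {}\n", .{ v, sext, zext });
}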
@@ -8457,19 +8286,19 @@ pub const FuncGen = struct {
     }
 
     fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const elem_ty = self.air.typeOf(bin_op.rhs);
-        const module = self.dg.module;
-        const target = module.getTarget();
-        const dest_ptr_align = ptr_ty.ptrAlignment(target);
+        const ptr_ty = self.typeOf(bin_op.lhs);
+        const elem_ty = self.typeOf(bin_op.rhs);
+        const target = mod.getTarget();
+        const dest_ptr_align = ptr_ty.ptrAlignment(mod);
         const u8_llvm_ty = self.context.intType(8);
         const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
-        const is_volatile = ptr_ty.isVolatilePtr();
+        const is_volatile = ptr_ty.isVolatilePtr(mod);
 
-        if (self.air.value(bin_op.rhs)) |elem_val| {
-            if (elem_val.isUndefDeep()) {
+        if (try self.air.value(bin_op.rhs, mod)) |elem_val| {
+            if (elem_val.isUndefDeep(mod)) {
                 // Even if safety is disabled, we still emit a memset to undefined since it conveys
                 // extra information to LLVM. However, safety makes the difference between using
                 // 0xaa or actual undefined for the fill byte.
@@ -8480,7 +8309,7 @@ pub const FuncGen = struct {
                 const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
 
                 _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
-                if (safety and module.comp.bin_file.options.valgrind) {
+                if (safety and mod.comp.bin_file.options.valgrind) {
                     self.valgrindMarkUndef(dest_ptr, len);
                 }
                 return null;
@@ -8490,8 +8319,7 @@ pub const FuncGen = struct {
             // repeating byte pattern, for example, `@as(u64, 0)` has a
             // repeating byte pattern of 0 bytes. In such case, the memset
             // intrinsic can be used.
-            var value_buffer: Value.Payload.U64 = undefined;
-            if (try elem_val.hasRepeatedByteRepr(elem_ty, module, &value_buffer)) |byte_val| {
+            if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| {
                 const fill_byte = try self.resolveValue(.{
                     .ty = Type.u8,
                     .val = byte_val,
@@ -8503,7 +8331,7 @@ pub const FuncGen = struct {
         }
 
         const value = try self.resolveInst(bin_op.rhs);
-        const elem_abi_size = elem_ty.abiSize(target);
+        const elem_abi_size = elem_ty.abiSize(mod);
 
         if (elem_abi_size == 1) {
             // In this case we can take advantage of LLVM's intrinsic.
@@ -8535,9 +8363,9 @@ pub const FuncGen = struct {
         const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd");
 
         const llvm_usize_ty = self.context.intType(target.ptrBitWidth());
-        const len = switch (ptr_ty.ptrSize()) {
+        const len = switch (ptr_ty.ptrSize(mod)) {
             .Slice => self.builder.buildExtractValue(dest_slice, 1, ""),
-            .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False),
+            .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False),
             .Many, .C => unreachable,
         };
         const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -8551,9 +8379,9 @@ pub const FuncGen = struct {
         _ = self.builder.buildCondBr(end, body_block, end_block);
 
         self.builder.positionBuilderAtEnd(body_block);
-        const elem_abi_alignment = elem_ty.abiAlignment(target);
+        const elem_abi_alignment = elem_ty.abiAlignment(mod);
         const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align);
-        if (isByRef(elem_ty)) {
+        if (isByRef(elem_ty, mod)) {
             _ = self.builder.buildMemCpy(
                 it_ptr,
                 it_ptr_alignment,
@@ -8583,19 +8411,19 @@ pub const FuncGen = struct {
 
     fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
-        const dest_ptr_ty = self.air.typeOf(bin_op.lhs);
+        const dest_ptr_ty = self.typeOf(bin_op.lhs);
         const src_slice = try self.resolveInst(bin_op.rhs);
-        const src_ptr_ty = self.air.typeOf(bin_op.rhs);
+        const src_ptr_ty = self.typeOf(bin_op.rhs);
         const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
         const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
         const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
-        const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr();
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod);
         _ = self.builder.buildMemCpy(
             dest_ptr,
-            dest_ptr_ty.ptrAlignment(target),
+            dest_ptr_ty.ptrAlignment(mod),
             src_ptr,
-            src_ptr_ty.ptrAlignment(target),
+            src_ptr_ty.ptrAlignment(mod),
             len,
             is_volatile,
         );
@@ -8603,10 +8431,10 @@ pub const FuncGen = struct {
     }
 
     fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const un_ty = self.air.typeOf(bin_op.lhs).childType();
-        const target = self.dg.module.getTarget();
-        const layout = un_ty.unionGetLayout(target);
+        const un_ty = self.typeOf(bin_op.lhs).childType(mod);
+        const layout = un_ty.unionGetLayout(mod);
         if (layout.tag_size == 0) return null;
         const union_ptr = try self.resolveInst(bin_op.lhs);
         const new_tag = try self.resolveInst(bin_op.rhs);
@@ -8624,13 +8452,13 @@ pub const FuncGen = struct {
     }
 
     fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const un_ty = self.air.typeOf(ty_op.operand);
-        const target = self.dg.module.getTarget();
-        const layout = un_ty.unionGetLayout(target);
+        const un_ty = self.typeOf(ty_op.operand);
+        const layout = un_ty.unionGetLayout(mod);
         if (layout.tag_size == 0) return null;
         const union_handle = try self.resolveInst(ty_op.operand);
-        if (isByRef(un_ty)) {
+        if (isByRef(un_ty, mod)) {
             const llvm_un_ty = try self.dg.lowerType(un_ty);
             if (layout.payload_size == 0) {
                 return self.builder.buildLoad(llvm_un_ty, union_handle, "");
@@ -8650,7 +8478,7 @@ pub const FuncGen = struct {
     fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
 
         return self.buildFloatOp(op, operand_ty, 1, .{operand});
     }
@@ -8660,14 +8488,15 @@ pub const FuncGen = struct {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
 
         return self.buildFloatOp(.neg, operand_ty, 1, .{operand});
     }
 
     fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand = try self.resolveInst(ty_op.operand);
 
         const llvm_i1 = self.context.intType(1);
@@ -8676,12 +8505,11 @@ pub const FuncGen = struct {
         const params = [_]*llvm.Value{ operand, llvm_i1.constNull() };
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
 
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
 
-        const target = self.dg.module.getTarget();
-        const bits = operand_ty.intInfo(target).bits;
-        const result_bits = result_ty.intInfo(target).bits;
+        const bits = operand_ty.intInfo(mod).bits;
+        const result_bits = result_ty.intInfo(mod).bits;
         if (bits > result_bits) {
             return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
         } else if (bits < result_bits) {
@@ -8692,8 +8520,9 @@ pub const FuncGen = struct {
     }
 
     fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand = try self.resolveInst(ty_op.operand);
 
         const params = [_]*llvm.Value{operand};
@@ -8701,12 +8530,11 @@ pub const FuncGen = struct {
         const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
 
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
 
-        const target = self.dg.module.getTarget();
-        const bits = operand_ty.intInfo(target).bits;
-        const result_bits = result_ty.intInfo(target).bits;
+        const bits = operand_ty.intInfo(mod).bits;
+        const result_bits = result_ty.intInfo(mod).bits;
         if (bits > result_bits) {
             return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
         } else if (bits < result_bits) {
@@ -8717,10 +8545,10 @@ pub const FuncGen = struct {
     }
 
     fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        var bits = operand_ty.intInfo(target).bits;
+        const operand_ty = self.typeOf(ty_op.operand);
+        var bits = operand_ty.intInfo(mod).bits;
         assert(bits % 8 == 0);
 
         var operand = try self.resolveInst(ty_op.operand);
@@ -8730,8 +8558,8 @@ pub const FuncGen = struct {
             // If not an even byte-multiple, we need zero-extend + shift-left 1 byte
             // The truncated result at the end will be the correct bswap
             const scalar_llvm_ty = self.context.intType(bits + 8);
-            if (operand_ty.zigTypeTag() == .Vector) {
-                const vec_len = operand_ty.vectorLen();
+            if (operand_ty.zigTypeTag(mod) == .Vector) {
+                const vec_len = operand_ty.vectorLen(mod);
                 operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len);
 
                 const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
@@ -8757,9 +8585,9 @@ pub const FuncGen = struct {
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
 
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
-        const result_bits = result_ty.intInfo(target).bits;
+        const result_bits = result_ty.intInfo(mod).bits;
         if (bits > result_bits) {
             return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, "");
         } else if (bits < result_bits) {
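The zero-extend/shift trick in airByteSwap above deserves a worked example: LLVM's bswap intrinsic requires an even number of bytes, so a 3-byte integer is widened by one byte, shifted left so the value occupies the top bytes, swapped, and truncated on the way out. In plain Zig, with concrete values chosen for illustration:

const std = @import("std");

pub fn main() void {
    const x: u24 = 0xAABBCC;
    const widened = @as(u32, x) << 8; // 0xAABBCC00
    const swapped = @byteSwap(widened); // 0x00CCBBAA; truncating to u24 gives 0xCCBBAA
    std.debug.print("0x{X} == 0x{X}\n", .{ swapped, @byteSwap(x) });
}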
@@ -8770,28 +8598,23 @@ pub const FuncGen = struct {
     }
 
     fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const error_set_ty = self.air.getRefType(ty_op.ty);
 
-        const names = error_set_ty.errorSetNames();
+        const names = error_set_ty.errorSetNames(mod);
         const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid");
         const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid");
         const end_block = self.context.appendBasicBlock(self.llvm_func, "End");
         const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len));
 
         for (names) |name| {
-            const err_int = self.dg.module.global_error_set.get(name).?;
-            const this_tag_int_value = int: {
-                var tag_val_payload: Value.Payload.U64 = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = err_int,
-                };
-                break :int try self.dg.lowerValue(.{
-                    .ty = Type.err_int,
-                    .val = Value.initPayload(&tag_val_payload.base),
-                });
-            };
+            const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
+            const this_tag_int_value = try self.dg.lowerValue(.{
+                .ty = Type.err_int,
+                .val = try mod.intValue(Type.err_int, err_int),
+            });
             switch_instr.addCase(this_tag_int_value, valid_block);
         }
         self.builder.positionBuilderAtEnd(valid_block);
@@ -8817,7 +8640,7 @@ pub const FuncGen = struct {
     fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const enum_ty = self.air.typeOf(un_op);
+        const enum_ty = self.typeOf(un_op);
 
         const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty);
         const params = [_]*llvm.Value{operand};
@@ -8825,25 +8648,22 @@ pub const FuncGen = struct {
     }
 
     fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
-        const enum_decl = enum_ty.getOwnerDecl();
+        const mod = self.dg.module;
+        const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
 
         // TODO: detect when the type changes and re-emit this function.
-        const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_decl);
+        const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_type.decl);
         if (gop.found_existing) return gop.value_ptr.*;
-        errdefer assert(self.dg.object.named_enum_map.remove(enum_decl));
+        errdefer assert(self.dg.object.named_enum_map.remove(enum_type.decl));
 
         var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
         defer arena_allocator.deinit();
         const arena = arena_allocator.allocator();
 
-        const mod = self.dg.module;
-        const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod);
-        defer self.gpa.free(fqn);
-
-        const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn});
+        const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
+        const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)});
 
-        var int_tag_type_buffer: Type.Payload.Bits = undefined;
-        const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
-        const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
+        const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())};
 
         const llvm_ret_ty = try self.dg.lowerType(Type.bool);
         const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
@@ -8866,21 +8686,17 @@ pub const FuncGen = struct {
         self.builder.positionBuilderAtEnd(entry_block);
         self.builder.clearCurrentDebugLocation();
 
-        const fields = enum_ty.enumFields();
         const named_block = self.context.appendBasicBlock(fn_val, "Named");
         const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed");
         const tag_int_value = fn_val.getParam(0);
-        const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
+        const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len));
 
-        for (fields.keys(), 0..) |_, field_index| {
+        for (enum_type.names, 0..) |_, field_index_usize| {
+            const field_index = @intCast(u32, field_index_usize);
             const this_tag_int_value = int: {
-                var tag_val_payload: Value.Payload.U32 = .{
-                    .base = .{ .tag = .enum_field_index },
-                    .data = @intCast(u32, field_index),
-                };
                 break :int try self.dg.lowerValue(.{
                     .ty = enum_ty,
-                    .val = Value.initPayload(&tag_val_payload.base),
+                    .val = try mod.enumValueFieldIndex(enum_ty, field_index),
                 });
             };
             switch_instr.addCase(this_tag_int_value, named_block);
@@ -8896,7 +8712,7 @@ pub const FuncGen = struct {
     fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const enum_ty = self.air.typeOf(un_op);
+        const enum_ty = self.typeOf(un_op);
 
         const llvm_fn = try self.getEnumTagNameFunction(enum_ty);
         const params = [_]*llvm.Value{operand};
@@ -8904,31 +8720,27 @@ pub const FuncGen = struct {
     }
 
     fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
-        const enum_decl = enum_ty.getOwnerDecl();
+        const mod = self.dg.module;
+        const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
 
        // TODO: detect when the type changes and re-emit this function.
-        const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_decl);
+        const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_type.decl);
         if (gop.found_existing) return gop.value_ptr.*;
-        errdefer assert(self.dg.object.decl_map.remove(enum_decl));
+        errdefer assert(self.dg.object.decl_map.remove(enum_type.decl));
 
         var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
         defer arena_allocator.deinit();
         const arena = arena_allocator.allocator();
 
-        const mod = self.dg.module;
-        const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod);
-        defer self.gpa.free(fqn);
-
-        const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
+        const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
+        const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)});
 
-        const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+        const slice_ty = Type.slice_const_u8_sentinel_0;
         const llvm_ret_ty = try self.dg.lowerType(slice_ty);
         const usize_llvm_ty = try self.dg.lowerType(Type.usize);
-        const target = self.dg.module.getTarget();
-        const slice_alignment = slice_ty.abiAlignment(target);
+        const slice_alignment = slice_ty.abiAlignment(mod);
 
-        var int_tag_type_buffer: Type.Payload.Bits = undefined;
-        const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
-        const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
+        const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())};
 
         const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
         const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
@@ -8950,16 +8762,17 @@ pub const FuncGen = struct {
         self.builder.positionBuilderAtEnd(entry_block);
         self.builder.clearCurrentDebugLocation();
 
-        const fields = enum_ty.enumFields();
         const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue");
         const tag_int_value = fn_val.getParam(0);
-        const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, fields.count()));
+        const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, enum_type.names.len));
 
         const array_ptr_indices = [_]*llvm.Value{
             usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
         };
 
-        for (fields.keys(), 0..) |name, field_index| {
+        for (enum_type.names, 0..) |name_ip, field_index_usize| {
+            const field_index = @intCast(u32, field_index_usize);
+            const name = mod.intern_pool.stringToSlice(name_ip);
             const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
             const str_init_llvm_ty = str_init.typeOf();
             const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
@@ -8982,16 +8795,10 @@ pub const FuncGen = struct {
             slice_global.setAlignment(slice_alignment);
 
             const return_block = self.context.appendBasicBlock(fn_val, "Name");
-            const this_tag_int_value = int: {
-                var tag_val_payload: Value.Payload.U32 = .{
-                    .base = .{ .tag = .enum_field_index },
-                    .data = @intCast(u32, field_index),
-                };
-                break :int try self.dg.lowerValue(.{
-                    .ty = enum_ty,
-                    .val = Value.initPayload(&tag_val_payload.base),
-                });
-            };
+            const this_tag_int_value = try self.dg.lowerValue(.{
+                .ty = enum_ty,
+                .val = try mod.enumValueFieldIndex(enum_ty, field_index),
+            });
             switch_instr.addCase(this_tag_int_value, return_block);
 
             self.builder.positionBuilderAtEnd(return_block);
@@ -9027,7 +8834,7 @@ pub const FuncGen = struct {
     fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const slice_ty = self.air.typeOfIndex(inst);
+        const slice_ty = self.typeOfIndex(inst);
         const slice_llvm_ty = try self.dg.lowerType(slice_ty);
 
         const error_name_table_ptr = try self.getErrorNameTable();
@@ -9039,10 +8846,11 @@ pub const FuncGen = struct {
     }
 
     fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const scalar = try self.resolveInst(ty_op.operand);
-        const vector_ty = self.air.typeOfIndex(inst);
-        const len = vector_ty.vectorLen();
+        const vector_ty = self.typeOfIndex(inst);
+        const len = vector_ty.vectorLen(mod);
         return self.builder.buildVectorSplat(len, scalar, "");
     }
 
@@ -9057,13 +8865,14 @@ pub const FuncGen = struct {
     }
 
     fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
         const a = try self.resolveInst(extra.a);
         const b = try self.resolveInst(extra.b);
-        const mask = self.air.values[extra.mask];
+        const mask = extra.mask.toValue();
         const mask_len = extra.mask_len;
-        const a_len = self.air.typeOf(extra.a).vectorLen();
+        const a_len = self.typeOf(extra.a).vectorLen(mod);
 
         // LLVM uses integers larger than the length of the first array to
         // index into the second array. This was deemed unnecessarily fragile
@@ -9076,12 +8885,11 @@ pub const FuncGen = struct {
         const llvm_i32 = self.context.intType(32);
 
         for (values, 0..) |*val, i| {
-            var buf: Value.ElemValueBuffer = undefined;
-            const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
-            if (elem.isUndef()) {
+            const elem = try mask.elemValue(mod, i);
+            if (elem.isUndef(mod)) {
                 val.* = llvm_i32.getUndef();
            } else {
-                const int = elem.toSignedInt(self.dg.module.getTarget());
+                const int = elem.toSignedInt(mod);
                 const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
                 val.* = llvm_i32.constInt(unsigned, .False);
             }
@@ -9157,32 +8965,33 @@ pub const FuncGen = struct {
     fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();
 
         const reduce = self.air.instructions.items(.data)[inst].reduce;
         const operand = try self.resolveInst(reduce.operand);
-        const operand_ty = self.air.typeOf(reduce.operand);
-        const scalar_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(reduce.operand);
+        const scalar_ty = self.typeOfIndex(inst);
 
         switch (reduce.operation) {
             .And => return self.builder.buildAndReduce(operand),
             .Or => return self.builder.buildOrReduce(operand),
             .Xor => return self.builder.buildXorReduce(operand),
-            .Min => switch (scalar_ty.zigTypeTag()) {
-                .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt()),
+            .Min => switch (scalar_ty.zigTypeTag(mod)) {
+                .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)),
                 .Float => if (intrinsicsAllowed(scalar_ty, target)) {
                     return self.builder.buildFPMinReduce(operand);
                 },
                 else => unreachable,
             },
-            .Max => switch (scalar_ty.zigTypeTag()) {
-                .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt()),
+            .Max => switch (scalar_ty.zigTypeTag(mod)) {
+                .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)),
                 .Float => if (intrinsicsAllowed(scalar_ty, target)) {
                     return self.builder.buildFPMaxReduce(operand);
                 },
                 else => unreachable,
             },
-            .Add => switch (scalar_ty.zigTypeTag()) {
+            .Add => switch (scalar_ty.zigTypeTag(mod)) {
                 .Int => return self.builder.buildAddReduce(operand),
                 .Float => if (intrinsicsAllowed(scalar_ty, target)) {
                     const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -9191,7 +9000,7 @@ pub const FuncGen = struct {
                 },
                 else => unreachable,
             },
-            .Mul => switch (scalar_ty.zigTypeTag()) {
+            .Mul => switch (scalar_ty.zigTypeTag(mod)) {
                 .Int => return self.builder.buildMulReduce(operand),
                 .Float => if (intrinsicsAllowed(scalar_ty, target)) {
                     const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -9221,35 +9030,32 @@ pub const FuncGen = struct {
             }) catch unreachable,
             else => unreachable,
         };
-        var init_value_payload = Value.Payload.Float_32{
-            .data = switch (reduce.operation) {
-                .Min => std.math.nan(f32),
-                .Max => std.math.nan(f32),
-                .Add => -0.0,
-                .Mul => 1.0,
-                else => unreachable,
-            },
-        };
 
         const param_llvm_ty = try self.dg.lowerType(scalar_ty);
         const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty };
         const libc_fn = self.getLibcFunction(fn_name, &param_types, param_llvm_ty);
         const init_value = try self.dg.lowerValue(.{
             .ty = scalar_ty,
-            .val = Value.initPayload(&init_value_payload.base),
+            .val = try mod.floatValue(scalar_ty, switch (reduce.operation) {
+                .Min => std.math.nan(f32),
+                .Max => std.math.nan(f32),
+                .Add => -0.0,
+                .Mul => 1.0,
+                else => unreachable,
+            }),
         });
-        return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(), init_value);
+        return
self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value); } fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const result_ty = self.air.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const result_ty = self.typeOfIndex(inst); + const len = @intCast(usize, result_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = try self.dg.lowerType(result_ty); - const target = self.dg.module.getTarget(); - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Vector => { const llvm_u32 = self.context.intType(32); @@ -9262,10 +9068,10 @@ pub const FuncGen = struct { return vector; }, .Struct => { - if (result_ty.containerLayout() == .Packed) { - const struct_obj = result_ty.castTag(.@"struct").?.data; + if (result_ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(result_ty).?; assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(target); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); @@ -9273,12 +9079,12 @@ pub const FuncGen = struct { var running_bits: u16 = 0; for (elements, 0..) |elem, i| { const field = fields[i]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try self.resolveInst(elem); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = self.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else self.builder.buildBitCast(non_int_val, small_int_ty, ""); @@ -9294,30 +9100,28 @@ pub const FuncGen = struct { return running_int; } - var ptr_ty_buf: Type.Payload.Pointer = undefined; - - if (isByRef(result_ty)) { + if (isByRef(result_ty, mod)) { const llvm_u32 = self.context.intType(32); // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target)); + const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) 
|elem, i| { - if (result_ty.structFieldValueComptime(i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?; + const llvm_i = llvmField(result_ty, i, mod).?.index; indices[1] = llvm_u32.constInt(llvm_i, .False); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); - var field_ptr_payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = self.air.typeOf(elem), - .@"align" = result_ty.structFieldAlign(i, target), - .@"addrspace" = .generic, + const field_ptr_ty = try mod.ptrType(.{ + .child = self.typeOf(elem).toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromNonzeroByteUnits( + result_ty.structFieldAlign(i, mod), + ), }, - }; - const field_ptr_ty = Type.initPayload(&field_ptr_payload.base); + }); try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic); } @@ -9325,29 +9129,25 @@ pub const FuncGen = struct { } else { var result = llvm_result_ty.getUndef(); for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?; + const llvm_i = llvmField(result_ty, i, mod).?.index; result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, ""); } return result; } }, .Array => { - assert(isByRef(result_ty)); + assert(isByRef(result_ty, mod)); const llvm_usize = try self.dg.lowerType(Type.usize); - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target)); + const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); - const array_info = result_ty.arrayInfo(); - var elem_ptr_payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = array_info.elem_type, - .@"addrspace" = .generic, - }, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base); + const array_info = result_ty.arrayInfo(mod); + const elem_ptr_ty = try mod.ptrType(.{ + .child = array_info.elem_type.toIntern(), + }); for (elements, 0..) 
|elem, i| { const indices: [2]*llvm.Value = .{ @@ -9379,22 +9179,22 @@ pub const FuncGen = struct { } fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; - const union_ty = self.air.typeOfIndex(inst); + const union_ty = self.typeOfIndex(inst); const union_llvm_ty = try self.dg.lowerType(union_ty); - const target = self.dg.module.getTarget(); - const layout = union_ty.unionGetLayout(target); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const layout = union_ty.unionGetLayout(mod); + const union_obj = mod.typeToUnion(union_ty).?; if (union_obj.layout == .Packed) { - const big_bits = union_ty.bitSize(target); + const big_bits = union_ty.bitSize(mod); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = self.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else self.builder.buildBitCast(non_int_val, small_int_ty, ""); @@ -9402,26 +9202,21 @@ pub const FuncGen = struct { } const tag_int = blk: { - const tag_ty = union_ty.unionTagTypeHypothetical(); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); const union_field_name = union_obj.fields.keys()[extra.field_index]; - const enum_field_index = tag_ty.enumFieldIndex(union_field_name).?; - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, enum_field_index), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); - var int_payload: Value.Payload.U64 = undefined; - const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload); - break :blk tag_int_val.toUnsignedInt(target); + const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?; + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); + const tag_int_val = try tag_val.enumToInt(tag_ty, mod); + break :blk tag_int_val.toUnsignedInt(mod); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { return null; } - assert(!isByRef(union_ty)); + assert(!isByRef(union_ty, mod)); return union_llvm_ty.constInt(tag_int, .False); } - assert(isByRef(union_ty)); + assert(isByRef(union_ty, mod)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. 
         // We must construct the correct unnamed struct type here, in order to then set
@@ -9431,12 +9226,12 @@
         assert(union_obj.haveFieldTypes());
         const field = union_obj.fields.values()[extra.field_index];
         const field_llvm_ty = try self.dg.lowerType(field.ty);
-        const field_size = field.ty.abiSize(target);
-        const field_align = field.normalAlignment(target);
+        const field_size = field.ty.abiSize(mod);
+        const field_align = field.normalAlignment(mod);
 
         const llvm_union_ty = t: {
             const payload = p: {
-                if (!field.ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const padding_len = @intCast(c_uint, layout.payload_size);
                     break :p self.context.intType(8).arrayType(padding_len);
                 }
@@ -9472,14 +9267,12 @@
        // tag and the payload.
         const index_type = self.context.intType(32);
 
-        var field_ptr_payload: Type.Payload.Pointer = .{
-            .data = .{
-                .pointee_type = field.ty,
-                .@"align" = field_align,
-                .@"addrspace" = .generic,
+        const field_ptr_ty = try mod.ptrType(.{
+            .child = field.ty.toIntern(),
+            .flags = .{
+                .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align),
             },
-        };
-        const field_ptr_ty = Type.initPayload(&field_ptr_payload.base);
+        });
         if (layout.tag_size == 0) {
             const indices: [3]*llvm.Value = .{
                 index_type.constNull(),
@@ -9511,7 +9304,7 @@
             const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty);
             const llvm_tag = tag_llvm_ty.constInt(tag_int, .False);
             const store_inst = self.builder.buildStore(llvm_tag, field_ptr);
-            store_inst.setAlignment(union_obj.tag_ty.abiAlignment(target));
+            store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod));
         }
 
         return result_ptr;
@@ -9535,7 +9328,8 @@
         // by the target.
         // To work around this, don't emit llvm.prefetch in this case.
         // See https://bugs.llvm.org/show_bug.cgi?id=21037
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();
         switch (prefetch.cache) {
             .instruction => switch (target.cpu.arch) {
                 .x86_64,
@@ -9584,7 +9378,7 @@
     fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
 
         const llvm_dest_ty = try self.dg.lowerType(inst_ty);
@@ -9658,8 +9452,9 @@
             return table;
         }
 
-        const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
-        const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget());
+        const mod = self.dg.module;
+        const slice_ty = Type.slice_const_u8_sentinel_0;
+        const slice_alignment = slice_ty.abiAlignment(mod);
 
         const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space
         const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table");
@@ -9701,16 +9496,15 @@
         opt_ty: Type,
         can_elide_load: bool,
     ) !*llvm.Value {
-        var buf: Type.Payload.ElemType = undefined;
-        const payload_ty = opt_ty.optionalChild(&buf);
+        const mod = fg.dg.module;
+        const payload_ty = opt_ty.optionalChild(mod);
 
-        if (isByRef(opt_ty)) {
+        if (isByRef(opt_ty, mod)) {
             // We have a pointer and we need to return a pointer to the first field.
             const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, "");
 
-            const target = fg.dg.module.getTarget();
-            const payload_alignment = payload_ty.abiAlignment(target);
-            if (isByRef(payload_ty)) {
+            const payload_alignment = payload_ty.abiAlignment(mod);
+            if (isByRef(payload_ty, mod)) {
                 if (can_elide_load)
                     return payload_ptr;
 
@@ -9722,7 +9516,7 @@
             return load_inst;
         }
 
-        assert(!isByRef(payload_ty));
+        assert(!isByRef(payload_ty, mod));
         return fg.builder.buildExtractValue(opt_handle, 0, "");
     }
 
@@ -9734,10 +9528,10 @@
     ) !?*llvm.Value {
         const optional_llvm_ty = try self.dg.lowerType(optional_ty);
         const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), "");
+        const mod = self.dg.module;
 
-        if (isByRef(optional_ty)) {
-            const target = self.dg.module.getTarget();
-            const payload_alignment = optional_ty.abiAlignment(target);
+        if (isByRef(optional_ty, mod)) {
+            const payload_alignment = optional_ty.abiAlignment(mod);
             const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment);
 
             {
@@ -9765,13 +9559,13 @@
         struct_ptr_ty: Type,
         field_index: u32,
     ) !?*llvm.Value {
-        const target = self.dg.object.target;
-        const struct_ty = struct_ptr_ty.childType();
-        switch (struct_ty.zigTypeTag()) {
-            .Struct => switch (struct_ty.containerLayout()) {
+        const mod = self.dg.module;
+        const struct_ty = struct_ptr_ty.childType(mod);
+        switch (struct_ty.zigTypeTag(mod)) {
+            .Struct => switch (struct_ty.containerLayout(mod)) {
                 .Packed => {
-                    const result_ty = self.air.typeOfIndex(inst);
-                    const result_ty_info = result_ty.ptrInfo().data;
+                    const result_ty = self.typeOfIndex(inst);
+                    const result_ty_info = result_ty.ptrInfo(mod);
 
                     if (result_ty_info.host_size != 0) {
                         // From LLVM's perspective, a pointer to a packed struct and a pointer
@@ -9783,7 +9577,7 @@
 
                     // We have a pointer to a packed struct field that happens to be byte-aligned.
                     // Offset our operand pointer by the correct number of bytes.
-                    const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target);
+                    const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod);
                     if (byte_offset == 0) return struct_ptr;
                     const byte_llvm_ty = self.context.intType(8);
                     const llvm_usize = try self.dg.lowerType(Type.usize);
@@ -9794,24 +9588,23 @@
                 else => {
                     const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty);
 
-                    var ty_buf: Type.Payload.Pointer = undefined;
-                    if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
-                        return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, "");
+                    if (llvmField(struct_ty, field_index, mod)) |llvm_field| {
+                        return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, "");
                     } else {
                         // If we found no index then this means this is a zero sized field at the
                        // end of the struct. Treat our struct pointer as an array of two and get
                         // the index to the element at index `1` to get a pointer to the end of
                         // the struct.
                         const llvm_u32 = self.context.intType(32);
-                        const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime()), .False);
+                        const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
                         const indices: [1]*llvm.Value = .{llvm_index};
                         return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, "");
                     }
                 },
             },
             .Union => {
-                const layout = struct_ty.unionGetLayout(target);
-                if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr;
+                const layout = struct_ty.unionGetLayout(mod);
+                if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
                 const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
                 const union_llvm_ty = try self.dg.lowerType(struct_ty);
                 const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, "");
@@ -9835,12 +9628,12 @@
         ptr_alignment: u32,
         is_volatile: bool,
     ) !*llvm.Value {
+        const mod = fg.dg.module;
         const pointee_llvm_ty = try fg.dg.lowerType(pointee_type);
-        const target = fg.dg.module.getTarget();
-        const result_align = @max(ptr_alignment, pointee_type.abiAlignment(target));
+        const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod));
         const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align);
-        const llvm_usize = fg.context.intType(Type.usize.intInfo(target).bits);
-        const size_bytes = pointee_type.abiSize(target);
+        const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits);
+        const size_bytes = pointee_type.abiSize(mod);
         _ = fg.builder.buildMemCpy(
             result_ptr,
             result_align,
@@ -9856,12 +9649,12 @@
     /// alloca and copies the value into it, then returns the alloca instruction.
     /// For isByRef=false types, it creates a load instruction and returns it.
     fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value {
-        const info = ptr_ty.ptrInfo().data;
-        if (!info.pointee_type.hasRuntimeBitsIgnoreComptime()) return null;
+        const mod = self.dg.module;
+        const info = ptr_ty.ptrInfo(mod);
+        if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null;
 
-        const target = self.dg.module.getTarget();
-        const ptr_alignment = info.alignment(target);
-        const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr());
+        const ptr_alignment = info.alignment(mod);
+        const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr(mod));
 
         assert(info.vector_index != .runtime);
         if (info.vector_index != .none) {
@@ -9877,7 +9670,7 @@
         }
 
         if (info.host_size == 0) {
-            if (isByRef(info.pointee_type)) {
+            if (isByRef(info.pointee_type, mod)) {
                 return self.loadByRef(ptr, info.pointee_type, ptr_alignment, info.@"volatile");
             }
             const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
@@ -9892,13 +9685,13 @@
         containing_int.setAlignment(ptr_alignment);
         containing_int.setVolatile(ptr_volatile);
 
-        const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
+        const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
         const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False);
         const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
         const elem_llvm_ty = try self.dg.lowerType(info.pointee_type);
 
-        if (isByRef(info.pointee_type)) {
-            const result_align = info.pointee_type.abiAlignment(target);
+        if (isByRef(info.pointee_type, mod)) {
+            const result_align = info.pointee_type.abiAlignment(mod);
             const result_ptr = self.buildAlloca(elem_llvm_ty, result_align);
 
             const same_size_int = self.context.intType(elem_bits);
@@ -9908,13 +9701,13 @@
             return result_ptr;
         }
 
-        if (info.pointee_type.zigTypeTag() == .Float or info.pointee_type.zigTypeTag() == .Vector) {
+        if (info.pointee_type.zigTypeTag(mod) == .Float or info.pointee_type.zigTypeTag(mod) == .Vector) {
            const same_size_int = self.context.intType(elem_bits);
             const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
             return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
         }
 
-        if (info.pointee_type.isPtrAtRuntime()) {
+        if (info.pointee_type.isPtrAtRuntime(mod)) {
             const same_size_int = self.context.intType(elem_bits);
             const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
             return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -9930,13 +9723,13 @@
         elem: *llvm.Value,
         ordering: llvm.AtomicOrdering,
     ) !void {
-        const info = ptr_ty.ptrInfo().data;
+        const mod = self.dg.module;
+        const info = ptr_ty.ptrInfo(mod);
         const elem_ty = info.pointee_type;
-        if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
+        if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
             return;
         }
-        const target = self.dg.module.getTarget();
-        const ptr_alignment = ptr_ty.ptrAlignment(target);
+        const ptr_alignment = ptr_ty.ptrAlignment(mod);
         const ptr_volatile = llvm.Bool.fromBool(info.@"volatile");
 
         assert(info.vector_index != .runtime);
@@ -9964,13 +9757,13 @@
             assert(ordering == .NotAtomic);
             containing_int.setAlignment(ptr_alignment);
             containing_int.setVolatile(ptr_volatile);
-            const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
+            const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod));
             const containing_int_ty = containing_int.typeOf();
             const shift_amt = containing_int_ty.constInt(info.bit_offset, .False);
             // Convert to equally-sized integer type in order to perform the bit
             // operations on the value to store
             const value_bits_type = self.context.intType(elem_bits);
-            const value_bits = if (elem_ty.isPtrAtRuntime())
+            const value_bits = if (elem_ty.isPtrAtRuntime(mod))
                 self.builder.buildPtrToInt(elem, value_bits_type, "")
             else
                 self.builder.buildBitCast(elem, value_bits_type, "");
@@ -9991,7 +9784,7 @@
             store_inst.setVolatile(ptr_volatile);
             return;
         }
-        if (!isByRef(elem_ty)) {
+        if (!isByRef(elem_ty, mod)) {
             const store_inst = self.builder.buildStore(elem, ptr);
             store_inst.setOrdering(ordering);
             store_inst.setAlignment(ptr_alignment);
@@ -9999,13 +9792,13 @@
             return;
         }
         assert(ordering == .NotAtomic);
-        const size_bytes = elem_ty.abiSize(target);
+        const size_bytes = elem_ty.abiSize(mod);
         _ = self.builder.buildMemCpy(
             ptr,
             ptr_alignment,
             elem,
-            elem_ty.abiAlignment(target),
-            self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False),
+            elem_ty.abiAlignment(mod),
+            self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False),
             info.@"volatile",
         );
     }
@@ -10030,11 +9823,12 @@
         a4: *llvm.Value,
         a5: *llvm.Value,
     ) *llvm.Value {
-        const target = fg.dg.module.getTarget();
+        const mod = fg.dg.module;
+        const target = mod.getTarget();
         if (!target_util.hasValgrindSupport(target)) return default_value;
 
         const usize_llvm_ty = fg.context.intType(target.ptrBitWidth());
-        const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target));
+        const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod));
 
         const array_llvm_ty = usize_llvm_ty.arrayType(6);
         const array_ptr = fg.valgrind_client_request_array orelse a: {
@@ -10111,6 +9905,16 @@
         );
         return call;
     }
+
+    fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
+        const mod = fg.dg.module;
+        return fg.air.typeOf(inst, &mod.intern_pool);
+    }
+
+    fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
+        const mod = fg.dg.module;
+        return fg.air.typeOfIndex(inst, &mod.intern_pool);
+    }
 };
 
 fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
@@ -10444,62 +10248,64 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
     };
 }
 
+const LlvmField = struct {
+    index: c_uint,
+    ty: Type,
+    alignment: u32,
+};
+
 /// Take into account 0 bit fields and padding. Returns null if an llvm
 /// field could not be found.
 /// This only happens if you want the field index of a zero sized field at
 /// the end of the struct.
-fn llvmFieldIndex(
-    ty: Type,
-    field_index: usize,
-    target: std.Target,
-    ptr_pl_buf: *Type.Payload.Pointer,
-) ?c_uint {
+fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
     // Detects where we inserted extra padding fields so that we can skip
     // over them in this function.
     comptime assert(struct_layout_version == 2);
     var offset: u64 = 0;
     var big_align: u32 = 0;
 
-    if (ty.isSimpleTupleOrAnonStruct()) {
-        const tuple = ty.tupleFields();
-        var llvm_field_index: c_uint = 0;
-        for (tuple.types, 0..) |field_ty, i| {
-            if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+    const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+        .anon_struct_type => |tuple| {
+            var llvm_field_index: c_uint = 0;
+            for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+                if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
 
-            const field_align = field_ty.abiAlignment(target);
-            big_align = @max(big_align, field_align);
-            const prev_offset = offset;
-            offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                const field_align = field_ty.toType().abiAlignment(mod);
+                big_align = @max(big_align, field_align);
+                const prev_offset = offset;
+                offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+
+                const padding_len = offset - prev_offset;
+                if (padding_len > 0) {
+                    llvm_field_index += 1;
+                }
+
+                if (field_index <= i) {
+                    return .{
+                        .index = llvm_field_index,
+                        .ty = field_ty.toType(),
+                        .alignment = field_align,
+                    };
+                }
 
-            const padding_len = offset - prev_offset;
-            if (padding_len > 0) {
                 llvm_field_index += 1;
+                offset += field_ty.toType().abiSize(mod);
             }
-
-            if (field_index <= i) {
-                ptr_pl_buf.* = .{
-                    .data = .{
-                        .pointee_type = field_ty,
-                        .@"align" = field_align,
-                        .@"addrspace" = .generic,
-                    },
-                };
-                return llvm_field_index;
-            }
-
-            llvm_field_index += 1;
-            offset += field_ty.abiSize(target);
-        }
-        return null;
-    }
-    const layout = ty.containerLayout();
+            return null;
+        },
+        .struct_type => |s| s,
+        else => unreachable,
+    };
+    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+    const layout = struct_obj.layout;
     assert(layout != .Packed);
 
     var llvm_field_index: c_uint = 0;
-    var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator();
+    var it = struct_obj.runtimeFieldIterator(mod);
     while (it.next()) |field_and_index| {
         const field = field_and_index.field;
-        const field_align = field.alignment(target, layout);
+        const field_align = field.alignment(mod, layout);
         big_align = @max(big_align, field_align);
         const prev_offset = offset;
         offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -10510,54 +10316,52 @@
         }
 
         if (field_index == field_and_index.index) {
-            ptr_pl_buf.* = .{
-                .data = .{
-                    .pointee_type = field.ty,
-                    .@"align" = field_align,
-                    .@"addrspace" = .generic,
-                },
+            return .{
+                .index = llvm_field_index,
+                .ty = field.ty,
+                .alignment = field_align,
             };
-            return llvm_field_index;
         }
 
         llvm_field_index += 1;
-        offset += field.ty.abiSize(target);
+        offset += field.ty.abiSize(mod);
     } else {
         // We did not find an llvm field that corresponds to this zig field.
         return null;
     }
 }
 
-fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool {
-    if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) return false;
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
+    if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false;
+    const target = mod.getTarget();
 
     switch (fn_info.cc) {
-        .Unspecified, .Inline => return isByRef(fn_info.return_type),
+        .Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod),
         .C => switch (target.cpu.arch) {
             .mips, .mipsel => return false,
             .x86_64 => switch (target.os.tag) {
-                .windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
-                else => return firstParamSRetSystemV(fn_info.return_type, target),
+                .windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+                else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
            },
-            .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect,
-            .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory,
-            .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) {
+            .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect,
+            .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
+            .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) {
                 .memory, .i64_array => return true,
                 .i32_array => |size| return size != 1,
                 .byval => return false,
             },
-            .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory,
+            .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
            else => return false, // TODO investigate C ABI for other architectures
         },
-        .SysV => return firstParamSRetSystemV(fn_info.return_type, target),
-        .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
-        .Stdcall => return !isScalar(fn_info.return_type),
+        .SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
+        .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+        .Stdcall => return !isScalar(mod, fn_info.return_type.toType()),
         else => return false,
     }
 }
 
-fn firstParamSRetSystemV(ty: Type, target: std.Target) bool {
-    const class = x86_64_abi.classifySystemV(ty, target, .ret);
+fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
+    const class = x86_64_abi.classifySystemV(ty, mod, .ret);
     if (class[0] == .memory) return true;
     if (class[0] == .x87 and class[2] != .none) return true;
     return false;
@@ -10566,75 +10370,77 @@ fn firstParamSRetSystemV(ty: Type, target: std.Target) bool {
 /// In order to support the C calling convention, some return types need to be lowered
 /// completely differently in the function prototype to honor the C ABI, and then
 /// be effectively bitcasted to the actual return type.
-fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
-    if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) {
+fn lowerFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+    const mod = dg.module;
+    const return_type = fn_info.return_type.toType();
+    if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
         // If the return type is an error set or an error union, then we make this
         // anyerror return type instead, so that it can be coerced into a function
         // pointer type which has anyerror as the return type.
-        if (fn_info.return_type.isError()) {
+        if (return_type.isError(mod)) {
             return dg.lowerType(Type.anyerror);
         } else {
             return dg.context.voidType();
         }
     }
-    const target = dg.module.getTarget();
+    const target = mod.getTarget();
     switch (fn_info.cc) {
         .Unspecified, .Inline => {
-            if (isByRef(fn_info.return_type)) {
+            if (isByRef(return_type, mod)) {
                 return dg.context.voidType();
             } else {
-                return dg.lowerType(fn_info.return_type);
+                return dg.lowerType(return_type);
             }
         },
         .C => {
             switch (target.cpu.arch) {
-                .mips, .mipsel => return dg.lowerType(fn_info.return_type),
+                .mips, .mipsel => return dg.lowerType(return_type),
                 .x86_64 => switch (target.os.tag) {
                     .windows => return lowerWin64FnRetTy(dg, fn_info),
                     else => return lowerSystemVFnRetTy(dg, fn_info),
                 },
                 .wasm32 => {
-                    if (isScalar(fn_info.return_type)) {
-                        return dg.lowerType(fn_info.return_type);
+                    if (isScalar(mod, return_type)) {
+                        return dg.lowerType(return_type);
                     }
-                    const classes = wasm_c_abi.classifyType(fn_info.return_type, target);
+                    const classes = wasm_c_abi.classifyType(return_type, mod);
                     if (classes[0] == .indirect or classes[0] == .none) {
                         return dg.context.voidType();
                     }
 
                     assert(classes[0] == .direct and classes[1] == .none);
-                    const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, target);
-                    const abi_size = scalar_type.abiSize(target);
+                    const scalar_type = wasm_c_abi.scalarType(return_type, mod);
+                    const abi_size = scalar_type.abiSize(mod);
                     return dg.context.intType(@intCast(c_uint, abi_size * 8));
                 },
                 .aarch64, .aarch64_be => {
-                    switch (aarch64_c_abi.classifyType(fn_info.return_type, target)) {
+                    switch (aarch64_c_abi.classifyType(return_type, mod)) {
                         .memory => return dg.context.voidType(),
-                        .float_array => return dg.lowerType(fn_info.return_type),
-                        .byval => return dg.lowerType(fn_info.return_type),
+                        .float_array => return dg.lowerType(return_type),
+                        .byval => return dg.lowerType(return_type),
                         .integer => {
-                            const bit_size = fn_info.return_type.bitSize(target);
+                            const bit_size = return_type.bitSize(mod);
                             return dg.context.intType(@intCast(c_uint, bit_size));
                         },
                         .double_integer => return dg.context.intType(64).arrayType(2),
                     }
                },
                 .arm, .armeb => {
-                    switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) {
+                    switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
                         .memory, .i64_array => return dg.context.voidType(),
                         .i32_array => |len| if (len == 1) {
                             return dg.context.intType(32);
                         } else {
                             return dg.context.voidType();
                         },
-                        .byval => return dg.lowerType(fn_info.return_type),
+                        .byval => return dg.lowerType(return_type),
                     }
                 },
                 .riscv32, .riscv64 => {
-                    switch (riscv_c_abi.classifyType(fn_info.return_type, target)) {
+                    switch (riscv_c_abi.classifyType(return_type, mod)) {
                         .memory => return dg.context.voidType(),
                         .integer => {
-                            const bit_size = fn_info.return_type.bitSize(target);
+                            const bit_size = return_type.bitSize(mod);
                             return dg.context.intType(@intCast(c_uint, bit_size));
                         },
                         .double_integer => {
@@ -10644,50 +10450,52 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
                             };
                             return dg.context.structType(&llvm_types_buffer, 2, .False);
                         },
-                        .byval => return dg.lowerType(fn_info.return_type),
+                        .byval => return dg.lowerType(return_type),
                     }
                 },
                 // TODO investigate C ABI for other architectures
-                else => return dg.lowerType(fn_info.return_type),
+                else => return dg.lowerType(return_type),
             }
         },
         .Win64 => return lowerWin64FnRetTy(dg, fn_info),
         .SysV => return lowerSystemVFnRetTy(dg, fn_info),
         .Stdcall => {
-            if (isScalar(fn_info.return_type)) {
-                return dg.lowerType(fn_info.return_type);
+            if (isScalar(mod, return_type)) {
+                return dg.lowerType(return_type);
            } else {
                 return dg.context.voidType();
             }
         },
-        else => return dg.lowerType(fn_info.return_type),
     }
 }
 
-fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
-    const target = dg.module.getTarget();
-    switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) {
+fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+    const mod = dg.module;
+    const return_type = fn_info.return_type.toType();
+    switch (x86_64_abi.classifyWindows(return_type, mod)) {
         .integer => {
-            if (isScalar(fn_info.return_type)) {
-                return dg.lowerType(fn_info.return_type);
+            if (isScalar(mod, return_type)) {
+                return dg.lowerType(return_type);
             } else {
-                const abi_size = fn_info.return_type.abiSize(target);
+                const abi_size = return_type.abiSize(mod);
                 return dg.context.intType(@intCast(c_uint, abi_size * 8));
             }
         },
         .win_i128 => return dg.context.intType(64).vectorType(2),
         .memory => return dg.context.voidType(),
-        .sse => return dg.lowerType(fn_info.return_type),
+        .sse => return dg.lowerType(return_type),
         else => unreachable,
     }
 }
 
-fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
-    if (isScalar(fn_info.return_type)) {
-        return dg.lowerType(fn_info.return_type);
+fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
+    const mod = dg.module;
+    const return_type = fn_info.return_type.toType();
+    if (isScalar(mod, return_type)) {
+        return dg.lowerType(return_type);
     }
-    const target = dg.module.getTarget();
-    const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
+    const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
     if (classes[0] == .memory) {
         return dg.context.voidType();
     }
@@ -10728,7 +10536,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
         }
     }
     if (classes[0] == .integer and classes[1] == .none) {
-        const abi_size = fn_info.return_type.abiSize(target);
+        const abi_size = return_type.abiSize(mod);
         return dg.context.intType(@intCast(c_uint, abi_size * 8));
     }
     return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
@@ -10736,10 +10544,9 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
 
 const ParamTypeIterator = struct {
     dg: *DeclGen,
-    fn_info: Type.Payload.Function.Data,
+    fn_info: InternPool.Key.FuncType,
     zig_index: u32,
     llvm_index: u32,
-    target: std.Target,
     llvm_types_len: u32,
     llvm_types_buffer: [8]*llvm.Type,
     byval_attr: bool,
@@ -10762,7 +10569,7 @@ const ParamTypeIterator = struct {
         if (it.zig_index >= it.fn_info.param_types.len) return null;
         const ty = it.fn_info.param_types[it.zig_index];
         it.byval_attr = false;
-        return nextInner(it, ty);
+        return nextInner(it, ty.toType());
     }
 
     /// `airCall` uses this instead of `next` so that it can take into account variadic functions.
@@ -10771,15 +10578,18 @@ const ParamTypeIterator = struct {
             if (it.zig_index >= args.len) {
                 return null;
             } else {
-                return nextInner(it, fg.air.typeOf(args[it.zig_index]));
+                return nextInner(it, fg.typeOf(args[it.zig_index]));
             }
         } else {
-            return nextInner(it, it.fn_info.param_types[it.zig_index]);
+            return nextInner(it, it.fn_info.param_types[it.zig_index].toType());
         }
     }
 
     fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering {
-        if (!ty.hasRuntimeBitsIgnoreComptime()) {
+        const mod = it.dg.module;
+        const target = mod.getTarget();
+
+        if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
             it.zig_index += 1;
             return .no_bits;
         }
@@ -10787,11 +10597,10 @@ const ParamTypeIterator = struct {
             .Unspecified, .Inline => {
                 it.zig_index += 1;
                 it.llvm_index += 1;
-                var buf: Type.Payload.ElemType = undefined;
-                if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) {
+                if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(mod).isSlice(mod))) {
                     it.llvm_index += 1;
                     return .slice;
-                } else if (isByRef(ty)) {
+                } else if (isByRef(ty, mod)) {
                     return .byref;
                 } else {
                     return .byval;
@@ -10801,23 +10610,23 @@ const ParamTypeIterator = struct {
                 @panic("TODO implement async function lowering in the LLVM backend");
             },
             .C => {
-                switch (it.target.cpu.arch) {
+                switch (target.cpu.arch) {
                     .mips, .mipsel => {
                         it.zig_index += 1;
                         it.llvm_index += 1;
                         return .byval;
                     },
-                    .x86_64 => switch (it.target.os.tag) {
+                    .x86_64 => switch (target.os.tag) {
                         .windows => return it.nextWin64(ty),
                         else => return it.nextSystemV(ty),
                     },
                     .wasm32 => {
                         it.zig_index += 1;
                         it.llvm_index += 1;
-                        if (isScalar(ty)) {
+                        if (isScalar(mod, ty)) {
                            return .byval;
                         }
-                        const classes = wasm_c_abi.classifyType(ty, it.target);
+                        const classes = wasm_c_abi.classifyType(ty, mod);
                         if (classes[0] == .indirect) {
                             return .byref;
                         }
@@ -10826,7 +10635,7 @@ const ParamTypeIterator = struct {
                     .aarch64, .aarch64_be => {
                         it.zig_index += 1;
                         it.llvm_index += 1;
-                        switch (aarch64_c_abi.classifyType(ty, it.target)) {
+                        switch (aarch64_c_abi.classifyType(ty, mod)) {
                             .memory => return .byref_mut,
                             .float_array => |len| return Lowering{ .float_array = len },
                             .byval => return .byval,
@@ -10841,7 +10650,7 @@ const ParamTypeIterator = struct {
                     .arm, .armeb => {
                         it.zig_index += 1;
                         it.llvm_index += 1;
-                        switch (arm_c_abi.classifyType(ty, it.target, .arg)) {
+                        switch (arm_c_abi.classifyType(ty, mod, .arg)) {
                             .memory => {
                                 it.byval_attr = true;
                                 return .byref;
@@ -10854,10 +10663,10 @@ const ParamTypeIterator = struct {
                     .riscv32, .riscv64 => {
                         it.zig_index += 1;
                         it.llvm_index += 1;
-                        if (ty.tag() == .f16) {
+                        if (ty.toIntern() == .f16_type) {
                             return .as_u16;
                         }
-                        switch (riscv_c_abi.classifyType(ty, it.target)) {
+                        switch (riscv_c_abi.classifyType(ty, mod)) {
                             .memory => return .byref_mut,
                             .byval => return .byval,
                             .integer => return .abi_sized_int,
@@ -10878,7 +10687,7 @@ const ParamTypeIterator = struct {
                 it.zig_index += 1;
                 it.llvm_index += 1;
 
-                if (isScalar(ty)) {
+                if (isScalar(mod, ty)) {
                     return .byval;
                 } else {
                     it.byval_attr = true;
@@ -10894,9 +10703,10 @@ const ParamTypeIterator = struct {
     }
 
     fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
-        switch (x86_64_abi.classifyWindows(ty, it.target)) {
+        const mod = it.dg.module;
+        switch (x86_64_abi.classifyWindows(ty, mod)) {
             .integer => {
-                if (isScalar(ty)) {
+                if (isScalar(mod, ty)) {
                     it.zig_index += 1;
                     it.llvm_index += 1;
                     return .byval;
@@ -10926,14 +10736,15 @@ const ParamTypeIterator = struct {
     }
 
     fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering {
-        const classes = x86_64_abi.classifySystemV(ty, it.target, .arg);
+        const mod = it.dg.module;
+        const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
         if (classes[0] == .memory) {
             it.zig_index += 1;
             it.llvm_index += 1;
             it.byval_attr = true;
             return .byref;
         }
-        if (isScalar(ty)) {
+        if (isScalar(mod, ty)) {
             it.zig_index += 1;
             it.llvm_index += 1;
             return .byval;
@@ -10986,13 +10797,12 @@ const ParamTypeIterator = struct {
     }
 };
 
-fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator {
+fn iterateParamTypes(dg: *DeclGen, fn_info: InternPool.Key.FuncType) ParamTypeIterator {
     return .{
         .dg = dg,
         .fn_info = fn_info,
        .zig_index = 0,
         .llvm_index = 0,
-        .target = dg.module.getTarget(),
         .llvm_types_buffer = undefined,
         .llvm_types_len = 0,
         .byval_attr = false,
@@ -11001,16 +10811,17 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp
 
 fn ccAbiPromoteInt(
     cc: std.builtin.CallingConvention,
-    target: std.Target,
+    mod: *Module,
     ty: Type,
 ) ?std.builtin.Signedness {
+    const target = mod.getTarget();
     switch (cc) {
         .Unspecified, .Inline, .Async => return null,
         else => {},
     }
-    const int_info = switch (ty.zigTypeTag()) {
-        .Bool => Type.u1.intInfo(target),
-        .Int, .Enum, .ErrorSet => ty.intInfo(target),
+    const int_info = switch (ty.zigTypeTag(mod)) {
+        .Bool => Type.u1.intInfo(mod),
+        .Int, .Enum, .ErrorSet => ty.intInfo(mod),
         else => return null,
     };
     if (int_info.bits <= 16) return int_info.signedness;
@@ -11039,12 +10850,12 @@ fn ccAbiPromoteInt(
 
 /// This is the one source of truth for whether a type is passed around as an LLVM pointer,
 /// or as an LLVM value.
-fn isByRef(ty: Type) bool {
+fn isByRef(ty: Type, mod: *Module) bool {
     // For tuples and structs, if there are more than this many non-void
     // fields, then we make it byref, otherwise byval.
     const max_fields_byval = 0;
 
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Type,
         .ComptimeInt,
         .ComptimeFloat,
@@ -11067,51 +10878,53 @@ fn isByRef(ty: Type) bool {
         .AnyFrame,
         => return false,
 
-        .Array, .Frame => return ty.hasRuntimeBits(),
+        .Array, .Frame => return ty.hasRuntimeBits(mod),
         .Struct => {
             // Packed structs are represented to LLVM as integers.
-            if (ty.containerLayout() == .Packed) return false;
-            if (ty.isSimpleTupleOrAnonStruct()) {
-                const tuple = ty.tupleFields();
-                var count: usize = 0;
-                for (tuple.values, 0..) |field_val, i| {
-                    if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue;
+            if (ty.containerLayout(mod) == .Packed) return false;
+            const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+                .anon_struct_type => |tuple| {
+                    var count: usize = 0;
+                    for (tuple.types, tuple.values) |field_ty, field_val| {
+                        if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
 
-                    count += 1;
-                    if (count > max_fields_byval) return true;
-                    if (isByRef(tuple.types[i])) return true;
-                }
-                return false;
-            }
+                        count += 1;
+                        if (count > max_fields_byval) return true;
+                        if (isByRef(field_ty.toType(), mod)) return true;
+                    }
+                    return false;
+                },
+                .struct_type => |s| s,
+                else => unreachable,
+            };
+            const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
             var count: usize = 0;
-            const fields = ty.structFields();
-            for (fields.values()) |field| {
-                if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
+            for (struct_obj.fields.values()) |field| {
+                if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
                 count += 1;
                 if (count > max_fields_byval) return true;
-                if (isByRef(field.ty)) return true;
+                if (isByRef(field.ty, mod)) return true;
             }
             return false;
         },
-        .Union => switch (ty.containerLayout()) {
+        .Union => switch (ty.containerLayout(mod)) {
            .Packed => return false,
-            else => return ty.hasRuntimeBits(),
+            else => return ty.hasRuntimeBits(mod),
         },
         .ErrorUnion => {
-            const payload_ty = ty.errorUnionPayload();
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+            const payload_ty = ty.errorUnionPayload(mod);
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 return false;
             }
             return true;
        },
         .Optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            const payload_ty = ty.optionalChild(&buf);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+            const payload_ty = ty.optionalChild(mod);
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 return false;
             }
-            if (ty.optionalReprIsPayload()) {
+            if (ty.optionalReprIsPayload(mod)) {
                 return false;
            }
             return true;
@@ -11119,8 +10932,8 @@
     }
 }
 
-fn isScalar(ty: Type) bool {
-    return switch (ty.zigTypeTag()) {
+fn isScalar(mod: *Module, ty: Type) bool {
+    return switch (ty.zigTypeTag(mod)) {
         .Void,
         .Bool,
         .NoReturn,
@@ -11134,8 +10947,8 @@
         .Vector,
         => true,
 
-        .Struct => ty.containerLayout() == .Packed,
-        .Union => ty.containerLayout() == .Packed,
+        .Struct => ty.containerLayout(mod) == .Packed,
+        .Union => ty.containerLayout(mod) == .Packed,
         else => false,
     };
 }
@@ -11184,10 +10997,10 @@ fn backendSupportsF128(target: std.Target) bool {
 /// LLVM does not support all relevant intrinsics for all targets, so we
 /// may need to manually generate a libc call
 fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
-    return switch (scalar_ty.tag()) {
-        .f16 => backendSupportsF16(target),
-        .f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
-        .f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
+    return switch (scalar_ty.toIntern()) {
+        .f16_type => backendSupportsF16(target),
+        .f80_type => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
+        .f128_type => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
         else => true,
     };
 }
@@ -11304,12 +11117,12 @@ fn buildAllocaInner(
     return alloca;
 }
 
-fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u1 {
-    return @boolToInt(Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target));
+fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
+    return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod));
 }
 
-fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u1 {
-    return @boolToInt(Type.anyerror.abiAlignment(target) <= payload_ty.abiAlignment(target));
+fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 {
+    return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
}
 
 /// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 09ace669a9..4fd91aded4 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -218,8 +218,9 @@ pub const DeclGen = struct {
     pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
         @setCold(true);
+        const mod = self.module;
         const src = LazySrcLoc.nodeOffset(0);
-        const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
+        const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index), mod);
         assert(self.error_msg == null);
         self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
         return error.CodegenFail;
@@ -231,12 +232,13 @@ pub const DeclGen = struct {
 
     /// Fetch the result-id for a previously generated instruction or constant.
     fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
-        if (self.air.value(inst)) |val| {
-            const ty = self.air.typeOf(inst);
-            if (ty.zigTypeTag() == .Fn) {
-                const fn_decl_index = switch (val.tag()) {
-                    .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
-                    .function => val.castTag(.function).?.data.owner_decl,
+        const mod = self.module;
+        if (try self.air.value(inst, mod)) |val| {
+            const ty = self.typeOf(inst);
+            if (ty.zigTypeTag(mod) == .Fn) {
+                const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                    .extern_func => |extern_func| extern_func.decl,
+                    .func => |func| mod.funcPtr(func.index).owner_decl,
                     else => unreachable,
                 };
                 const spv_decl_index = try self.resolveDecl(fn_decl_index);
@@ -254,12 +256,12 @@ pub const DeclGen = struct {
     /// Note: Function does not actually generate the decl.
     fn resolveDecl(self: *DeclGen, decl_index: Module.Decl.Index) !SpvModule.Decl.Index {
         const decl = self.module.declPtr(decl_index);
-        self.module.markDeclAlive(decl);
+        try self.module.markDeclAlive(decl);
 
         const entry = try self.decl_link.getOrPut(decl_index);
         if (!entry.found_existing) {
             // TODO: Extern fn?
-            const kind: SpvModule.DeclKind = if (decl.val.tag() == .function)
+            const kind: SpvModule.DeclKind = if (decl.val.getFunctionIndex(self.module) != .none)
                 .func
             else
                 .global;
@@ -340,8 +342,9 @@ pub const DeclGen = struct {
     }
 
     fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo {
+        const mod = self.module;
         const target = self.getTarget();
-        return switch (ty.zigTypeTag()) {
+        return switch (ty.zigTypeTag(mod)) {
             .Bool => ArithmeticTypeInfo{
                 .bits = 1, // Doesn't matter for this class.
                 .is_vector = false,
@@ -355,7 +358,7 @@ pub const DeclGen = struct {
                 .class = .float,
             },
             .Int => blk: {
-                const int_info = ty.intInfo(target);
+                const int_info = ty.intInfo(mod);
                 // TODO: Maybe it's useful to also return this value.
                 const maybe_backing_bits = self.backingIntBits(int_info.bits);
                 break :blk ArithmeticTypeInfo{
@@ -533,34 +536,35 @@
         }
 
         fn addInt(self: *@This(), ty: Type, val: Value) !void {
-            const target = self.dg.getTarget();
-            const int_info = ty.intInfo(target);
+            const mod = self.dg.module;
+            const int_info = ty.intInfo(mod);
             const int_bits = switch (int_info.signedness) {
-                .signed => @bitCast(u64, val.toSignedInt(target)),
-                .unsigned => val.toUnsignedInt(target),
+                .signed => @bitCast(u64, val.toSignedInt(mod)),
+                .unsigned => val.toUnsignedInt(mod),
             };
             // TODO: Swap endianess if the compiler is big endian.
-            const len = ty.abiSize(target);
+            const len = ty.abiSize(mod);
             try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]);
         }
 
         fn addFloat(self: *@This(), ty: Type, val: Value) !void {
+            const mod = self.dg.module;
             const target = self.dg.getTarget();
-            const len = ty.abiSize(target);
+            const len = ty.abiSize(mod);
             // TODO: Swap endianess if the compiler is big endian.
             switch (ty.floatBits(target)) {
                 16 => {
-                    const float_bits = val.toFloat(f16);
+                    const float_bits = val.toFloat(f16, mod);
                     try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
                },
                 32 => {
-                    const float_bits = val.toFloat(f32);
+                    const float_bits = val.toFloat(f32, mod);
                     try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
                 },
                 64 => {
-                    const float_bits = val.toFloat(f64);
+                    const float_bits = val.toFloat(f64, mod);
                     try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
                 },
                 else => unreachable,
@@ -569,6 +573,7 @@
 
         fn addDeclRef(self: *@This(), ty: Type, decl_index: Decl.Index) !void {
             const dg = self.dg;
+            const mod = dg.module;
 
             const ty_ref = try self.dg.resolveType(ty, .indirect);
             const ty_id = dg.typeId(ty_ref);
@@ -576,19 +581,18 @@
             const decl = dg.module.declPtr(decl_index);
             const spv_decl_index = try dg.resolveDecl(decl_index);
 
-            switch (decl.val.tag()) {
-                .function => {
+            switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+                .func => {
                     // TODO: Properly lower function pointers. For now we are going to hack around it and
                     // just generate an empty pointer. Function pointers are represented by usize for now,
                     // though.
-                    try self.addInt(Type.usize, Value.initTag(.zero));
+                    try self.addInt(Type.usize, Value.zero_usize);
                     // TODO: Add dependency
                     return;
                 },
-                .extern_fn => unreachable, // TODO
+                .extern_func => unreachable, // TODO
                 else => {
                     const result_id = dg.spv.allocId();
-                    log.debug("addDeclRef: id = {}, index = {}, name = {s}", .{ result_id.id, @enumToInt(spv_decl_index), decl.name });
 
                     try self.decl_deps.put(spv_decl_index, {});
@@ -606,117 +610,122 @@
             }
         }
 
-        fn lower(self: *@This(), ty: Type, val: Value) !void {
-            const target = self.dg.getTarget();
+        fn lower(self: *@This(), ty: Type, arg_val: Value) !void {
             const dg = self.dg;
+            const mod = dg.module;
 
-            if (val.isUndef()) {
-                const size = ty.abiSize(target);
+            var val = arg_val;
+            switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .runtime_value => |rt| val = rt.val.toValue(),
+                else => {},
+            }
+
+            if (val.isUndefDeep(mod)) {
+                const size = ty.abiSize(mod);
                 return try self.addUndef(size);
             }
 
-            switch (ty.zigTypeTag()) {
-                .Int => try self.addInt(ty, val),
-                .Float => try self.addFloat(ty, val),
-                .Bool => try self.addConstBool(val.toBool()),
-                .Array => switch (val.tag()) {
-                    .aggregate => {
-                        const elem_vals = val.castTag(.aggregate).?.data;
-                        const elem_ty = ty.elemType();
-                        const len = @intCast(u32, ty.arrayLenIncludingSentinel()); // TODO: limit spir-v to 32 bit arrays in a more elegant way.
-                        for (elem_vals[0..len]) |elem_val| {
-                            try self.lower(elem_ty, elem_val);
-                        }
-                    },
-                    .repeated => {
-                        const elem_val = val.castTag(.repeated).?.data;
-                        const elem_ty = ty.elemType();
-                        const len = @intCast(u32, ty.arrayLen());
-                        for (0..len) |_| {
-                            try self.lower(elem_ty, elem_val);
-                        }
-                        if (ty.sentinel()) |sentinel| {
-                            try self.lower(elem_ty, sentinel);
-                        }
-                    },
-                    .str_lit => {
-                        const str_lit = val.castTag(.str_lit).?.data;
-                        const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
-                        try self.addBytes(bytes);
-                        if (ty.sentinel()) |sentinel| {
-                            try self.addByte(@intCast(u8, sentinel.toUnsignedInt(target)));
-                        }
-                    },
-                    .bytes => {
-                        const bytes = val.castTag(.bytes).?.data;
-                        try self.addBytes(bytes);
-                    },
-                    else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}),
-                },
-                .Pointer => switch (val.tag()) {
-                    .decl_ref_mut => {
-                        const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
-                        try self.addDeclRef(ty, decl_index);
-                    },
-                    .decl_ref => {
-                        const decl_index = val.castTag(.decl_ref).?.data;
-                        try self.addDeclRef(ty, decl_index);
-                    },
-                    .slice => {
-                        const slice = val.castTag(.slice).?.data;
-
-                        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                        const ptr_ty = ty.slicePtrFieldType(&buf);
-
-                        try self.lower(ptr_ty, slice.ptr);
-                        try self.addInt(Type.usize, slice.len);
-                    },
-                    .null_value, .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)),
-                    .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
-                        try self.addInt(Type.usize, val);
-                    },
-                    else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}),
+            switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .int_type,
+                .ptr_type,
+                .array_type,
+                .vector_type,
+                .opt_type,
+                .anyframe_type,
+                .error_union_type,
+                .simple_type,
+                .struct_type,
+                .anon_struct_type,
+                .union_type,
+                .opaque_type,
+                .enum_type,
+                .func_type,
+                .error_set_type,
+                .inferred_error_set_type,
+                => unreachable, // types, not values
 
+                .undef, .runtime_value => unreachable, // handled above
+                .simple_value => |simple_value| switch (simple_value) {
+                    .undefined,
+                    .void,
+                    .null,
+                    .empty_struct,
.@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try self.addConstBool(val.toBool()), }, - .Struct => { - if (ty.isSimpleTupleOrAnonStruct()) { - unreachable; // TODO + .variable, + .extern_func, + .func, + .enum_literal, + .empty_enum_value, + => unreachable, // non-runtime values + .int => try self.addInt(ty, val), + .err => |err| { + const int = try mod.getErrorValue(err.name); + try self.addConstInt(u16, @intCast(u16, int)); + }, + .error_union => |error_union| { + const payload_ty = ty.errorUnionPayload(mod); + const is_pl = val.errorUnionIsPayload(mod); + const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); + + const eu_layout = dg.errorUnionLayout(payload_ty); + if (!eu_layout.payload_has_bits) { + return try self.lower(Type.anyerror, error_val); + } + + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiAlignment(mod); + const ty_size = ty.abiSize(mod); + const padding = ty_size - payload_size - error_size; + + const payload_val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(); + + if (eu_layout.error_first) { + try self.lower(Type.anyerror, error_val); + try self.lower(payload_ty, payload_val); } else { - const struct_ty = ty.castTag(.@"struct").?.data; + try self.lower(payload_ty, payload_val); + try self.lower(Type.anyerror, error_val); + } - if (struct_ty.layout == .Packed) { - return dg.todo("packed struct constants", .{}); - } + try self.addUndef(padding); + }, + .enum_tag => { + const int_val = try val.enumToInt(ty, mod); - const struct_begin = self.size; - const field_vals = val.castTag(.aggregate).?.data; - for (struct_ty.fields.values(), 0..) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; - try self.lower(field.ty, field_vals[i]); + const int_ty = ty.intTagType(mod); - // Add padding if required. - // TODO: Add to type generation as well? - const unpadded_field_end = self.size - struct_begin; - const padded_field_end = ty.structFieldOffset(i + 1, target); - const padding = padded_field_end - unpadded_field_end; - try self.addUndef(padding); - } + try self.lower(int_ty, int_val); + }, + .float => try self.addFloat(ty, val), + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| try self.addDeclRef(ty, decl), + .mut_decl => |mut_decl| try self.addDeclRef(ty, mut_decl.decl), + else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), + } + if (ptr.len != .none) { + try self.addInt(Type.usize, ptr.len.toValue()); } }, - .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); - const has_payload = !val.isNull(); - const abi_size = ty.abiSize(target); + .opt => { + const payload_ty = ty.optionalChild(mod); + const payload_val = val.optionalValue(mod); + const abi_size = ty.abiSize(mod); - if (!payload_ty.hasRuntimeBits()) { - try self.addConstBool(has_payload); + if (!payload_ty.hasRuntimeBits(mod)) { + try self.addConstBool(payload_val != null); return; - } else if (ty.optionalReprIsPayload()) { + } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. 
- if (val.castTag(.opt_payload)) |payload| { - try self.lower(payload_ty, payload.data); - } else if (has_payload) { - try self.lower(payload_ty, val); + if (payload_val) |pl_val| { + try self.lower(payload_ty, pl_val); } else { const ptr_ty_ref = try dg.resolveType(ty, .indirect); try self.addNullPtr(ptr_ty_ref); @@ -729,102 +738,98 @@ pub const DeclGen = struct { // Subtract 1 for @sizeOf(bool). // TODO: Make this not hardcoded. - const payload_size = payload_ty.abiSize(target); + const payload_size = payload_ty.abiSize(mod); const padding = abi_size - payload_size - 1; - if (val.castTag(.opt_payload)) |payload| { - try self.lower(payload_ty, payload.data); + if (payload_val) |pl_val| { + try self.lower(payload_ty, pl_val); } else { try self.addUndef(payload_size); } - try self.addConstBool(has_payload); + try self.addConstBool(payload_val != null); try self.addUndef(padding); }, - .Enum => { - var int_val_buffer: Value.Payload.U64 = undefined; - const int_val = val.enumToInt(ty, &int_val_buffer); + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => |array_type| { + const elem_ty = array_type.child.toType(); + switch (aggregate.storage) { + .bytes => |bytes| try self.addBytes(bytes), + .elems, .repeated_elem => { + for (0..array_type.len) |i| { + try self.lower(elem_ty, switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(), + .repeated_elem => |elem_val| elem_val.toValue(), + }); + } + }, + } + if (array_type.sentinel != .none) { + try self.lower(elem_ty, array_type.sentinel.toValue()); + } + }, + .vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), + .struct_type => { + const struct_ty = mod.typeToStruct(ty).?; - var int_ty_buffer: Type.Payload.Bits = undefined; - const int_ty = ty.intTagType(&int_ty_buffer); + if (struct_ty.layout == .Packed) { + return dg.todo("packed struct constants", .{}); + } - try self.lower(int_ty, int_val); + const struct_begin = self.size; + const field_vals = val.castTag(.aggregate).?.data; + for (struct_ty.fields.values(), 0..) |field, i| { + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; + try self.lower(field.ty, field_vals[i]); + + // Add padding if required. + // TODO: Add to type generation as well? 
+ const unpadded_field_end = self.size - struct_begin; + const padded_field_end = ty.structFieldOffset(i + 1, mod); + const padding = padded_field_end - unpadded_field_end; + try self.addUndef(padding); + } + }, + .anon_struct_type => unreachable, // TODO + else => unreachable, }, - .Union => { - const tag_and_val = val.castTag(.@"union").?.data; - const layout = ty.unionGetLayout(target); + .un => |un| { + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { - return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); + return try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } - const union_ty = ty.cast(Type.Payload.Union).?.data; + const union_ty = mod.typeToUnion(ty).?; if (union_ty.layout == .Packed) { return dg.todo("packed union constants", .{}); } - const active_field = ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?; + const active_field = ty.unionTagFieldIndex(un.tag.toValue(), dg.module).?; const active_field_ty = union_ty.fields.values()[active_field].ty; const has_tag = layout.tag_size != 0; const tag_first = layout.tag_align >= layout.payload_align; if (has_tag and tag_first) { - try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } - const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: { - try self.lower(active_field_ty, tag_and_val.val); - break :blk active_field_ty.abiSize(target); + const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { + try self.lower(active_field_ty, un.val.toValue()); + break :blk active_field_ty.abiSize(mod); } else 0; const payload_padding_len = layout.payload_size - active_field_size; try self.addUndef(payload_padding_len); if (has_tag and !tag_first) { - try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } try self.addUndef(layout.padding); }, - .ErrorSet => switch (val.tag()) { - .@"error" => { - const err_name = val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - try self.addConstInt(u16, @intCast(u16, kv.value)); - }, - .zero => { - // Unactivated error set. - try self.addConstInt(u16, 0); - }, - else => unreachable, - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - const is_pl = val.errorUnionIsPayload(); - const error_val = if (!is_pl) val else Value.initTag(.zero); - - const eu_layout = dg.errorUnionLayout(payload_ty); - if (!eu_layout.payload_has_bits) { - return try self.lower(Type.anyerror, error_val); - } - - const payload_size = payload_ty.abiSize(target); - const error_size = Type.anyerror.abiAlignment(target); - const ty_size = ty.abiSize(target); - const padding = ty_size - payload_size - error_size; - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); - - if (eu_layout.error_first) { - try self.lower(Type.anyerror, error_val); - try self.lower(payload_ty, payload_val); - } else { - try self.lower(payload_ty, payload_val); - try self.lower(Type.anyerror, error_val); - } - - try self.addUndef(padding); - }, - else => |tag| return dg.todo("indirect constant of type {s}", .{@tagName(tag)}), + .memoized_call => unreachable, } } }; @@ -878,7 +883,7 @@ pub const DeclGen = struct { // const target = self.getTarget(); // TODO: Fix the resulting global linking for these paths. - // if (val.isUndef()) { + // if (val.isUndef(mod)) { // // Special case: the entire value is undefined. 
In this case, we can just // // generate an OpVariable with no initializer. // return try section.emit(self.spv.gpa, .OpVariable, .{ @@ -886,7 +891,7 @@ pub const DeclGen = struct { // .id_result = result_id, // .storage_class = storage_class, // }); - // } else if (ty.abiSize(target) == 0) { + // } else if (ty.abiSize(mod) == 0) { // // Special case: if the type has no size, then return an undefined pointer. // return try section.emit(self.spv.gpa, .OpUndef, .{ // .id_result_type = self.typeId(ptr_ty_ref), @@ -968,68 +973,25 @@ pub const DeclGen = struct { /// is then loaded using OpLoad. Such values are loaded into the UniformConstant storage class by default. /// This function should only be called during function code generation. fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef { - const target = self.getTarget(); + const mod = self.module; const result_ty_ref = try self.resolveType(ty, repr); log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) }); - if (val.isUndef()) { + if (val.isUndef(mod)) { return self.spv.constUndef(result_ty_ref); } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => { - if (ty.isSignedInt()) { - return try self.spv.constInt(result_ty_ref, val.toSignedInt(target)); + if (ty.isSignedInt(mod)) { + return try self.spv.constInt(result_ty_ref, val.toSignedInt(mod)); } else { - return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(target)); + return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod)); } }, - .Bool => switch (repr) { - .direct => return try self.spv.constBool(result_ty_ref, val.toBool()), - .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool())), - }, - .Float => return switch (ty.floatBits(target)) { - 16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16) } } }), - 32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32) } } }), - 64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64) } } }), - 80, 128 => unreachable, // TODO - else => unreachable, - }, - .ErrorSet => { - const value = switch (val.tag()) { - .@"error" => blk: { - const err_name = val.castTag(.@"error").?.data.name; - const kv = try self.module.getErrorValue(err_name); - break :blk @intCast(u16, kv.value); - }, - .zero => 0, - else => unreachable, - }; - - return try self.spv.constInt(result_ty_ref, value); - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - const is_pl = val.errorUnionIsPayload(); - const error_val = if (!is_pl) val else Value.initTag(.zero); - - const eu_layout = self.errorUnionLayout(payload_ty); - if (!eu_layout.payload_has_bits) { - return try self.constant(Type.anyerror, error_val, repr); - } - - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); - - var members: [2]IdRef = undefined; - if (eu_layout.error_first) { - members[0] = try self.constant(Type.anyerror, error_val, .indirect); - members[1] = try self.constant(payload_ty, payload_val, .indirect); - } else { - members[0] = try self.constant(payload_ty, payload_val, .indirect); - members[1] = try self.constant(Type.anyerror, error_val, .indirect); - } - return try self.spv.constComposite(result_ty_ref, &members); + .Bool => switch (repr) { + .direct => return try self.spv.constBool(result_ty_ref, val.toBool()), + .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool())), }, // TODO: We can handle most pointers here (decl refs etc), because now they emit an extra // OpVariable 
that is not really required. @@ -1037,7 +999,7 @@ pub const DeclGen = struct { // The value cannot be generated directly, so generate it as an indirect constant, // and then perform an OpLoad. const result_id = self.spv.allocId(); - const alignment = ty.abiAlignment(target); + const alignment = ty.abiAlignment(mod); const spv_decl_index = try self.spv.allocDecl(.global); try self.lowerIndirectConstant( @@ -1114,9 +1076,9 @@ pub const DeclGen = struct { /// NOTE: When the active field is set to something other than the most aligned field, the /// resulting struct will be *underaligned*. fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef { - const target = self.getTarget(); - const layout = ty.unionGetLayout(target); - const union_ty = ty.cast(Type.Payload.Union).?.data; + const mod = self.module; + const layout = ty.unionGetLayout(mod); + const union_ty = mod.typeToUnion(ty).?; if (union_ty.layout == .Packed) { return self.todo("packed union types", .{}); @@ -1143,11 +1105,11 @@ pub const DeclGen = struct { const active_field = maybe_active_field orelse layout.most_aligned_field; const active_field_ty = union_ty.fields.values()[active_field].ty; - const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: { + const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { const active_payload_ty_ref = try self.resolveType(active_field_ty, .indirect); member_types.appendAssumeCapacity(active_payload_ty_ref); member_names.appendAssumeCapacity(try self.spv.resolveString("payload")); - break :blk active_field_ty.abiSize(target); + break :blk active_field_ty.abiSize(mod); } else 0; const payload_padding_len = layout.payload_size - active_field_size; @@ -1177,21 +1139,21 @@ pub const DeclGen = struct { /// Turn a Zig type into a SPIR-V Type, and return a reference to it. 
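// [Editor's note: hypothetical illustration, not part of this diff.]
// resolveType is keyed on a Repr: the same Zig type can lower to two SPIR-V
// types. Bool, for instance, is OpTypeBool in direct ("register") form but a
// 1-bit unsigned integer in indirect (memory) form, as the .Bool branch below
// shows. A standalone sketch of that mapping:
const ReprSketch = enum { direct, indirect };
fn boolSpvTypeName(repr: ReprSketch) []const u8 {
    return switch (repr) {
        .direct => "OpTypeBool",
        .indirect => "OpTypeInt 1 0", // unsigned 1-bit integer
    };
}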
fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef { + const mod = self.module; log.debug("resolveType: ty = {}", .{ty.fmt(self.module)}); const target = self.getTarget(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return try self.spv.resolve(.void_type), .Bool => switch (repr) { .direct => return try self.spv.resolve(.bool_type), .indirect => return try self.intType(.unsigned, 1), }, .Int => { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { - var buffer: Type.Payload.Bits = undefined; - const tag_ty = ty.intTagType(&buffer); + const tag_ty = ty.intTagType(mod); return self.resolveType(tag_ty, repr); }, .Float => { @@ -1213,17 +1175,18 @@ pub const DeclGen = struct { return try self.spv.resolve(.{ .float_type = .{ .bits = bits } }); }, .Array => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_ty_ref = try self.resolveType(elem_ty, .direct); - const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel()) orelse { - return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel()}); + const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse { + return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)}); }; return self.spv.arrayType(total_len, elem_ty_ref); }, .Fn => switch (repr) { .direct => { + const fn_info = mod.typeToFunc(ty).?; // TODO: Put this somewhere in Sema.zig - if (ty.fnIsVarArgs()) + if (fn_info.is_var_args) return self.fail("VarArgs functions are unsupported for SPIR-V", .{}); const param_ty_refs = try self.gpa.alloc(CacheRef, ty.fnParamLen()); @@ -1245,7 +1208,7 @@ pub const DeclGen = struct { }, }, .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); const storage_class = spvStorageClass(ptr_info.@"addrspace"); const child_ty_ref = try self.resolveType(ptr_info.pointee_type, .indirect); @@ -1277,8 +1240,8 @@ pub const DeclGen = struct { // TODO: Properly verify sizes and child type. return try self.spv.resolve(.{ .vector_type = .{ - .component_type = try self.resolveType(ty.elemType(), repr), - .component_count = @intCast(u32, ty.vectorLen()), + .component_type = try self.resolveType(ty.childType(mod), repr), + .component_count = @intCast(u32, ty.vectorLen(mod)), } }); }, .Struct => { @@ -1290,7 +1253,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field_ty, .indirect); member_index += 1; @@ -1301,7 +1264,7 @@ pub const DeclGen = struct { } }); } - const struct_ty = ty.castTag(.@"struct").?.data; + const struct_ty = mod.typeToStruct(ty).?; if (struct_ty.layout == .Packed) { return try self.resolveType(struct_ty.backing_int_ty, .direct); @@ -1314,16 +1277,16 @@ pub const DeclGen = struct { defer self.gpa.free(member_names); var member_index: usize = 0; - for (struct_ty.fields.values(), 0..) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + for (struct_ty.fields.values(), 0..) 
|field, i| { + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field.ty, .indirect); member_names[member_index] = try self.spv.resolveString(struct_ty.fields.keys()[i]); member_index += 1; } - const name = try struct_ty.getFullyQualifiedName(self.module); - defer self.module.gpa.free(name); + const name = mod.intern_pool.stringToSlice(try struct_ty.getFullyQualifiedName(self.module)); return try self.spv.resolve(.{ .struct_type = .{ .name = try self.spv.resolveString(name), @@ -1332,9 +1295,8 @@ pub const DeclGen = struct { } }); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const payload_ty = ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // Just use a bool. // Note: Always generate the bool with indirect format, to save on some sanity // Perform the conversion to a direct bool when the field is extracted. @@ -1342,7 +1304,7 @@ pub const DeclGen = struct { } const payload_ty_ref = try self.resolveType(payload_ty, .indirect); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { // Optional is actually a pointer or a slice. return payload_ty_ref; } @@ -1360,7 +1322,7 @@ pub const DeclGen = struct { .Union => return try self.resolveUnionType(ty, null), .ErrorSet => return try self.intType(.unsigned, 16), .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); const error_ty_ref = try self.resolveType(Type.anyerror, .indirect); const eu_layout = self.errorUnionLayout(payload_ty); @@ -1445,14 +1407,14 @@ pub const DeclGen = struct { }; fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout { - const target = self.getTarget(); + const mod = self.module; - const error_align = Type.anyerror.abiAlignment(target); - const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(mod); + const payload_align = payload_ty.abiAlignment(mod); const error_first = error_align > payload_align; return .{ - .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(), + .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod), .error_first = error_first, }; } @@ -1529,28 +1491,28 @@ pub const DeclGen = struct { } fn genDecl(self: *DeclGen) !void { - const decl = self.module.declPtr(self.decl_index); + if (true) @panic("TODO: update SPIR-V backend for InternPool changes"); + const mod = self.module; + const decl = mod.declPtr(self.decl_index); const spv_decl_index = try self.resolveDecl(self.decl_index); const decl_id = self.spv.declPtr(spv_decl_index).result_id; - log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); - if (decl.val.castTag(.function)) |_| { - assert(decl.ty.zigTypeTag() == .Fn); + if (decl.val.getFunction(mod)) |_| { + assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ - .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()), + .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType(mod)), .id_result = decl_id, .function_control = .{}, // TODO: We can set inline here if the type requires it. 
.function_type = prototype_id, }); - const params = decl.ty.fnParamLen(); - var i: usize = 0; + const fn_info = mod.typeToFunc(decl.ty).?; - try self.args.ensureUnusedCapacity(self.gpa, params); - while (i < params) : (i += 1) { - const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i)); + try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len); + for (fn_info.param_types) |param_type| { + const param_type_id = try self.resolveTypeId(param_type.toType()); const arg_result_id = self.spv.allocId(); try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{ .id_result_type = param_type_id, @@ -1576,8 +1538,7 @@ pub const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.getFullyQualifiedName(self.module); - defer self.module.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(self.module)); try self.spv.sections.debug_names.emit(self.gpa, .OpName, .{ .target = decl_id, @@ -1589,12 +1550,12 @@ pub const DeclGen = struct { try self.generateTestEntryPoint(fqn, spv_decl_index); } } else { - const init_val = if (decl.val.castTag(.variable)) |payload| - payload.data.init + const init_val = if (decl.val.getVariable(mod)) |payload| + payload.init.toValue() else decl.val; - if (init_val.tag() == .unreachable_value) { + if (init_val.ip_index == .unreachable_value) { return self.todo("importing extern variables", .{}); } @@ -1634,7 +1595,8 @@ pub const DeclGen = struct { /// Convert representation from indirect (in memory) to direct (in 'register') /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct). fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - return switch (ty.zigTypeTag()) { + const mod = self.module; + return switch (ty.zigTypeTag(mod)) { .Bool => blk: { const direct_bool_ty_ref = try self.resolveType(ty, .direct); const indirect_bool_ty_ref = try self.resolveType(ty, .indirect); @@ -1655,7 +1617,8 @@ pub const DeclGen = struct { /// Convert representation from direct (in 'register') to indirect (in memory) /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect). 
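// [Editor's sketch, not part of the diff.] For bools this direct-to-indirect
// conversion is exactly a @boolToInt into the 1-bit memory form:
fn directBoolToIndirect(b: bool) u1 {
    return @boolToInt(b);
}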
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - return switch (ty.zigTypeTag()) { + const mod = self.module; + return switch (ty.zigTypeTag(mod)) { .Bool => blk: { const indirect_bool_ty_ref = try self.resolveType(ty, .indirect); break :blk self.boolToInt(indirect_bool_ty_ref, operand_id); @@ -1679,11 +1642,12 @@ pub const DeclGen = struct { } fn load(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef) !IdRef { - const value_ty = ptr_ty.childType(); + const mod = self.module; + const value_ty = ptr_ty.childType(mod); const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ - .Volatile = ptr_ty.isVolatilePtr(), + .Volatile = ptr_ty.isVolatilePtr(mod), }; try self.func.body.emit(self.spv.gpa, .OpLoad, .{ .id_result_type = self.typeId(indirect_value_ty_ref), @@ -1695,10 +1659,11 @@ pub const DeclGen = struct { } fn store(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, value_id: IdRef) !void { - const value_ty = ptr_ty.childType(); + const mod = self.module; + const value_ty = ptr_ty.childType(mod); const indirect_value_id = try self.convertToIndirect(value_ty, value_id); const access = spec.MemoryAccess.Extended{ - .Volatile = ptr_ty.isVolatilePtr(), + .Volatile = ptr_ty.isVolatilePtr(mod), }; try self.func.body.emit(self.spv.gpa, .OpStore, .{ .pointer = ptr_id, @@ -1714,10 +1679,11 @@ pub const DeclGen = struct { } fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; + const ip = &mod.intern_pool; // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) return; - } const air_tags = self.air.instructions.items(.tag); const maybe_result_id: ?IdRef = switch (air_tags[inst]) { @@ -1794,8 +1760,6 @@ pub const DeclGen = struct { .br => return self.airBr(inst), .breakpoint => return, .cond_br => return self.airCondBr(inst), - .constant => unreachable, - .const_ty => unreachable, .dbg_stmt => return self.airDbgStmt(inst), .loop => return self.airLoop(inst), .ret => return self.airRet(inst), @@ -1841,7 +1805,7 @@ pub const DeclGen = struct { const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocId(); - const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst)); + const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst)); try self.func.body.emit(self.spv.gpa, opcode, .{ .id_result_type = result_type_id, .id_result = result_id, @@ -1856,7 +1820,7 @@ pub const DeclGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); - const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst)); + const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst)); // the shift and the base must be the same type in SPIR-V, but in Zig the shift is a smaller int. const shift_id = self.spv.allocId(); @@ -1901,15 +1865,15 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; // LHS and RHS are guaranteed to have the same type, and AIR guarantees // the result to be the same as the LHS and RHS, which matches SPIR-V. 
- const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const bin_op = self.air.instructions.items(.data)[inst].bin_op; var lhs_id = try self.resolve(bin_op.lhs); var rhs_id = try self.resolve(bin_op.rhs); const result_ty_ref = try self.resolveType(ty, .direct); - assert(self.air.typeOf(bin_op.lhs).eql(ty, self.module)); - assert(self.air.typeOf(bin_op.rhs).eql(ty, self.module)); + assert(self.typeOf(bin_op.lhs).eql(ty, self.module)); + assert(self.typeOf(bin_op.rhs).eql(ty, self.module)); // Binary operations are generally applicable to both scalar and vector operations // in SPIR-V, but int and float versions of operations require different opcodes. @@ -1965,8 +1929,8 @@ pub const DeclGen = struct { const lhs = try self.resolve(extra.lhs); const rhs = try self.resolve(extra.rhs); - const operand_ty = self.air.typeOf(extra.lhs); - const result_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(extra.lhs); + const result_ty = self.typeOfIndex(inst); const info = try self.arithmeticTypeInfo(operand_ty); switch (info.class) { @@ -2056,15 +2020,16 @@ pub const DeclGen = struct { } fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; if (self.liveness.isUnused(inst)) return null; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); const b = try self.resolve(extra.b); - const mask = self.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; - const a_len = self.air.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(mod); const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(ty); @@ -2078,12 +2043,11 @@ pub const DeclGen = struct { var i: usize = 0; while (i < mask_len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const elem = mask.elemValueBuffer(self.module, i, &buf); - if (elem.isUndef()) { + const elem = try mask.elemValue(mod, i); + if (elem.isUndef(mod)) { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { - const int = elem.toSignedInt(self.getTarget()); + const int = elem.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); self.func.body.writeOperand(spec.LiteralInteger, unsigned); } @@ -2130,9 +2094,10 @@ pub const DeclGen = struct { } fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { + const mod = self.module; const result_ty_ref = try self.resolveType(result_ty, .direct); - switch (ptr_ty.ptrSize()) { + switch (ptr_ty.ptrSize(mod)) { .One => { // Pointer to array // TODO: Is this correct? 
@@ -2155,8 +2120,8 @@ pub const DeclGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); const offset_id = try self.resolve(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const result_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOf(bin_op.lhs); + const result_ty = self.typeOfIndex(inst); return try self.ptrAdd(result_ty, ptr_ty, ptr_id, offset_id); } @@ -2166,11 +2131,11 @@ pub const DeclGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const offset_id = try self.resolve(bin_op.rhs); - const offset_ty = self.air.typeOf(bin_op.rhs); + const offset_ty = self.typeOf(bin_op.rhs); const offset_ty_ref = try self.resolveType(offset_ty, .direct); - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const negative_offset_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpSNegate, .{ @@ -2189,13 +2154,13 @@ pub const DeclGen = struct { lhs_id: IdRef, rhs_id: IdRef, ) !IdRef { + const mod = self.module; var cmp_lhs_id = lhs_id; var cmp_rhs_id = rhs_id; const opcode: Opcode = opcode: { - var int_buffer: Type.Payload.Bits = undefined; - const op_ty = switch (ty.zigTypeTag()) { + const op_ty = switch (ty.zigTypeTag(mod)) { .Int, .Bool, .Float => ty, - .Enum => ty.intTagType(&int_buffer), + .Enum => ty.intTagType(mod), .ErrorSet => Type.u16, .Pointer => blk: { // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are @@ -2291,8 +2256,8 @@ pub const DeclGen = struct { const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const bool_ty_id = try self.resolveTypeId(Type.bool); - const ty = self.air.typeOf(bin_op.lhs); - assert(ty.eql(self.air.typeOf(bin_op.rhs), self.module)); + const ty = self.typeOf(bin_op.lhs); + assert(ty.eql(self.typeOf(bin_op.rhs), self.module)); return try self.cmp(op, bool_ty_id, ty, lhs_id, rhs_id); } @@ -2303,13 +2268,14 @@ pub const DeclGen = struct { src_ty: Type, src_id: IdRef, ) !IdRef { + const mod = self.module; const dst_ty_ref = try self.resolveType(dst_ty, .direct); const result_id = self.spv.allocId(); // TODO: Some more cases are missing here // See fn bitCast in llvm.zig - if (src_ty.zigTypeTag() == .Int and dst_ty.isPtrAtRuntime()) { + if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) { try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ .id_result_type = self.typeId(dst_ty_ref), .id_result = result_id, @@ -2329,8 +2295,8 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const result_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const result_ty = self.typeOfIndex(inst); return try self.bitCast(result_ty, operand_ty, operand_id); } @@ -2339,11 +2305,11 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); - const target = self.getTarget(); - const dest_info = 
dest_ty.intInfo(target); + const mod = self.module; + const dest_info = dest_ty.intInfo(mod); // TODO: Masking? @@ -2383,10 +2349,10 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand_id = try self.resolve(ty_op.operand); const operand_info = try self.arithmeticTypeInfo(operand_ty); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); const result_id = self.spv.allocId(); @@ -2410,7 +2376,7 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_info = try self.arithmeticTypeInfo(dest_ty); const dest_ty_id = try self.resolveTypeId(dest_ty); @@ -2447,20 +2413,21 @@ pub const DeclGen = struct { fn airSliceField(self: *DeclGen, inst: Air.Inst.Index, field: u32) !?IdRef { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const field_ty = self.air.typeOfIndex(inst); + const field_ty = self.typeOfIndex(inst); const operand_id = try self.resolve(ty_op.operand); return try self.extractField(field_ty, operand_id, field); } fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + const slice_ty = self.typeOf(bin_op.lhs); + if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); - const ptr_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOfIndex(inst); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); const slice_ptr = try self.extractField(ptr_ty, slice_id, 0); @@ -2468,15 +2435,16 @@ pub const DeclGen = struct { } fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + const slice_ty = self.typeOf(bin_op.lhs); + if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); - var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&slice_buf); + const ptr_ty = slice_ty.slicePtrFieldType(mod); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); const slice_ptr = try self.extractField(ptr_ty, slice_id, 0); @@ -2485,11 +2453,12 @@ pub const DeclGen = struct { } fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { + const mod = self.module; // Construct new pointer type for the resulting pointer - const elem_ty = ptr_ty.elemType2(); // use elemType() so that we get T for *[N]T. + const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. 
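// [Editor's note: standalone illustration, not part of this diff.] elemType2
// yields T for *[N]T, which is what element access needs: indexing a
// pointer-to-array produces *T.
fn arrayElemPtr(ptr: *[4]u32, i: usize) *u32 {
    return &ptr[i];
}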
const elem_ty_ref = try self.resolveType(elem_ty, .direct); - const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); - if (ptr_ty.isSinglePointer()) { + const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace(mod))); + if (ptr_ty.isSinglePointer(mod)) { // Pointer-to-array. In this case, the resulting pointer is not of the same type // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain. return try self.accessChain(elem_ptr_ty_ref, ptr_id, &.{index_id}); @@ -2502,12 +2471,13 @@ pub const DeclGen = struct { fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const ptr_ty = self.typeOf(bin_op.lhs); + const elem_ty = ptr_ty.childType(mod); // TODO: Make this return a null ptr or something - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2515,8 +2485,9 @@ pub const DeclGen = struct { } fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2525,19 +2496,19 @@ pub const DeclGen = struct { // If we have a pointer-to-array, construct an element pointer to use with load() // If we pass ptr_ty directly, it will attempt to load the entire array rather than // just an element. 
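// [Editor's sketch: hypothetical, not from this commit.] The fixup below
// mirrors this pure-Zig shape: derive a single-element *T from the many-item
// pointer and load through that, never through the [*]T itself.
fn loadElem(ptr: [*]const u32, i: usize) u32 {
    const elem_ptr: *const u32 = &ptr[i];
    return elem_ptr.*;
}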
- var elem_ptr_info = ptr_ty.ptrInfo(); - elem_ptr_info.data.size = .One; - const elem_ptr_ty = Type.initPayload(&elem_ptr_info.base); + var elem_ptr_info = ptr_ty.ptrInfo(mod); + elem_ptr_info.size = .One; + const elem_ptr_ty = try Type.ptr(undefined, mod, elem_ptr_info); return try self.load(elem_ptr_ty, elem_ptr_id); } fn airGetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const un_ty = self.air.typeOf(ty_op.operand); + const un_ty = self.typeOf(ty_op.operand); - const target = self.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const mod = self.module; + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_handle = try self.resolve(ty_op.operand); @@ -2551,17 +2522,18 @@ pub const DeclGen = struct { fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ty = self.typeOf(struct_field.struct_operand); const object_id = try self.resolve(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); + const field_ty = struct_ty.structFieldType(field_index, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - assert(struct_ty.zigTypeTag() == .Struct); // Cannot do unions yet. + assert(struct_ty.zigTypeTag(mod) == .Struct); // Cannot do unions yet. return try self.extractField(field_ty, object_id, field_index); } @@ -2573,9 +2545,10 @@ pub const DeclGen = struct { object_ptr: IdRef, field_index: u32, ) !?IdRef { - const object_ty = object_ptr_ty.childType(); - switch (object_ty.zigTypeTag()) { - .Struct => switch (object_ty.containerLayout()) { + const mod = self.module; + const object_ty = object_ptr_ty.childType(mod); + switch (object_ty.zigTypeTag(mod)) { + .Struct => switch (object_ty.containerLayout(mod)) { .Packed => unreachable, // TODO else => { const field_index_ty_ref = try self.intType(.unsigned, 32); @@ -2592,8 +2565,8 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolve(ty_op.operand); - const struct_ptr_ty = self.air.typeOf(ty_op.operand); - const result_ptr_ty = self.air.typeOfIndex(inst); + const struct_ptr_ty = self.typeOf(ty_op.operand); + const result_ptr_ty = self.typeOfIndex(inst); return try self.structFieldPtr(result_ptr_ty, struct_ptr_ty, struct_ptr, field_index); } @@ -2649,9 +2622,10 @@ pub const DeclGen = struct { fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; - const ptr_ty = self.air.typeOfIndex(inst); - assert(ptr_ty.ptrAddressSpace() == .generic); - const child_ty = ptr_ty.childType(); + const mod = self.module; + const ptr_ty = self.typeOfIndex(inst); + assert(ptr_ty.ptrAddressSpace(mod) == .generic); + const child_ty = ptr_ty.childType(mod); const child_ty_ref = try self.resolveType(child_ty, .indirect); return try self.alloc(child_ty_ref, null); } @@ -2667,6 +2641,7 @@ pub const DeclGen = struct { // the current block by first generating the code of the block, then a label, and then generate 
the rest of the current // ir.Block in a different SPIR-V block. + const mod = self.module; const label_id = self.spv.allocId(); // 4 chosen as arbitrary initial capacity. @@ -2681,7 +2656,7 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.gpa); } - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; @@ -2690,7 +2665,7 @@ pub const DeclGen = struct { try self.beginSpvBlock(label_id); // If this block didn't produce a value, simply return here. - if (!ty.hasRuntimeBitsIgnoreComptime()) + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return null; // Combine the result from the blocks using the Phi instruction. @@ -2714,9 +2689,10 @@ pub const DeclGen = struct { fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(br.block_inst).?; - const operand_ty = self.air.typeOf(br.operand); + const operand_ty = self.typeOf(br.operand); - if (operand_ty.hasRuntimeBits()) { + const mod = self.module; + if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body. try block.incoming_blocks.append(self.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); @@ -2753,7 +2729,10 @@ pub const DeclGen = struct { fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; - const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index)); + const src_fname_id = try self.spv.resolveSourceFileName( + self.module, + self.module.declPtr(self.decl_index), + ); try self.func.body.emit(self.spv.gpa, .OpLine, .{ .file = src_fname_id, .line = dbg_stmt.line, @@ -2762,22 +2741,24 @@ pub const DeclGen = struct { } fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const operand = try self.resolve(ty_op.operand); - if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; return try self.load(ptr_ty, operand); } fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); @@ -2804,8 +2785,9 @@ pub const DeclGen = struct { fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { const operand = self.air.instructions.items(.data)[inst].un_op; - const operand_ty = self.air.typeOf(operand); - if (operand_ty.hasRuntimeBits()) { + const operand_ty = self.typeOf(operand); 
+ const mod = self.module; + if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(operand); try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id }); } else { @@ -2814,11 +2796,12 @@ pub const DeclGen = struct { } fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ptr_ty = self.air.typeOf(un_op); - const ret_ty = ptr_ty.childType(); + const ptr_ty = self.typeOf(un_op); + const ret_ty = ptr_ty.childType(mod); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { try self.func.body.emit(self.spv.gpa, .OpReturn, {}); return; } @@ -2831,20 +2814,21 @@ pub const DeclGen = struct { } fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const err_union_id = try self.resolve(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const err_union_ty = self.typeOf(pl_op.operand); + const payload_ty = self.typeOfIndex(inst); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); const bool_ty_ref = try self.resolveType(Type.bool, .direct); const eu_layout = self.errorUnionLayout(payload_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const err_id = if (eu_layout.payload_has_bits) try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex()) else @@ -2892,17 +2876,18 @@ pub const DeclGen = struct { fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const err_union_ty = self.air.typeOf(ty_op.operand); + const err_union_ty = self.typeOf(ty_op.operand); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { // No error possible, so just return undefined. 
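// [Editor's note: illustrative only, not part of this diff.] With an empty
// error set the error code is unobservable, which is what makes the undef
// below sound; the Zig-level analogue is that error{}!T always holds a payload:
fn neverFails() error{}!u32 {
    return 42;
}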
             return try self.spv.constUndef(err_ty_ref);
         }

-        const payload_ty = err_union_ty.errorUnionPayload();
+        const payload_ty = err_union_ty.errorUnionPayload(mod);
         const eu_layout = self.errorUnionLayout(payload_ty);
         if (!eu_layout.payload_has_bits) {
@@ -2916,9 +2901,10 @@ pub const DeclGen = struct {
     fn airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         if (self.liveness.isUnused(inst)) return null;

+        const mod = self.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const err_union_ty = self.air.typeOfIndex(inst);
-        const payload_ty = err_union_ty.errorUnionPayload();
+        const err_union_ty = self.typeOfIndex(inst);
+        const payload_ty = err_union_ty.errorUnionPayload(mod);
         const operand_id = try self.resolve(ty_op.operand);
         const eu_layout = self.errorUnionLayout(payload_ty);
@@ -2946,25 +2932,24 @@ pub const DeclGen = struct {
     fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_null, is_non_null }) !?IdRef {
         if (self.liveness.isUnused(inst)) return null;

+        const mod = self.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand_id = try self.resolve(un_op);
-        const optional_ty = self.air.typeOf(un_op);
+        const optional_ty = self.typeOf(un_op);

-        var buf: Type.Payload.ElemType = undefined;
-        const payload_ty = optional_ty.optionalChild(&buf);
+        const payload_ty = optional_ty.optionalChild(mod);

         const bool_ty_ref = try self.resolveType(Type.bool, .direct);

-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             // Pointer payload represents nullability: pointer or slice.
-            var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-            const ptr_ty = if (payload_ty.isSlice())
-                payload_ty.slicePtrFieldType(&ptr_buf)
+            const ptr_ty = if (payload_ty.isSlice(mod))
+                payload_ty.slicePtrFieldType(mod)
             else
                 payload_ty;

-            const ptr_id = if (payload_ty.isSlice())
+            const ptr_id = if (payload_ty.isSlice(mod))
                 try self.extractField(Type.bool, operand_id, 0)
             else
                 operand_id;
@@ -2985,7 +2970,7 @@ pub const DeclGen = struct {
             return result_id;
         }

-        const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime())
+        const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime(mod))
             try self.extractField(Type.bool, operand_id, 1)
         else
             // Optional representation is bool indicating whether the optional is set
@@ -3009,14 +2994,15 @@ pub const DeclGen = struct {
     fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         if (self.liveness.isUnused(inst)) return null;

+        const mod = self.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
-        const optional_ty = self.air.typeOf(ty_op.operand);
-        const payload_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOf(ty_op.operand);
+        const payload_ty = self.typeOfIndex(inst);

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;

-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             return operand_id;
         }
@@ -3026,16 +3012,17 @@ pub const DeclGen = struct {
     fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         if (self.liveness.isUnused(inst)) return null;

+        const mod = self.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const payload_ty = self.air.typeOf(ty_op.operand);
+        const payload_ty = self.typeOf(ty_op.operand);

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return try self.constBool(true, .direct);
         }

         const operand_id = try self.resolve(ty_op.operand);
-        const optional_ty = self.air.typeOfIndex(inst);
-        if (optional_ty.optionalReprIsPayload()) {
+        const optional_ty = self.typeOfIndex(inst);
+        if (optional_ty.optionalReprIsPayload(mod)) {
             return operand_id;
         }
@@ -3045,30 +3032,29 @@ pub const DeclGen = struct {
     }

     fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
-        const target = self.getTarget();
+        const mod = self.module;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const cond = try self.resolve(pl_op.operand);
-        const cond_ty = self.air.typeOf(pl_op.operand);
+        const cond_ty = self.typeOf(pl_op.operand);
         const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
-        const cond_words: u32 = switch (cond_ty.zigTypeTag()) {
+        const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) {
             .Int => blk: {
-                const bits = cond_ty.intInfo(target).bits;
+                const bits = cond_ty.intInfo(mod).bits;
                 const backing_bits = self.backingIntBits(bits) orelse {
                     return self.todo("implement composite int switch", .{});
                 };
                 break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
             },
             .Enum => blk: {
-                var buffer: Type.Payload.Bits = undefined;
-                const int_ty = cond_ty.intTagType(&buffer);
-                const int_info = int_ty.intInfo(target);
+                const int_ty = cond_ty.intTagType(mod);
+                const int_info = int_ty.intInfo(mod);
                 const backing_bits = self.backingIntBits(int_info.bits) orelse {
                     return self.todo("implement composite int switch", .{});
                 };
                 break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
             },
-            else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag())}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
+            else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them as we can only do integers.
         };

         const num_cases = switch_br.data.cases_len;
@@ -3112,15 +3098,14 @@ pub const DeclGen = struct {
             const label = IdRef{ .id = first_case_label.id + case_i };

             for (items) |item| {
-                const value = self.air.value(item) orelse {
+                const value = (try self.air.value(item, mod)) orelse {
                     return self.todo("switch on runtime value???", .{});
                 };
-                const int_val = switch (cond_ty.zigTypeTag()) {
-                    .Int => if (cond_ty.isSignedInt()) @bitCast(u64, value.toSignedInt(target)) else value.toUnsignedInt(target),
+                const int_val = switch (cond_ty.zigTypeTag(mod)) {
+                    .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
                     .Enum => blk: {
-                        var int_buffer: Value.Payload.U64 = undefined; // TODO: figure out of cond_ty is correct (something with enum literals)
-                        break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(target); // TODO: composite integer constants
+                        break :blk (try value.enumToInt(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
                     },
                     else => unreachable,
                 };
@@ -3164,6 +3149,7 @@ pub const DeclGen = struct {
     }

     fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+        const mod = self.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Asm, ty_pl.payload);

@@ -3246,7 +3232,7 @@ pub const DeclGen = struct {
             assert(as.errors.items.len != 0);
             assert(self.error_msg == null);
             const loc = LazySrcLoc.nodeOffset(0);
-            const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
+            const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index), mod);
             self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
             const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
@@ -3294,19 +3280,20 @@ pub const DeclGen = struct {
     fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef {
         _ = modifier;

+        const mod = self.module;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.Call, pl_op.payload);
         const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-        const callee_ty = self.air.typeOf(pl_op.operand);
-        const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
+        const callee_ty = self.typeOf(pl_op.operand);
+        const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
             .Fn => callee_ty,
             .Pointer => return self.fail("cannot call function pointers", .{}),
             else => unreachable,
         };
-        const fn_info = zig_fn_ty.fnInfo();
+        const fn_info = mod.typeToFunc(zig_fn_ty).?;
         const return_type = fn_info.return_type;
-        const result_type_id = try self.resolveTypeId(return_type);
+        const result_type_id = try self.resolveTypeId(return_type.toType());

         const result_id = self.spv.allocId();
         const callee_id = try self.resolve(pl_op.operand);
@@ -3319,8 +3306,8 @@ pub const DeclGen = struct {
             // before starting to emit OpFunctionCall instructions. Hence the
             // temporary params buffer.
             const arg_id = try self.resolve(arg);
-            const arg_ty = self.air.typeOf(arg);
-            if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue;
+            const arg_ty = self.typeOf(arg);
+            if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;

             params[n_params] = arg_id;
             n_params += 1;
@@ -3333,14 +3320,24 @@ pub const DeclGen = struct {
             .id_ref_3 = params[0..n_params],
         });

-        if (return_type.isNoReturn()) {
+        if (return_type == .noreturn_type) {
             try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
         }

-        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
+        if (self.liveness.isUnused(inst) or !return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
            return null;
        }

         return result_id;
     }
+
+    fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type {
+        const mod = self.module;
+        return self.air.typeOf(inst, &mod.intern_pool);
+    }
+
+    fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type {
+        const mod = self.module;
+        return self.air.typeOfIndex(inst, &mod.intern_pool);
+    }
 };
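The `typeOf`/`typeOfIndex` helpers added above capture the theme running through this whole patch: a `Type` is now little more than an index into the compiler's `InternPool`, so every query about a type must be answered through the `Module` that owns that pool. A minimal standalone sketch of that shape, using hypothetical `InternPool`/`Type` stand-ins rather than the compiler's real API:

```zig
const std = @import("std");

// Toy stand-ins for the real InternPool and Type; the actual compiler
// structures are far richer. The point is the calling convention: the
// type handle alone answers nothing, the pool must be supplied.
const InternPool = struct {
    items: std.ArrayListUnmanaged(Key) = .{},

    const Key = union(enum) {
        int: struct { bits: u16, signed: bool },
        bool_type,
    };

    fn indexToKey(ip: *const InternPool, index: u32) Key {
        return ip.items.items[index];
    }
};

const Type = struct {
    ip_index: u32,

    // Mirrors calls like `ty.intInfo(mod)` in the diff, where the Module
    // hands its pool to the query.
    fn bitSize(ty: Type, ip: *const InternPool) u16 {
        return switch (ip.indexToKey(ty.ip_index)) {
            .int => |info| info.bits,
            .bool_type => 1,
        };
    }
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var ip = InternPool{};
    defer ip.items.deinit(gpa.allocator());

    try ip.items.append(gpa.allocator(), .{ .int = .{ .bits = 32, .signed = true } });
    const ty = Type{ .ip_index = 0 }; // structurally equal types share one index
    std.debug.print("bits = {d}\n", .{ty.bitSize(&ip)});
}
```

This is why the backend grows a `mod`/`intern_pool` parameter on nearly every call site below.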
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index 1d4840aeb7..d53dcb4368 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -11,7 +11,8 @@
 const std = @import("std");
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;

-const ZigDecl = @import("../../Module.zig").Decl;
+const ZigModule = @import("../../Module.zig");
+const ZigDecl = ZigModule.Decl;

 const spec = @import("spec.zig");
 const Word = spec.Word;
@@ -389,8 +390,8 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void {
 /// Fetch the result-id of an OpString instruction that encodes the path of the source
 /// file of the decl. This function may also emit an OpSource with source-level information regarding
 /// the decl.
-pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
-    const path = decl.getFileScope().sub_file_path;
+pub fn resolveSourceFileName(self: *Module, zig_module: *ZigModule, zig_decl: *ZigDecl) !IdRef {
+    const path = zig_decl.getFileScope(zig_module).sub_file_path;
     const result = try self.source_file_names.getOrPut(self.gpa, path);
     if (!result.found_existing) {
         const file_result_id = self.allocId();
diff --git a/src/crash_report.zig b/src/crash_report.zig
index b2e3018de6..57b870c198 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -99,7 +99,7 @@ fn dumpStatusReport() !void {
         allocator,
         anal.body,
         anal.body_index,
-        block.namespace.file_scope,
+        mod.namespacePtr(block.namespace).file_scope,
         block_src_decl.src_node,
         6, // indent
         stderr,
@@ -108,7 +108,7 @@
         else => |e| return e,
     };
     try stderr.writeAll(" For full context, use the command\n zig ast-check -t ");
-    try writeFilePath(block.namespace.file_scope, stderr);
+    try writeFilePath(mod.namespacePtr(block.namespace).file_scope, stderr);
     try stderr.writeAll("\n\n");

     var parent = anal.parent;
@@ -121,7 +121,7 @@
         print_zir.renderSingleInstruction(
             allocator,
             curr.body[curr.body_index],
-            curr.block.namespace.file_scope,
+            mod.namespacePtr(curr.block.namespace).file_scope,
             curr_block_src_decl.src_node,
             6, // indent
             stderr,
@@ -148,7 +148,7 @@ fn writeFilePath(file: *Module.File, stream: anytype) !void {
 }

 fn writeFullyQualifiedDeclWithFile(mod: *Module, decl: *Decl, stream: anytype) !void {
-    try writeFilePath(decl.getFileScope(), stream);
+    try writeFilePath(decl.getFileScope(mod), stream);
     try stream.writeAll(": ");
     try decl.renderFullyQualifiedDebugName(mod, stream);
 }
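The crash-report fix follows the same migration in miniature: `block.namespace` is no longer a pointer but a small handle that must be resolved through `mod.namespacePtr(...)` before its fields can be read. A short sketch of that handle-resolution pattern, with illustrative types only (not the real `Module`):

```zig
const std = @import("std");

// Hypothetical types: what used to be stored as a *Namespace is now an
// index, resolved through the owning Module on demand.
const Namespace = struct { file_scope: []const u8 };

const Module = struct {
    namespaces: []Namespace,

    // Counterpart of the `mod.namespacePtr(block.namespace)` calls above.
    fn namespacePtr(mod: *Module, index: u32) *Namespace {
        return &mod.namespaces[index];
    }
};

pub fn main() void {
    var storage = [_]Namespace{.{ .file_scope = "main.zig" }};
    var mod = Module{ .namespaces = &storage };
    const block_namespace: u32 = 0; // the handle a Block would carry
    std.debug.print("{s}\n", .{mod.namespacePtr(block_namespace).file_scope});
}
```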
diff --git a/src/link.zig b/src/link.zig
index 471b26ae9f..c184f7ed7c 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -502,8 +502,6 @@ pub const File = struct {
     /// of the final binary.
     pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl_index: Module.Decl.Index) UpdateDeclError!u32 {
         if (build_options.only_c) @compileError("unreachable");
-        const decl = base.options.module.?.declPtr(decl_index);
-        log.debug("lowerUnnamedConst {*} ({s})", .{ decl, decl.name });
         switch (base.tag) {
             // zig fmt: off
             .coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl_index),
@@ -543,7 +541,6 @@ pub const File = struct {
     /// May be called before or after updateDeclExports for any given Decl.
     pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
         const decl = module.declPtr(decl_index);
-        log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmt(module) });
         assert(decl.has_tv);
         if (build_options.only_c) {
             assert(base.tag == .c);
@@ -564,34 +561,27 @@ pub const File = struct {
     }

     /// May be called before or after updateDeclExports for any given Decl.
-    pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
-        const owner_decl = module.declPtr(func.owner_decl);
-        log.debug("updateFunc {*} ({s}), type={}", .{
-            owner_decl, owner_decl.name, owner_decl.ty.fmt(module),
-        });
+    pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void {
         if (build_options.only_c) {
             assert(base.tag == .c);
-            return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness);
+            return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness);
         }
         switch (base.tag) {
             // zig fmt: off
-            .coff  => return @fieldParentPtr(Coff,  "base", base).updateFunc(module, func, air, liveness),
-            .elf   => return @fieldParentPtr(Elf,   "base", base).updateFunc(module, func, air, liveness),
-            .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness),
-            .c     => return @fieldParentPtr(C,     "base", base).updateFunc(module, func, air, liveness),
-            .wasm  => return @fieldParentPtr(Wasm,  "base", base).updateFunc(module, func, air, liveness),
-            .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness),
-            .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness),
-            .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func, air, liveness),
+            .coff  => return @fieldParentPtr(Coff,  "base", base).updateFunc(module, func_index, air, liveness),
+            .elf   => return @fieldParentPtr(Elf,   "base", base).updateFunc(module, func_index, air, liveness),
+            .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func_index, air, liveness),
+            .c     => return @fieldParentPtr(C,     "base", base).updateFunc(module, func_index, air, liveness),
+            .wasm  => return @fieldParentPtr(Wasm,  "base", base).updateFunc(module, func_index, air, liveness),
+            .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func_index, air, liveness),
+            .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func_index, air, liveness),
+            .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func_index, air, liveness),
             // zig fmt: on
         }
     }

     pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
         const decl = module.declPtr(decl_index);
-        log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
-            decl, decl.name, decl.src_line + 1,
-        });
         assert(decl.has_tv);
         if (build_options.only_c) {
             assert(base.tag == .c);
@@ -867,7 +857,6 @@ pub const File = struct {
         exports: []const *Module.Export,
     ) UpdateDeclExportsError!void {
         const decl = module.declPtr(decl_index);
-        log.debug("updateDeclExports {*} ({s})", .{ decl, decl.name });
         assert(decl.has_tv);
         if (build_options.only_c) {
             assert(base.tag == .c);
@@ -1124,13 +1113,13 @@ pub const File = struct {
         pub fn initDecl(kind: Kind, decl: ?Module.Decl.Index, mod: *Module) LazySymbol {
             return .{ .kind = kind, .ty = if (decl) |decl_index|
-                mod.declPtr(decl_index).val.castTag(.ty).?.data
+                mod.declPtr(decl_index).val.toType()
             else
                 Type.anyerror };
         }

-        pub fn getDecl(self: LazySymbol) Module.Decl.OptionalIndex {
-            return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull());
+        pub fn getDecl(self: LazySymbol, mod: *Module) Module.Decl.OptionalIndex {
+            return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull(mod));
         }
     };
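Across the whole linker interface, `updateFunc` now receives a `Module.Fn.Index` rather than a `*Module.Fn`, and each backend resolves it with `module.funcPtr(func_index)` on entry. A rough sketch of that design under assumed toy types (the names below are illustrative, not the real linker API):

```zig
const std = @import("std");

// Hypothetical miniature of the Fn.Index change: the dispatch layer passes
// a stable index; the backend dereferences it only when it needs the data.
const Fn = struct { owner_decl: u32 };
const FnIndex = u32;

const Module = struct {
    funcs: []Fn,

    fn funcPtr(mod: *Module, index: FnIndex) *Fn {
        return &mod.funcs[index];
    }
};

// Each backend opens the way C.zig and Coff.zig now do:
// `const func = module.funcPtr(func_index);`
fn updateFunc(mod: *Module, func_index: FnIndex) void {
    const func = mod.funcPtr(func_index);
    std.debug.print("updating fn owned by decl {d}\n", .{func.owner_decl});
}

pub fn main() void {
    var funcs = [_]Fn{.{ .owner_decl = 7 }};
    var mod = Module{ .funcs = &funcs };
    updateFunc(&mod, 0); // an index stays valid if the table reallocates; a pointer would not
}
```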
diff --git a/src/link/C.zig b/src/link/C.zig
index 1a25bfe231..9a42daa061 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -6,6 +6,7 @@ const fs = std.fs;

 const C = @This();
 const Module = @import("../Module.zig");
+const InternPool = @import("../InternPool.zig");
 const Compilation = @import("../Compilation.zig");
 const codegen = @import("../codegen/c.zig");
 const link = @import("../link.zig");
@@ -87,12 +88,13 @@ pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void {
     }
 }

-pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *C, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
     const tracy = trace(@src());
     defer tracy.end();

     const gpa = self.base.allocator;
+    const func = module.funcPtr(func_index);
     const decl_index = func.owner_decl;
     const gop = try self.decl_table.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
@@ -111,7 +113,7 @@
         .value_map = codegen.CValueMap.init(gpa),
         .air = air,
         .liveness = liveness,
-        .func = func,
+        .func_index = func_index,
         .object = .{
             .dg = .{
                 .gpa = gpa,
@@ -288,11 +290,11 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo
     }

     {
-        var export_names = std.StringHashMapUnmanaged(void){};
+        var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
         defer export_names.deinit(gpa);
         try export_names.ensureTotalCapacity(gpa, @intCast(u32, module.decl_exports.entries.len));
         for (module.decl_exports.values()) |exports| for (exports.items) |@"export"|
-            try export_names.put(gpa, @"export".options.name, {});
+            try export_names.put(gpa, @"export".opts.name, {});

         while (f.remaining_decls.popOrNull()) |kv| {
             const decl_index = kv.key;
@@ -552,10 +554,11 @@
 fn flushDecl(
     self: *C,
     f: *Flush,
     decl_index: Module.Decl.Index,
-    export_names: std.StringHashMapUnmanaged(void),
+    export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
 ) FlushDeclError!void {
     const gpa = self.base.allocator;
-    const decl = self.base.options.module.?.declPtr(decl_index);
+    const mod = self.base.options.module.?;
+    const decl = mod.declPtr(decl_index);
     // Before flushing any particular Decl we must ensure its
     // dependencies are already flushed, so that the order in the .c
     // file comes out correctly.
@@ -569,7 +572,7 @@ fn flushDecl(
     try self.flushLazyFns(f, decl_block.lazy_fns);
     try f.all_buffers.ensureUnusedCapacity(gpa, 1);
-    if (!(decl.isExtern() and export_names.contains(mem.span(decl.name))))
+    if (!(decl.isExtern(mod) and export_names.contains(decl.name)))
         f.appendBufAssumeCapacity(decl_block.fwd_decl.items);
 }
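Switching `export_names` from `std.StringHashMapUnmanaged(void)` to `std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void)` means the keys become small integer handles instead of byte slices, so `contains(decl.name)` is an integer compare with no hashing of string bytes. A self-contained sketch (the `NullTerminatedString` definition here is a stand-in, not the real InternPool type):

```zig
const std = @import("std");

// Stand-in for InternPool.NullTerminatedString: an opaque 32-bit handle
// into a string pool. Two names are equal exactly when the handles match.
const NullTerminatedString = enum(u32) { _ };

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var export_names: std.AutoHashMapUnmanaged(NullTerminatedString, void) = .{};
    defer export_names.deinit(allocator);

    const name = @intToEnum(NullTerminatedString, 42); // index into the pool
    try export_names.put(allocator, name, {});

    // Hashing and equality operate on the integer handle; no byte
    // comparison happens anywhere in the lookup.
    std.debug.print("contains: {}\n", .{export_names.contains(name)});
}
```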
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 62a208406e..f7785858dd 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1032,20 +1032,21 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
     self.getAtomPtr(atom_index).sym_index = 0;
 }

-pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
     if (build_options.skip_non_native and builtin.object_format != .coff) {
         @panic("Attempted to compile for object format that was disabled by build configuration");
     }
     if (build_options.have_llvm) {
         if (self.llvm_object) |llvm_object| {
-            return llvm_object.updateFunc(module, func, air, liveness);
+            return llvm_object.updateFunc(mod, func_index, air, liveness);
         }
     }
     const tracy = trace(@src());
     defer tracy.end();

+    const func = mod.funcPtr(func_index);
     const decl_index = func.owner_decl;
-    const decl = module.declPtr(decl_index);
+    const decl = mod.declPtr(decl_index);
     const atom_index = try self.getOrCreateAtomForDecl(decl_index);
     self.freeUnnamedConsts(decl_index);
@@ -1056,8 +1057,8 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
     const res = try codegen.generateFunction(
         &self.base,
-        decl.srcLoc(),
-        func,
+        decl.srcLoc(mod),
+        func_index,
         air,
         liveness,
         &code_buffer,
@@ -1067,7 +1068,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
         .ok => code_buffer.items,
         .fail => |em| {
             decl.analysis = .codegen_failure;
-            try module.failed_decls.put(module.gpa, decl_index, em);
+            try mod.failed_decls.put(mod.gpa, decl_index, em);
             return;
         },
     };
@@ -1076,7 +1077,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
     // Since we updated the vaddr and the size, each corresponding export
     // symbol also needs to be updated.
-    return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
+    return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
 }

 pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
@@ -1096,8 +1097,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
     const atom_index = try self.createAtom();

     const sym_name = blk: {
-        const decl_name = try decl.getFullyQualifiedName(mod);
-        defer gpa.free(decl_name);
+        const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));

         const index = unnamed_consts.items.len;
         break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
@@ -1110,7 +1110,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
         sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
     }

-    const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
+    const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{
         .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
     });
     var code = switch (res) {
@@ -1123,7 +1123,7 @@
         },
     };

-    const required_alignment = tv.ty.abiAlignment(self.base.options.target);
+    const required_alignment = tv.ty.abiAlignment(mod);
     const atom = self.getAtomPtr(atom_index);
     atom.size = @intCast(u32, code.len);
     atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
@@ -1141,25 +1141,24 @@
 pub fn updateDecl(
     self: *Coff,
-    module: *Module,
+    mod: *Module,
     decl_index: Module.Decl.Index,
 ) link.File.UpdateDeclError!void {
     if (build_options.skip_non_native and builtin.object_format != .coff) {
         @panic("Attempted to compile for object format that was disabled by build configuration");
     }
     if (build_options.have_llvm) {
-        if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index);
+        if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
     }
     const tracy = trace(@src());
     defer tracy.end();

-    const decl = module.declPtr(decl_index);
+    const decl = mod.declPtr(decl_index);

-    if (decl.val.tag() == .extern_fn) {
+    if (decl.val.getExternFunc(mod)) |_| {
         return; // TODO Should we do more when front-end analyzed extern decl?
     }
-    if (decl.val.castTag(.variable)) |payload| {
-        const variable = payload.data;
+    if (decl.val.getVariable(mod)) |variable| {
         if (variable.is_extern) {
             return; // TODO Should we do more when front-end analyzed extern decl?
         }
@@ -1172,8 +1171,8 @@
     var code_buffer = std.ArrayList(u8).init(self.base.allocator);
     defer code_buffer.deinit();

-    const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
-    const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
+    const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
+    const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
         .ty = decl.ty,
         .val = decl_val,
     }, &code_buffer, .none, .{
@@ -1183,7 +1182,7 @@
         .ok => code_buffer.items,
         .fail => |em| {
             decl.analysis = .codegen_failure;
-            try module.failed_decls.put(module.gpa, decl_index, em);
+            try mod.failed_decls.put(mod.gpa, decl_index, em);
             return;
         },
     };
@@ -1192,7 +1191,7 @@
     // Since we updated the vaddr and the size, each corresponding export
     // symbol also needs to be updated.
-    return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
+    return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
 }

 fn updateLazySymbolAtom(
@@ -1217,8 +1216,8 @@
     const atom = self.getAtomPtr(atom_index);
     const local_sym_index = atom.getSymbolIndex().?;

-    const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl|
-        mod.declPtr(owner_decl).srcLoc()
+    const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl|
+        mod.declPtr(owner_decl).srcLoc(mod)
     else
         Module.SrcLoc{
             .file_scope = undefined,
@@ -1262,7 +1261,8 @@
 }

 pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index {
-    const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl());
+    const mod = self.base.options.module.?;
+    const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod));
     errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
     if (!gop.found_existing) gop.value_ptr.* = .{};
     const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) {
@@ -1277,7 +1277,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Ato
     metadata.state.* = .pending_flush;
     const atom = metadata.atom.*;
     // anyerror needs to be deferred until flushModule
-    if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
+    if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
         .code => self.text_section_index.?,
         .const_data => self.rdata_section_index.?,
     });
@@ -1299,10 +1299,11 @@
 fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
     const decl = self.base.options.module.?.declPtr(decl_index);
     const ty = decl.ty;
-    const zig_ty = ty.zigTypeTag();
+    const mod = self.base.options.module.?;
+    const zig_ty = ty.zigTypeTag(mod);
     const val = decl.val;
     const index: u16 = blk: {
-        if (val.isUndefDeep()) {
+        if (val.isUndefDeep(mod)) {
             // TODO in release-fast and release-small, we should put undef in .bss
             break :blk self.data_section_index.?;
         }
@@ -1311,7 +1312,7 @@
             // TODO: what if this is a function pointer?
             .Fn => break :blk self.text_section_index.?,
             else => {
-                if (val.castTag(.variable)) |_| {
+                if (val.getVariable(mod)) |_| {
                     break :blk self.data_section_index.?;
                 }
                 break :blk self.rdata_section_index.?;
@@ -1322,15 +1323,13 @@
 }

 fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, complex_type: coff.ComplexType) !void {
-    const gpa = self.base.allocator;
     const mod = self.base.options.module.?;
     const decl = mod.declPtr(decl_index);

-    const decl_name = try decl.getFullyQualifiedName(mod);
-    defer gpa.free(decl_name);
+    const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));

     log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
-    const required_alignment = decl.getAlignment(self.base.options.target);
+    const required_alignment = decl.getAlignment(mod);

     const decl_metadata = self.decls.get(decl_index).?;
     const atom_index = decl_metadata.atom;
@@ -1410,7 +1409,7 @@
 pub fn updateDeclExports(
     self: *Coff,
-    module: *Module,
+    mod: *Module,
     decl_index: Module.Decl.Index,
     exports: []const *Module.Export,
 ) link.File.UpdateDeclExportsError!void {
@@ -1418,61 +1417,60 @@
         @panic("Attempted to compile for object format that was disabled by build configuration");
     }

+    const ip = &mod.intern_pool;
+
     if (build_options.have_llvm) {
         // Even in the case of LLVM, we need to notice certain exported symbols in order to
         // detect the default subsystem.
         for (exports) |exp| {
-            const exported_decl = module.declPtr(exp.exported_decl);
-            if (exported_decl.getFunction() == null) continue;
+            const exported_decl = mod.declPtr(exp.exported_decl);
+            if (exported_decl.getOwnedFunctionIndex(mod) == .none) continue;
             const winapi_cc = switch (self.base.options.target.cpu.arch) {
                 .x86 => std.builtin.CallingConvention.Stdcall,
                 else => std.builtin.CallingConvention.C,
             };
-            const decl_cc = exported_decl.ty.fnCallingConvention();
-            if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and
+            const decl_cc = exported_decl.ty.fnCallingConvention(mod);
+            if (decl_cc == .C and ip.stringEqlSlice(exp.opts.name, "main") and
                 self.base.options.link_libc)
             {
-                module.stage1_flags.have_c_main = true;
+                mod.stage1_flags.have_c_main = true;
             } else if (decl_cc == winapi_cc and self.base.options.target.os.tag == .windows) {
-                if (mem.eql(u8, exp.options.name, "WinMain")) {
-                    module.stage1_flags.have_winmain = true;
-                } else if (mem.eql(u8, exp.options.name, "wWinMain")) {
-                    module.stage1_flags.have_wwinmain = true;
-                } else if (mem.eql(u8, exp.options.name, "WinMainCRTStartup")) {
-                    module.stage1_flags.have_winmain_crt_startup = true;
-                } else if (mem.eql(u8, exp.options.name, "wWinMainCRTStartup")) {
-                    module.stage1_flags.have_wwinmain_crt_startup = true;
-                } else if (mem.eql(u8, exp.options.name, "DllMainCRTStartup")) {
-                    module.stage1_flags.have_dllmain_crt_startup = true;
+                if (ip.stringEqlSlice(exp.opts.name, "WinMain")) {
+                    mod.stage1_flags.have_winmain = true;
+                } else if (ip.stringEqlSlice(exp.opts.name, "wWinMain")) {
+                    mod.stage1_flags.have_wwinmain = true;
+                } else if (ip.stringEqlSlice(exp.opts.name, "WinMainCRTStartup")) {
+                    mod.stage1_flags.have_winmain_crt_startup = true;
+                } else if (ip.stringEqlSlice(exp.opts.name, "wWinMainCRTStartup")) {
+                    mod.stage1_flags.have_wwinmain_crt_startup = true;
+                } else if (ip.stringEqlSlice(exp.opts.name, "DllMainCRTStartup")) {
+                    mod.stage1_flags.have_dllmain_crt_startup = true;
                }
            }
        }
-        if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports);
+        if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports);
     }

-    const tracy = trace(@src());
-    defer tracy.end();
-
     const gpa = self.base.allocator;
-    const decl = module.declPtr(decl_index);
+    const decl = mod.declPtr(decl_index);
     const atom_index = try self.getOrCreateAtomForDecl(decl_index);
     const atom = self.getAtom(atom_index);
     const decl_sym = atom.getSymbol(self);
     const decl_metadata = self.decls.getPtr(decl_index).?;

     for (exports) |exp| {
-        log.debug("adding new export '{s}'", .{exp.options.name});
+        log.debug("adding new export '{}'", .{exp.opts.name.fmt(&mod.intern_pool)});

-        if (exp.options.section) |section_name| {
+        if (mod.intern_pool.stringToSliceUnwrap(exp.opts.section)) |section_name| {
             if (!mem.eql(u8, section_name, ".text")) {
-                try module.failed_exports.putNoClobber(
-                    module.gpa,
+                try mod.failed_exports.putNoClobber(
+                    gpa,
                     exp,
                     try Module.ErrorMsg.create(
                         gpa,
-                        decl.srcLoc(),
+                        decl.srcLoc(mod),
                         "Unimplemented: ExportOptions.section",
                         .{},
                     ),
@@ -1481,13 +1479,13 @@
             }
         }

-        if (exp.options.linkage == .LinkOnce) {
-            try module.failed_exports.putNoClobber(
-                module.gpa,
+        if (exp.opts.linkage == .LinkOnce) {
+            try mod.failed_exports.putNoClobber(
+                gpa,
                 exp,
                 try Module.ErrorMsg.create(
                     gpa,
-                    decl.srcLoc(),
+                    decl.srcLoc(mod),
                     "Unimplemented: GlobalLinkage.LinkOnce",
                     .{},
                 ),
@@ -1495,19 +1493,19 @@
             continue;
         }

-        const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
+        const sym_index = decl_metadata.getExport(self, mod.intern_pool.stringToSlice(exp.opts.name)) orelse blk: {
             const sym_index = try self.allocateSymbol();
             try decl_metadata.exports.append(gpa, sym_index);
             break :blk sym_index;
         };
         const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
         const sym = self.getSymbolPtr(sym_loc);
-        try self.setSymbolName(sym, exp.options.name);
+        try self.setSymbolName(sym, mod.intern_pool.stringToSlice(exp.opts.name));
         sym.value = decl_sym.value;
         sym.section_number = @intToEnum(coff.SectionNumber, self.text_section_index.? + 1);
         sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL };

-        switch (exp.options.linkage) {
+        switch (exp.opts.linkage) {
             .Strong => {
                 sym.storage_class = .EXTERNAL;
             },
@@ -1520,9 +1518,15 @@
     }
 }

-pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
+pub fn deleteDeclExport(
+    self: *Coff,
+    decl_index: Module.Decl.Index,
+    name_ip: InternPool.NullTerminatedString,
+) void {
     if (self.llvm_object) |_| return;
     const metadata = self.decls.getPtr(decl_index) orelse return;
+    const mod = self.base.options.module.?;
+    const name = mod.intern_pool.stringToSlice(name_ip);
     const sym_index = metadata.getExportPtr(self, name) orelse return;

     const gpa = self.base.allocator;
@@ -2538,6 +2542,7 @@ const ImportTable = @import("Coff/ImportTable.zig");
 const Liveness = @import("../Liveness.zig");
 const LlvmObject = @import("../codegen/llvm.zig").Object;
 const Module = @import("../Module.zig");
+const InternPool = @import("../InternPool.zig");
 const Object = @import("Coff/Object.zig");
 const Relocation = @import("Coff/Relocation.zig");
 const TableSection = @import("table_section.zig").TableSection;
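In `updateDeclExports`, `mem.eql(u8, exp.options.name, "main")` becomes `ip.stringEqlSlice(exp.opts.name, "main")`: the export name is now an interned handle, and the pool is consulted only when a byte comparison against a literal is genuinely needed. A toy analogue of that pair of operations (the `Pool` type is illustrative, far simpler than the real InternPool):

```zig
const std = @import("std");

// Toy string pool. Interned names are indices; slices are materialized
// lazily, mirroring stringToSlice/stringEqlSlice in the diff.
const Pool = struct {
    strings: std.ArrayListUnmanaged([]const u8) = .{},

    fn stringToSlice(pool: *const Pool, index: u32) []const u8 {
        return pool.strings.items[index];
    }

    // Analogue of `ip.stringEqlSlice(exp.opts.name, "main")` above.
    fn stringEqlSlice(pool: *const Pool, index: u32, slice: []const u8) bool {
        return std.mem.eql(u8, pool.stringToSlice(index), slice);
    }
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var pool = Pool{};
    defer pool.strings.deinit(gpa.allocator());

    try pool.strings.append(gpa.allocator(), "main");
    const exported_name: u32 = 0; // interned handle held by the Export
    std.debug.print("is main: {}\n", .{pool.stringEqlSlice(exported_name, "main")});
}
```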
     fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
-        const resolv = self.abbrev_resolver.getContext(ty, .{
-            .mod = self.mod,
-        }) orelse blk: {
+        const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: {
             const sym_index = @intCast(u32, self.abbrev_table.items.len);
             try self.abbrev_table.append(self.gpa, .{
                 .atom_index = atom_index,
@@ -151,12 +145,8 @@ pub const DeclState = struct {
                 .offset = undefined,
             });
             log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.mod) });
-            try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{
-                .mod = self.mod,
-            });
-            break :blk self.abbrev_resolver.getContext(ty, .{
-                .mod = self.mod,
-            }).?;
+            try self.abbrev_resolver.putNoClobber(self.gpa, ty.toIntern(), sym_index);
+            break :blk sym_index;
         };
         log.debug("{x}: %{d} + 0", .{ offset, resolv });
         try self.abbrev_relocs.append(self.gpa, .{
@@ -169,16 +159,16 @@ pub const DeclState = struct {

     fn addDbgInfoType(
         self: *DeclState,
-        module: *Module,
+        mod: *Module,
         atom_index: Atom.Index,
         ty: Type,
     ) error{OutOfMemory}!void {
         const arena = self.abbrev_type_arena.allocator();
         const dbg_info_buffer = &self.dbg_info;
-        const target = module.getTarget();
+        const target = mod.getTarget();
         const target_endian = target.cpu.arch.endian();

-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .NoReturn => unreachable,
             .Void => {
                 try dbg_info_buffer.append(@enumToInt(AbbrevKind.pad1));
@@ -189,12 +179,12 @@ pub const DeclState = struct {
                 // DW.AT.encoding, DW.FORM.data1
                 dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean);
                 // DW.AT.byte_size, DW.FORM.udata
-                try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+                try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
                 // DW.AT.name, DW.FORM.string
-                try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+                try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
             },
             .Int => {
-                const info = ty.intInfo(target);
+                const info = ty.intInfo(mod);
                 try dbg_info_buffer.ensureUnusedCapacity(12);
                 dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type));
                 // DW.AT.encoding, DW.FORM.data1
@@ -203,31 +193,30 @@ pub const DeclState = struct {
                     .unsigned => DW.ATE.unsigned,
                 });
                 // DW.AT.byte_size, DW.FORM.udata
-                try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+                try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
                 // DW.AT.name, DW.FORM.string
-                try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+                try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
             },
             .Optional => {
-                if (ty.isPtrLikeOptional()) {
+                if (ty.isPtrLikeOptional(mod)) {
                     try dbg_info_buffer.ensureUnusedCapacity(12);
                     dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type));
                     // DW.AT.encoding, DW.FORM.data1
                     dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
                     // DW.AT.byte_size, DW.FORM.udata
-                    try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+                    try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
                     // DW.AT.name, DW.FORM.string
-                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
                 } else {
                     // Non-pointer optionals are structs: struct { .maybe = *, .val = * }
-                    var buf = try arena.create(Type.Payload.ElemType);
-                    const payload_ty = ty.optionalChild(buf);
+                    const payload_ty = ty.optionalChild(mod);
                     // DW.AT.structure_type
                     try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
                     // DW.AT.byte_size, DW.FORM.udata
-                    const abi_size = ty.abiSize(target);
+                    const abi_size = ty.abiSize(mod);
                     try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
                     // DW.AT.name, DW.FORM.string
-                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
                     // DW.AT.member
                     try dbg_info_buffer.ensureUnusedCapacity(7);
                     dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@@ -251,14 +240,14 @@ pub const DeclState = struct {
                     try dbg_info_buffer.resize(index + 4);
                     try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
                     // DW.AT.data_member_location, DW.FORM.udata
-                    const offset = abi_size - payload_ty.abiSize(target);
+                    const offset = abi_size - payload_ty.abiSize(mod);
                     try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
                     // DW.AT.structure_type delimit children
                     try dbg_info_buffer.append(0);
                 }
             },
             .Pointer => {
-                if (ty.isSlice()) {
+                if (ty.isSlice(mod)) {
                     // Slices are structs: struct { .ptr = *, .len = N }
                     const ptr_bits = target.ptrBitWidth();
                     const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8));
                     // DW.AT.structure_type
                     try dbg_info_buffer.ensureUnusedCapacity(2);
                     dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_type));
                     // DW.AT.byte_size, DW.FORM.udata
-                    try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+                    try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
                     // DW.AT.name, DW.FORM.string
-                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
                     // DW.AT.member
                     try dbg_info_buffer.ensureUnusedCapacity(5);
                     dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@@ -278,8 +267,7 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     var index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    var buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
-                    const ptr_ty = ty.slicePtrFieldType(buf);
+                    const ptr_ty = ty.slicePtrFieldType(mod);
                     try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index));
                     // DW.AT.data_member_location, DW.FORM.udata
                     try dbg_info_buffer.ensureUnusedCapacity(6);
@@ -304,18 +292,18 @@ pub const DeclState = struct {
                     // DW.AT.type, DW.FORM.ref4
                     const index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
-                    try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
+                    try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
                 }
             },
             .Array => {
                 // DW.AT.array_type
                 try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_type));
                 // DW.AT.name, DW.FORM.string
-                try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+                try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
                 // DW.AT.type, DW.FORM.ref4
                 var index = dbg_info_buffer.items.len;
                 try dbg_info_buffer.resize(index + 4);
-                try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
+                try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
                 // DW.AT.subrange_type
                 try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim));
                 // DW.AT.type, DW.FORM.ref4
@@ -323,7 +311,7 @@ pub const DeclState = struct {
                 try dbg_info_buffer.resize(index + 4);
                 try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
                 // DW.AT.count, DW.FORM.udata
-                const len = ty.arrayLenIncludingSentinel();
+                const len = ty.arrayLenIncludingSentinel(mod);
                 try leb128.writeULEB128(dbg_info_buffer.writer(), len);
                 // DW.AT.array_type delimit children
                 try dbg_info_buffer.append(0);
             },
@@ -332,15 +320,14 @@ pub const DeclState = struct {
                 // DW.AT.structure_type
                 try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
                 // DW.AT.byte_size, DW.FORM.udata
-                try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+                try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));

-                switch (ty.tag()) {
-                    .tuple, .anon_struct => {
+                switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                    .anon_struct_type => |fields| {
                         // DW.AT.name, DW.FORM.string
-                        try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
+                        try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});

-                        const fields = ty.tupleFields();
-                        for (fields.types, 0..) |field, field_index| {
+                        for (fields.types, 0..) |field_ty, field_index| {
                             // DW.AT.member
                             try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
                             // DW.AT.name, DW.FORM.string
@@ -348,29 +335,32 @@ pub const DeclState = struct {
                             // DW.AT.type, DW.FORM.ref4
                             var index = dbg_info_buffer.items.len;
                             try dbg_info_buffer.resize(index + 4);
-                            try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
+                            try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index));
                             // DW.AT.data_member_location, DW.FORM.udata
-                            const field_off = ty.structFieldOffset(field_index, target);
+                            const field_off = ty.structFieldOffset(field_index, mod);
                             try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
                         }
                     },
-                    else => {
+                    .struct_type => |struct_type| s: {
+                        const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
                         // DW.AT.name, DW.FORM.string
-                        const struct_name = try ty.nameAllocArena(arena, module);
+                        const struct_name = try ty.nameAllocArena(arena, mod);
                         try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
                         dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
                         dbg_info_buffer.appendAssumeCapacity(0);

-                        const struct_obj = ty.castTag(.@"struct").?.data;
                         if (struct_obj.layout == .Packed) {
                             log.debug("TODO implement .debug_info for packed structs", .{});
                             break :blk;
                         }

-                        const fields = ty.structFields();
-                        for (fields.keys(), 0..) |field_name, field_index| {
-                            const field = fields.get(field_name).?;
-                            if (!field.ty.hasRuntimeBits()) continue;
+                        for (
+                            struct_obj.fields.keys(),
+                            struct_obj.fields.values(),
+                            0..,
+                        ) |field_name_ip, field, field_index| {
+                            if (!field.ty.hasRuntimeBits(mod)) continue;
+                            const field_name = mod.intern_pool.stringToSlice(field_name_ip);
                             // DW.AT.member
                             try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
                             dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@@ -382,10 +372,11 @@ pub const DeclState = struct {
                             try dbg_info_buffer.resize(index + 4);
                             try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
                             // DW.AT.data_member_location, DW.FORM.udata
-                            const field_off = ty.structFieldOffset(field_index, target);
+                            const field_off = ty.structFieldOffset(field_index, mod);
                             try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
                         }
                     },
+                    else => unreachable,
                 }

                 // DW.AT.structure_type delimit children
@@ -395,21 +386,16 @@ pub const DeclState = struct {
                 // DW.AT.enumeration_type
                 try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type));
                 // DW.AT.byte_size, DW.FORM.udata
-                try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
+                try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
                 // DW.AT.name, DW.FORM.string
-                const enum_name = try ty.nameAllocArena(arena, module);
+                const enum_name = try ty.nameAllocArena(arena, mod);
                 try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1);
                 dbg_info_buffer.appendSliceAssumeCapacity(enum_name);
                 dbg_info_buffer.appendAssumeCapacity(0);

-                const fields = ty.enumFields();
-                const values: ?Module.EnumFull.ValueMap = switch (ty.tag()) {
-                    .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.values,
-                    .enum_simple => null,
-                    .enum_numbered => ty.castTag(.enum_numbered).?.data.values,
-                    else => unreachable,
-                };
-                for (fields.keys(), 0..) |field_name, field_i| {
+                const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+                for (enum_type.names, 0..) |field_name_index, field_i| {
+                    const field_name = mod.intern_pool.stringToSlice(field_name_index);
                     // DW.AT.enumerator
                     try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2 + @sizeOf(u64));
                     dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant));
@@ -417,15 +403,14 @@ pub const DeclState = struct {
                     dbg_info_buffer.appendSliceAssumeCapacity(field_name);
                     dbg_info_buffer.appendAssumeCapacity(0);
                     // DW.AT.const_value, DW.FORM.data8
-                    const value: u64 = if (values) |vals| value: {
-                        if (vals.count() == 0) break :value @intCast(u64, field_i); // auto-numbered
-                        const value = vals.keys()[field_i];
+                    const value: u64 = value: {
+                        if (enum_type.values.len == 0) break :value field_i; // auto-numbered
+                        const value = enum_type.values[field_i];
                         // TODO do not assume a 64bit enum value - could be bigger.
                         // See https://github.com/ziglang/zig/issues/645
-                        var int_buffer: Value.Payload.U64 = undefined;
-                        const field_int_val = value.enumToInt(ty, &int_buffer);
-                        break :value @bitCast(u64, field_int_val.toSignedInt(target));
-                    } else @intCast(u64, field_i);
+                        const field_int_val = try value.toValue().enumToInt(ty, mod);
+                        break :value @bitCast(u64, field_int_val.toSignedInt(mod));
+                    };
                     mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
                 }
@@ -433,12 +418,12 @@ pub const DeclState = struct {
                 try dbg_info_buffer.append(0);
             },
             .Union => {
-                const layout = ty.unionGetLayout(target);
-                const union_obj = ty.cast(Type.Payload.Union).?.data;
+                const layout = ty.unionGetLayout(mod);
+                const union_obj = mod.typeToUnion(ty).?;
                 const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
                 const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
                 const is_tagged = layout.tag_size > 0;
-                const union_name = try ty.nameAllocArena(arena, module);
+                const union_name = try ty.nameAllocArena(arena, mod);

                 // TODO this is temporary to match current state of unions in Zig - we don't yet have
                 // safety checks implemented meaning the implicit tag is not yet stored and generated
@@ -478,14 +463,15 @@
                     try dbg_info_buffer.writer().print("{s}\x00", .{union_name});
                 }

-                const fields = ty.unionFields();
+                const fields = ty.unionFields(mod);
                 for (fields.keys()) |field_name| {
                     const field = fields.get(field_name).?;
-                    if (!field.ty.hasRuntimeBits()) continue;
+                    if (!field.ty.hasRuntimeBits(mod)) continue;
                     // DW.AT.member
                     try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
                     // DW.AT.name, DW.FORM.string
-                    try dbg_info_buffer.writer().print("{s}\x00", .{field_name});
+                    try dbg_info_buffer.appendSlice(mod.intern_pool.stringToSlice(field_name));
+                    try dbg_info_buffer.append(0);
                     // DW.AT.type, DW.FORM.ref4
                     const index = dbg_info_buffer.items.len;
                     try dbg_info_buffer.resize(index + 4);
@@ -517,30 +503,30 @@
             .ErrorSet => {
                 try addDbgInfoErrorSet(
                     self.abbrev_type_arena.allocator(),
-                    module,
+                    mod,
                     ty,
                     target,
                     &self.dbg_info,
                 );
             },
             .ErrorUnion => {
-                const error_ty = ty.errorUnionSet();
-                const payload_ty = ty.errorUnionPayload();
-                const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(target);
-                const error_align = Type.anyerror.abiAlignment(target);
-                const abi_size = ty.abiSize(target);
-                const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(target) else 0;
-                const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(target);
+                const error_ty = ty.errorUnionSet(mod);
+                const payload_ty = ty.errorUnionPayload(mod);
+                const payload_align = if (payload_ty.isNoReturn(mod)) 0 else payload_ty.abiAlignment(mod);
+                const error_align = Type.anyerror.abiAlignment(mod);
+                const abi_size = ty.abiSize(mod);
+                const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0;
+                const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod);

                 // DW.AT.structure_type
                 try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
                 // DW.AT.byte_size, DW.FORM.udata
                 try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
                 // DW.AT.name, DW.FORM.string
-                const name = try ty.nameAllocArena(arena, module);
+                const name = try ty.nameAllocArena(arena, mod);
                 try dbg_info_buffer.writer().print("{s}\x00", .{name});

-                if (!payload_ty.isNoReturn()) {
+                if (!payload_ty.isNoReturn(mod)) {
                     // DW.AT.member
                     try dbg_info_buffer.ensureUnusedCapacity(7);
                     dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@@ -685,9 +671,10 @@
         const atom_index = self.di_atom_decls.get(owner_decl).?;
         const name_with_null = name.ptr[0 .. name.len + 1];
         try dbg_info.append(@enumToInt(AbbrevKind.variable));
-        const target = self.mod.getTarget();
+        const mod = self.mod;
+        const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        const child_ty = if (is_ptr) ty.childType() else ty;
+        const child_ty = if (is_ptr) ty.childType(mod) else ty;

         switch (loc) {
             .register => |reg| {
@@ -790,9 +777,9 @@
                     const fixup = dbg_info.items.len;
                     dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
                         1,
-                        if (child_ty.isSignedInt()) DW.OP.consts else DW.OP.constu,
+                        if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu,
                     });
-                    if (child_ty.isSignedInt()) {
+                    if (child_ty.isSignedInt(mod)) {
                         try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x));
                     } else {
                         try leb128.writeULEB128(dbg_info.writer(), x);
@@ -805,7 +792,7 @@
                     // DW.AT.location, DW.FORM.exprloc
                     // uleb128(exprloc_len)
                     // DW.OP.implicit_value uleb128(len_of_bytes) bytes
-                    const abi_size = @intCast(u32, child_ty.abiSize(target));
+                    const abi_size = @intCast(u32, child_ty.abiSize(mod));
                     var implicit_value_len = std.ArrayList(u8).init(self.gpa);
                     defer implicit_value_len.deinit();
                     try leb128.writeULEB128(implicit_value_len.writer(), abi_size);
@@ -964,8 +951,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
     defer tracy.end();

     const decl = mod.declPtr(decl_index);
-    const decl_name = try decl.getFullyQualifiedName(mod);
-    defer self.allocator.free(decl_name);
+    const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));

     log.debug("initDeclState {s}{*}", .{ decl_name, decl });
@@ -979,14 +965,14 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)

     assert(decl.has_tv);

-    switch (decl.ty.zigTypeTag()) {
+    switch (decl.ty.zigTypeTag(mod)) {
         .Fn => {
             _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);

             // For functions we need to add a prologue to the debug line program.
             try dbg_line_buffer.ensureTotalCapacity(26);

-            const func = decl.val.castTag(.function).?.data;
+            const func = decl.val.getFunction(mod).?;
             log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
                 decl.src_line,
                 func.lbrace_line,
@@ -1026,8 +1012,8 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
             const decl_name_with_null = decl_name[0 .. decl_name.len + 1];
             try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);

-            const fn_ret_type = decl.ty.fnReturnType();
-            const fn_ret_has_bits = fn_ret_type.hasRuntimeBits();
+            const fn_ret_type = decl.ty.fnReturnType(mod);
+            const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
             if (fn_ret_has_bits) {
                 dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram));
             } else {
@@ -1059,7 +1045,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)

 pub fn commitDeclState(
     self: *Dwarf,
-    module: *Module,
+    mod: *Module,
     decl_index: Module.Decl.Index,
     sym_addr: u64,
     sym_size: u64,
@@ -1071,12 +1057,12 @@ pub fn commitDeclState(
     const gpa = self.allocator;
     var dbg_line_buffer = &decl_state.dbg_line;
     var dbg_info_buffer = &decl_state.dbg_info;
-    const decl = module.declPtr(decl_index);
+    const decl = mod.declPtr(decl_index);

     const target_endian = self.target.cpu.arch.endian();

     assert(decl.has_tv);

-    switch (decl.ty.zigTypeTag()) {
+    switch (decl.ty.zigTypeTag(mod)) {
         .Fn => {
             // Since the Decl is a function, we need to update the .debug_line program.
             // Perform the relocations based on vaddr.
@@ -1271,10 +1257,11 @@ pub fn commitDeclState(
                 const symbol = &decl_state.abbrev_table.items[sym_index];
                 const ty = symbol.type;
                 const deferred: bool = blk: {
-                    if (ty.isAnyError()) break :blk true;
-                    switch (ty.tag()) {
-                        .error_set_inferred => {
-                            if (!ty.castTag(.error_set_inferred).?.data.is_resolved) break :blk true;
+                    if (ty.isAnyError(mod)) break :blk true;
+                    switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                        .inferred_error_set_type => |ies_index| {
+                            const ies = mod.inferredErrorSetPtr(ies_index);
+                            if (!ies.is_resolved) break :blk true;
                         },
                         else => {},
                     }
@@ -1283,11 +1270,10 @@
                 if (deferred) continue;

                 symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
-                try decl_state.addDbgInfoType(module, di_atom_index, ty);
+                try decl_state.addDbgInfoType(mod, di_atom_index, ty);
             }
         }

-    log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name});
     try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));

     while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
@@ -1295,10 +1281,11 @@
             const symbol = decl_state.abbrev_table.items[target];
             const ty = symbol.type;
             const deferred: bool = blk: {
-                if (ty.isAnyError()) break :blk true;
-                switch (ty.tag()) {
-                    .error_set_inferred => {
-                        if (!ty.castTag(.error_set_inferred).?.data.is_resolved) break :blk true;
+                if (ty.isAnyError(mod)) break :blk true;
+                switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                    .inferred_error_set_type => |ies_index| {
+                        const ies = mod.inferredErrorSetPtr(ies_index);
+                        if (!ies.is_resolved) break :blk true;
                     },
                     else => {},
                 }
@@ -1319,7 +1306,7 @@
                     reloc.offset,
                     value,
                     target,
-                    ty.fmt(module),
+                    ty.fmt(mod),
                 });
                 mem.writeInt(
                     u32,
@@ -1358,7 +1345,6 @@
         }
     }

-    log.debug("writeDeclDebugInfo for '{s}", .{decl.name});
     try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
 }
@@ -1527,7 +1513,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons
     }
 }

-pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void {
+pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !void {
     const tracy = trace(@src());
     defer tracy.end();
@@ -1535,8 +1521,8 @@
     const atom = self.getAtom(.src_fn, atom_index);
     if (atom.len == 0) return;

-    const decl = module.declPtr(decl_index);
-    const func = decl.val.castTag(.function).?.data;
+    const decl = mod.declPtr(decl_index);
+    const func = decl.val.getFunction(mod).?;
     log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
         decl.src_line,
         func.lbrace_line,
@@ -2534,18 +2520,14 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
     defer arena_alloc.deinit();
     const arena = arena_alloc.allocator();

-    const error_set = try arena.create(Module.ErrorSet);
-    const error_ty = try Type.Tag.error_set.create(arena, error_set);
-    var names = Module.ErrorSet.NameMap{};
-    try names.ensureUnusedCapacity(arena, module.global_error_set.count());
-    var it = module.global_error_set.keyIterator();
-    while (it.next()) |key| {
-        names.putAssumeCapacityNoClobber(key.*, {});
-    }
-    error_set.names = names;
+    // TODO: don't create a zig type for this, just make the dwarf info
+    // without touching the zig type system.
+    const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys());
+    std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan);
+    const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } });

     var dbg_info_buffer = std.ArrayList(u8).init(arena);
-    try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer);
+    try addDbgInfoErrorSet(arena, module, error_ty.toType(), self.target, &dbg_info_buffer);

     const di_atom_index = try self.createAtom(.di_atom);
     log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
@@ -2598,7 +2580,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {

 fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 {
     const decl = mod.declPtr(decl_index);
-    const file_scope = decl.getFileScope();
+    const file_scope = decl.getFileScope(mod);
     const gop = try self.di_files.getOrPut(self.allocator, file_scope);
     if (!gop.found_existing) {
         switch (self.bin_file.tag) {
@@ -2663,7 +2645,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {

 fn addDbgInfoErrorSet(
     arena: Allocator,
-    module: *Module,
+    mod: *Module,
     ty: Type,
     target: std.Target,
     dbg_info_buffer: *std.ArrayList(u8),
@@ -2673,10 +2655,10 @@ fn addDbgInfoErrorSet(
     // DW.AT.enumeration_type
     try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type));
     // DW.AT.byte_size, DW.FORM.udata
-    const abi_size = Type.anyerror.abiSize(target);
+    const abi_size = Type.anyerror.abiSize(mod);
     try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
     // DW.AT.name, DW.FORM.string
-    const name = try ty.nameAllocArena(arena, module);
+    const name = try ty.nameAllocArena(arena, mod);
     try dbg_info_buffer.writer().print("{s}\x00", .{name});

     // DW.AT.enumerator
@@ -2689,9 +2671,10 @@ fn addDbgInfoErrorSet(
     // DW.AT.const_value, DW.FORM.data8
     mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);

-    const error_names = ty.errorSetNames();
-    for (error_names) |error_name| {
-        const kv = module.getErrorValue(error_name) catch unreachable;
+    const error_names = ty.errorSetNames(mod);
+    for (error_names) |error_name_ip| {
+        const int = try mod.getErrorValue(error_name_ip);
+        const error_name = mod.intern_pool.stringToSlice(error_name_ip);
         // DW.AT.enumerator
         try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64));
         dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant));
@@ -2699,7 +2682,7 @@
         dbg_info_buffer.appendSliceAssumeCapacity(error_name);
         dbg_info_buffer.appendAssumeCapacity(0);
         // DW.AT.const_value, DW.FORM.data8
-        mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), kv.value, target_endian);
+        mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), int, target_endian);
     }

     // DW.AT.enumeration_type delimit children
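The `abbrev_resolver` change in Dwarf.zig is the payoff of interning: because structurally equal types share one `InternPool.Index`, the custom `std.HashMapUnmanaged(Type, u32, Type.HashContext64, ...)` collapses into a plain `std.AutoHashMapUnmanaged(InternPool.Index, u32)` keyed by `ty.toIntern()`, and the awkward put-then-get-again dance disappears. A minimal sketch of the same lookup shape, with a stand-in index type rather than the real `InternPool.Index`:

```zig
const std = @import("std");

// Stand-in for InternPool.Index. Equal types have equal indices, so the
// default integer hash and == are a correct map context.
const TypeIndex = enum(u32) { _ };

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var abbrev_resolver: std.AutoHashMapUnmanaged(TypeIndex, u32) = .{};
    defer abbrev_resolver.deinit(allocator);

    const ty = @intToEnum(TypeIndex, 123); // what `ty.toIntern()` would yield
    const resolv = abbrev_resolver.get(ty) orelse blk: {
        const sym_index: u32 = 0; // next .debug_abbrev symbol, as in the diff
        try abbrev_resolver.putNoClobber(allocator, ty, sym_index);
        break :blk sym_index; // no second lookup needed
    };
    std.debug.print("abbrev symbol: %{d}\n", .{resolv});
}
```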
.Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (val.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rodata_section_index.?; @@ -2478,11 +2481,10 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; @@ -2572,19 +2574,20 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s return local_sym; } -pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); self.freeUnnamedConsts(decl_index); @@ -2593,28 +2596,28 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC); if (decl_state) |*ds| { try self.dwarf.?.commitDeclState( - module, + mod, decl_index, local_sym.st_value, local_sym.st_size, @@ -2624,31 +2627,30 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. 
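
The `updateFunc` signature change above (a `Module.Fn.Index` in place of a `*Module.Fn`, resolved on entry via `mod.funcPtr(func_index)`) is the pointer-to-index migration at work: an index stays valid when the backing storage grows, while a raw pointer could dangle. A minimal standalone sketch of the idea; `Fn`, `FnIndex`, and `FnPool` are illustrative stand-ins, not the compiler's real types:

const std = @import("std");

const Fn = struct { owner_decl: u32 };
const FnIndex = enum(u32) { _ };

const FnPool = struct {
    list: std.ArrayListUnmanaged(Fn) = .{},

    // A stable handle: resolving through the pool survives reallocation
    // of `list.items`, which would invalidate a raw `*Fn`.
    fn funcPtr(pool: *FnPool, index: FnIndex) *Fn {
        return &pool.list.items[@enumToInt(index)];
    }
};

Call sites hold the index and resolve it only at the moment they need the payload.
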
- return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn updateDecl( self: *Elf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, ) File.UpdateDeclError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } const tracy = trace(@src()); defer tracy.end(); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -2661,13 +2663,13 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2676,7 +2678,7 @@ pub fn updateDecl( .parent_atom_index = atom.getSymbolIndex().?, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2687,7 +2689,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -2695,7 +2697,7 @@ pub fn updateDecl( const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT); if (decl_state) |*ds| { try self.dwarf.?.commitDeclState( - module, + mod, decl_index, local_sym.st_value, local_sym.st_size, @@ -2705,7 +2707,7 @@ pub fn updateDecl( // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. 
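
The `castTag(.variable)` calls above pattern-matched a heap-allocated tag payload; their replacement, `getVariable(mod)`, is an optional-returning query. The shape of such an accessor, as a self-contained toy (not the real `Value` API):

const Variable = struct { is_extern: bool, is_threadlocal: bool, init: u64 };

const Value = union(enum) {
    int: u64,
    variable: Variable,

    // Returns the payload when the value models a variable, else null, so
    // call sites unwrap with `if (val.getVariable()) |variable| ...`.
    fn getVariable(val: Value) ?Variable {
        return switch (val) {
            .variable => |v| v,
            else => null,
        };
    }
};
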
- return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } fn updateLazySymbolAtom( @@ -2734,8 +2736,8 @@ fn updateLazySymbolAtom( const atom = self.getAtom(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl| - mod.declPtr(owner_decl).srcLoc() + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) else Module.SrcLoc{ .file_scope = undefined, @@ -2800,8 +2802,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module const decl = mod.declPtr(decl_index); const name_str_index = blk: { - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index }); defer gpa.free(name); @@ -2811,7 +2812,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module const atom_index = try self.createAtom(); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), typed_value, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, @@ -2826,7 +2827,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module }, }; - const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const required_alignment = typed_value.ty.abiAlignment(mod); const shdr_index = self.rodata_section_index.?; const phdr_index = self.sections.items(.phdr_index)[shdr_index]; const local_sym = self.getAtom(atom_index).getSymbolPtr(self); @@ -2852,7 +2853,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module pub fn updateDeclExports( self: *Elf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) File.UpdateDeclExportsError!void { @@ -2860,7 +2861,7 @@ pub fn updateDeclExports( @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports); } const tracy = trace(@src()); @@ -2868,7 +2869,7 @@ pub fn updateDeclExports( const gpa = self.base.allocator; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const atom = self.getAtom(atom_index); const decl_sym = atom.getSymbol(self); @@ -2878,40 +2879,41 @@ pub fn updateDeclExports( try self.global_symbols.ensureUnusedCapacity(gpa, exports.len); for (exports) |exp| { - if (exp.options.section) |section_name| { - if (!mem.eql(u8, section_name, ".text")) { - try module.failed_exports.ensureUnusedCapacity(module.gpa, 1); - module.failed_exports.putAssumeCapacityNoClobber( + const exp_name = mod.intern_pool.stringToSlice(exp.opts.name); + if (exp.opts.section.unwrap()) |section_name| { + if (!mod.intern_pool.stringEqlSlice(section_name, ".text")) { + try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); + mod.failed_exports.putAssumeCapacityNoClobber( exp, - 
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(mod), "Unimplemented: ExportOptions.section", .{}), ); continue; } } - const stb_bits: u8 = switch (exp.options.linkage) { + const stb_bits: u8 = switch (exp.opts.linkage) { .Internal => elf.STB_LOCAL, .Strong => blk: { const entry_name = self.base.options.entry orelse "_start"; - if (mem.eql(u8, exp.options.name, entry_name)) { + if (mem.eql(u8, exp_name, entry_name)) { self.entry_addr = decl_sym.st_value; } break :blk elf.STB_GLOBAL; }, .Weak => elf.STB_WEAK, .LinkOnce => { - try module.failed_exports.ensureUnusedCapacity(module.gpa, 1); - module.failed_exports.putAssumeCapacityNoClobber( + try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); + mod.failed_exports.putAssumeCapacityNoClobber( exp, - try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}), ); continue; }, }; const stt_bits: u8 = @truncate(u4, decl_sym.st_info); - if (decl_metadata.getExport(self, exp.options.name)) |i| { + if (decl_metadata.getExport(self, exp_name)) |i| { const sym = &self.global_symbols.items[i]; sym.* = .{ - .st_name = try self.shstrtab.insert(gpa, exp.options.name), + .st_name = try self.shstrtab.insert(gpa, exp_name), .st_info = (stb_bits << 4) | stt_bits, .st_other = 0, .st_shndx = shdr_index, @@ -2925,7 +2927,7 @@ pub fn updateDeclExports( }; try decl_metadata.exports.append(gpa, @intCast(u32, i)); self.global_symbols.items[i] = .{ - .st_name = try self.shstrtab.insert(gpa, exp.options.name), + .st_name = try self.shstrtab.insert(gpa, exp_name), .st_info = (stb_bits << 4) | stt_bits, .st_other = 0, .st_shndx = shdr_index, @@ -2942,8 +2944,7 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.In defer tracy.end(); const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl }); @@ -2953,11 +2954,15 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.In } } -pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void { +pub fn deleteDeclExport( + self: *Elf, + decl_index: Module.Decl.Index, + name: InternPool.NullTerminatedString, +) void { if (self.llvm_object) |_| return; const metadata = self.decls.getPtr(decl_index) orelse return; - const sym_index = metadata.getExportPtr(self, name) orelse return; - log.debug("deleting export '{s}'", .{name}); + const mod = self.base.options.module.?; + const sym_index = metadata.getExportPtr(self, mod.intern_pool.stringToSlice(name)) orelse return; self.global_symbol_free_list.append(self.base.allocator, sym_index.*) catch {}; self.global_symbols.items[sym_index.*].st_info = 0; sym_index.* = 0; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index a346ec756f..a3f67bc70a 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -40,6 +40,7 @@ const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Md5 = std.crypto.hash.Md5; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Relocation = 
@import("MachO/Relocation.zig"); const StringTable = @import("strtab.zig").StringTable; const TableSection = @import("table_section.zig").TableSection; @@ -1847,18 +1848,19 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void { self.markRelocsDirtyByTarget(target); } -pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); self.freeUnnamedConsts(decl_index); @@ -1868,23 +1870,23 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv defer code_buffer.deinit(); var decl_state = if (self.d_sym) |*d_sym| - try d_sym.dwarf.initDeclState(module, decl_index) + try d_sym.dwarf.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none); var code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -1893,7 +1895,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv if (decl_state) |*ds| { try self.d_sym.?.dwarf.commitDeclState( - module, + mod, decl_index, addr, self.getAtom(atom_index).size, @@ -1903,7 +1905,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv // Since we updated the vaddr and the size, each corresponding export symbol also // needs to be updated. 
- try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + try self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 { @@ -1912,16 +1914,15 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - const module = self.base.options.module.?; + const mod = self.base.options.module.?; const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } const unnamed_consts = gop.value_ptr; - const decl = module.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(module); - defer gpa.free(decl_name); + const decl = mod.declPtr(decl_index); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const name_str_index = blk: { const index = unnamed_consts.items.len; @@ -1935,20 +1936,20 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu const atom_index = try self.createAtom(); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), typed_value, &code_buffer, .none, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, }); var code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, }; - const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const required_alignment = typed_value.ty.abiAlignment(mod); const atom = self.getAtomPtr(atom_index); atom.size = code.len; // TODO: work out logic for disambiguating functions from function pointers @@ -1971,33 +1972,32 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu return atom.getSymbolIndex().?; } -pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { +pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } const tracy = trace(@src()); defer tracy.end(); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
} } - const is_threadlocal = if (decl.val.castTag(.variable)) |payload| - payload.data.is_threadlocal and !self.base.options.single_threaded + const is_threadlocal = if (decl.val.getVariable(mod)) |variable| + variable.is_threadlocal and !self.base.options.single_threaded else false; - if (is_threadlocal) return self.updateThreadlocalVariable(module, decl_index); + if (is_threadlocal) return self.updateThreadlocalVariable(mod, decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const sym_index = self.getAtom(atom_index).getSymbolIndex().?; @@ -2007,14 +2007,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) defer code_buffer.deinit(); var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym| - try d_sym.dwarf.initDeclState(module, decl_index) + try d_sym.dwarf.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2023,7 +2023,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) .parent_atom_index = sym_index, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2034,7 +2034,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -2042,7 +2042,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) if (decl_state) |*ds| { try self.d_sym.?.dwarf.commitDeclState( - module, + mod, decl_index, addr, self.getAtom(atom_index).size, @@ -2052,7 +2052,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) // Since we updated the vaddr and the size, each corresponding export symbol also // needs to be updated. 
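
Several hunks in this patch drop a `defer free` around `getFullyQualifiedName` because the returned name now lives in the intern pool; the caller only borrows the bytes. A simplified stand-in for that kind of lookup (the real `InternPool` API differs):

const std = @import("std");

const Pool = struct {
    bytes: std.ArrayListUnmanaged(u8) = .{},

    const NullTerminatedString = enum(u32) { _ };

    // Borrow the interned bytes; there is nothing to free at the call site.
    fn stringToSlice(pool: *const Pool, s: NullTerminatedString) [:0]const u8 {
        const start = @enumToInt(s);
        var end: u32 = start;
        while (pool.bytes.items[end] != 0) end += 1;
        return pool.bytes.items[start..end :0];
    }
};
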
- try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + try self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } fn updateLazySymbolAtom( @@ -2081,8 +2081,8 @@ fn updateLazySymbolAtom( const atom = self.getAtomPtr(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl| - mod.declPtr(owner_decl).srcLoc() + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) else Module.SrcLoc{ .file_scope = undefined, @@ -2126,7 +2126,8 @@ } pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.Index { - const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); + const mod = self.base.options.module.?; + const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { @@ -2144,7 +2145,7 @@ metadata.state.* = .pending_flush; const atom = metadata.atom.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.data_const_section_index.?, }); @@ -2152,6 +2153,7 @@ } fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { + const mod = self.base.options.module.?; // Lowering a TLV on macOS involves two stages: // 1. first we lower the initializer into the appropriate section (__thread_data or __thread_bss) // 2.
next, we create a corresponding threadlocal variable descriptor in __thread_vars @@ -2175,9 +2177,9 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const decl = module.declPtr(decl_index); const decl_metadata = self.decls.get(decl_index).?; - const decl_val = decl.val.castTag(.variable).?.data.init; + const decl_val = decl.val.getVariable(mod).?.init.toValue(); const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2186,7 +2188,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D .parent_atom_index = init_sym_index, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2202,10 +2204,9 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D }, }; - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); - const decl_name = try decl.getFullyQualifiedName(module); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(module)); const init_sym_name = try std.fmt.allocPrint(gpa, "{s}$tlv$init", .{decl_name}); defer gpa.free(init_sym_name); @@ -2262,12 +2263,13 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { const decl = self.base.options.module.?.declPtr(decl_index); const ty = decl.ty; const val = decl.val; - const zig_ty = ty.zigTypeTag(); + const mod = self.base.options.module.?; + const zig_ty = ty.zigTypeTag(mod); const mode = self.base.options.optimize_mode; const single_threaded = self.base.options.single_threaded; const sect_id: u8 = blk: { // TODO finish and audit this function - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { if (mode == .ReleaseFast or mode == .ReleaseSmall) { @panic("TODO __DATA,__bss"); } else { @@ -2275,8 +2277,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { } } - if (val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal and !single_threaded) { + if (val.getVariable(mod)) |variable| { + if (variable.is_threadlocal and !single_threaded) { break :blk self.thread_data_section_index.?; } break :blk self.data_section_index.?; @@ -2286,7 +2288,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { // TODO: what if this is a function pointer? 
.Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (val.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.data_const_section_index.?; @@ -2301,10 +2303,9 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64 const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; @@ -2376,7 +2377,7 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: Module.De pub fn updateDeclExports( self: *MachO, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) File.UpdateDeclExportsError!void { @@ -2385,7 +2386,7 @@ pub fn updateDeclExports( } if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| - return llvm_object.updateDeclExports(module, decl_index, exports); + return llvm_object.updateDeclExports(mod, decl_index, exports); } const tracy = trace(@src()); @@ -2393,26 +2394,28 @@ pub fn updateDeclExports( const gpa = self.base.allocator; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const atom = self.getAtom(atom_index); const decl_sym = atom.getSymbol(self); const decl_metadata = self.decls.getPtr(decl_index).?; for (exports) |exp| { - const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name}); + const exp_name = try std.fmt.allocPrint(gpa, "_{}", .{ + exp.opts.name.fmt(&mod.intern_pool), + }); defer gpa.free(exp_name); log.debug("adding new export '{s}'", .{exp_name}); - if (exp.options.section) |section_name| { - if (!mem.eql(u8, section_name, "__text")) { - try module.failed_exports.putNoClobber( - module.gpa, + if (exp.opts.section.unwrap()) |section_name| { + if (!mod.intern_pool.stringEqlSlice(section_name, "__text")) { + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: ExportOptions.section", .{}, ), @@ -2421,13 +2424,13 @@ pub fn updateDeclExports( } } - if (exp.options.linkage == .LinkOnce) { - try module.failed_exports.putNoClobber( - module.gpa, + if (exp.opts.linkage == .LinkOnce) { + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}, ), @@ -2450,7 +2453,7 @@ pub fn updateDeclExports( .n_value = decl_sym.n_value, }; - switch (exp.options.linkage) { + switch (exp.opts.linkage) { .Internal => { // Symbol should be hidden, or in MachO lingo, private extern. // We should also mark the symbol as Weak: n_desc == N_WEAK_DEF. 
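
The `_{}` / `_{s}` formatting in these MachO hunks reflects the Mach-O convention that C-ABI symbol names carry a leading underscore; `deleteDeclExport` below rebuilds the same mangled name before looking an export up. A tiny illustrative helper, not taken from the patch:

const std = @import("std");

fn machoSymbolName(gpa: std.mem.Allocator, name: []const u8) ![]u8 {
    // Mach-O symbol tables store C names with a '_' prefix.
    return std.fmt.allocPrint(gpa, "_{s}", .{name});
}

test "mach-o names get an underscore prefix" {
    const gpa = std.testing.allocator;
    const sym = try machoSymbolName(gpa, "main");
    defer gpa.free(sym);
    try std.testing.expectEqualStrings("_main", sym);
}
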
@@ -2471,9 +2474,9 @@ pub fn updateDeclExports( // TODO: this needs rethinking const global = self.getGlobal(exp_name).?; if (sym_loc.sym_index != global.sym_index and global.file != null) { - _ = try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create( + _ = try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), \\LinkError: symbol '{s}' defined multiple times , .{exp_name}, @@ -2485,12 +2488,17 @@ pub fn updateDeclExports( } } -pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void { +pub fn deleteDeclExport( + self: *MachO, + decl_index: Module.Decl.Index, + name: InternPool.NullTerminatedString, +) Allocator.Error!void { if (self.llvm_object) |_| return; const metadata = self.decls.getPtr(decl_index) orelse return; const gpa = self.base.allocator; - const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name}); + const mod = self.base.options.module.?; + const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{mod.intern_pool.stringToSlice(name)}); defer gpa.free(exp_name); const sym_index = metadata.getExportPtr(self, exp_name) orelse return; diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 69cd73a602..b74518d930 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -68,9 +68,9 @@ pub fn deinit(self: *NvPtx) void { self.base.allocator.free(self.ptx_file_name); } -pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *NvPtx, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (!build_options.have_llvm) return; - try self.llvm_object.updateFunc(module, func, air, liveness); + try self.llvm_object.updateFunc(module, func_index, air, liveness); } pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: Module.Decl.Index) !void { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 6d74e17dfd..c08754b57a 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -213,14 +213,14 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void { const gpa = self.base.allocator; const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope()); + const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope(mod)); if (fn_map_res.found_existing) { if (try fn_map_res.value_ptr.functions.fetchPut(gpa, decl_index, out)) |old_entry| { gpa.free(old_entry.value.code); gpa.free(old_entry.value.lineinfo); } } else { - const file = decl.getFileScope(); + const file = decl.getFileScope(mod); const arena = self.path_arena.allocator(); // each file gets a symbol fn_map_res.value_ptr.* = .{ @@ -276,17 +276,17 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); self.freeUnnamedConsts(decl_index); _ = try self.seeDecl(decl_index); - log.debug("codegen decl {*} 
({s})", .{ decl, decl.name }); var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -298,8 +298,8 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv const res = try codegen.generateFunction( &self.base, - decl.srcLoc(), - func, + decl.srcLoc(mod), + func_index, air, liveness, &code_buffer, @@ -316,7 +316,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv .ok => try code_buffer.toOwnedSlice(), .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -344,8 +344,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const index = unnamed_consts.items.len; // name is freed when the unnamed const is freed @@ -366,7 +365,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I }; self.syms.items[info.sym_index.?] = sym; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = @enumToInt(decl_index), @@ -388,14 +387,13 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I return @intCast(u32, info.got_index.?); } -pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) !void { - const decl = module.declPtr(decl_index); +pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { + const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
} @@ -403,13 +401,11 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) _ = try self.seeDecl(decl_index); - log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index }); - var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ .none = {} }, .{ @@ -419,7 +415,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -432,9 +428,9 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) } /// called at the end of update{Decl,Func} fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { - const decl = self.base.options.module.?.declPtr(decl_index); - const is_fn = (decl.ty.zigTypeTag() == .Fn); - log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + const is_fn = (decl.ty.zigTypeTag(mod) == .Fn); const sym_t: aout.Sym.Type = if (is_fn) .t else .d; const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); @@ -445,7 +441,7 @@ fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { const sym: aout.Sym = .{ .value = undefined, // the value of stuff gets filled in in flushModule .type = decl_block.type, - .name = mem.span(decl.name), + .name = try self.base.allocator.dupe(u8, mod.intern_pool.stringToSlice(decl.name)), }; if (decl_block.sym_index) |s| { @@ -566,10 +562,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = fentry.value_ptr.functions.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl = mod.declPtr(decl_index); const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); const out = entry.value_ptr.*; - log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line }); { // connect the previous decl to the next const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount); @@ -615,10 +609,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = self.data_decl_table.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl = mod.declPtr(decl_index); const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); const code = entry.value_ptr.*; - log.debug("write data decl {*} ({s})", .{ decl, decl.name }); foff += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; @@ -694,19 +686,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const source_decl = mod.declPtr(source_decl_index); for (kv.value_ptr.items) |reloc| { const target_decl_index = reloc.target; - const target_decl = mod.declPtr(target_decl_index); const 
target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index); const target_decl_offset = target_decl_block.offset.?; const offset = reloc.offset; const addend = reloc.addend; - log.debug("relocating the address of '{s}' + {d} into '{s}' + {d}", .{ target_decl.name, addend, source_decl.name, offset }); - const code = blk: { - const is_fn = source_decl.ty.zigTypeTag() == .Fn; + const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; if (is_fn) { - const table = self.fn_decl_table.get(source_decl.getFileScope()).?.functions; + const table = self.fn_decl_table.get(source_decl.getFileScope(mod)).?.functions; const output = table.get(source_decl_index).?; break :blk output.code; } else { @@ -728,7 +717,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No } fn addDeclExports( self: *Plan9, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { @@ -736,12 +725,13 @@ fn addDeclExports( const decl_block = self.getDeclBlock(metadata.index); for (exports) |exp| { + const exp_name = mod.intern_pool.stringToSlice(exp.opts.name); // plan9 does not support custom sections - if (exp.options.section) |section_name| { - if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { - try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create( + if (exp.opts.section.unwrap()) |section_name| { + if (!mod.intern_pool.stringEqlSlice(section_name, ".text") and !mod.intern_pool.stringEqlSlice(section_name, ".data")) { + try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( self.base.allocator, - module.declPtr(decl_index).srcLoc(), + mod.declPtr(decl_index).srcLoc(mod), "plan9 does not support extra sections", .{}, )); @@ -751,10 +741,10 @@ fn addDeclExports( const sym = .{ .value = decl_block.offset.?, .type = decl_block.type.toGlobal(), - .name = exp.options.name, + .name = try self.base.allocator.dupe(u8, exp_name), }; - if (metadata.getExport(self, exp.options.name)) |i| { + if (metadata.getExport(self, exp_name)) |i| { self.syms.items[i] = sym; } else { try self.syms.append(self.base.allocator, sym); @@ -770,9 +760,9 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // in the deleteUnusedDecl function. 
const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const is_fn = (decl.val.tag() == .function); + const is_fn = decl.val.getFunctionIndex(mod) != .none; if (is_fn) { - var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope()).?; + var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; if (submap.fetchSwapRemove(decl_index)) |removed_entry| { self.base.allocator.free(removed_entry.value.code); @@ -955,7 +945,10 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void { try w.writeAll(sym.name); try w.writeByte(0); } + pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { + const mod = self.base.options.module.?; + const ip = &mod.intern_pool; const writer = buf.writer(); // write the f symbols { @@ -979,7 +972,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const sym = self.syms.items[decl_block.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| { + for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| { try self.writeSym(writer, self.syms.items[exp_i]); }; } @@ -1005,7 +998,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const sym = self.syms.items[decl_block.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| { + for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| { const s = self.syms.items[exp_i]; if (mem.eql(u8, s.name, "_start")) self.entry_val = s.value; @@ -1031,7 +1024,7 @@ pub fn getDeclVAddr( ) !u64 { const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { var start = self.bases.text; var it_file = self.fn_decl_table.iterator(); while (it_file.next()) |fentry| { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index fbdcbd5a8e..5bbd5ebdc0 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -103,11 +103,13 @@ pub fn deinit(self: *SpirV) void { self.decl_link.deinit(); } -pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *SpirV, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const func = module.funcPtr(func_index); + var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &self.spv, &self.decl_link); defer decl_gen.deinit(); @@ -131,12 +133,12 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index) pub fn updateDeclExports( self: *SpirV, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { - const decl = module.declPtr(decl_index); - if (decl.val.tag() == .function and decl.ty.fnCallingConvention() == .Kernel) { + const decl = mod.declPtr(decl_index); + if (decl.val.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. 
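
The `decl_link` lookup that follows uses the getOrPut idiom: a single hash-map probe that both finds an existing slot and reserves a new one. A standalone example of the same pattern:

const std = @import("std");

test "getOrPut initializes a new entry exactly once" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    const gop = try map.getOrPut(7);
    if (!gop.found_existing) gop.value_ptr.* = 0; // first sighting: set default
    gop.value_ptr.* += 1;

    try std.testing.expectEqual(@as(u32, 1), map.get(7).?);
}
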
const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { @@ -145,7 +147,7 @@ pub fn updateDeclExports( const spv_decl_index = entry.value_ptr.*; for (exports) |exp| { - try self.spv.declareEntryPoint(spv_decl_index, exp.options.name); + try self.spv.declareEntryPoint(spv_decl_index, mod.intern_pool.stringToSlice(exp.opts.name)); } } @@ -188,7 +190,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No var error_info = std.ArrayList(u8).init(self.spv.arena); try error_info.appendSlice("zig_errors"); const module = self.base.options.module.?; - for (module.error_name_list.items) |name| { + for (module.global_error_set.keys()) |name_nts| { + const name = module.intern_pool.stringToSlice(name_nts); // Errors can contain pretty much any character - to encode them in a string we must escape // them somehow. Easiest here is to use some established scheme; one which also preserves the // name if it contains no strange characters is nice for debugging. URI encoding fits the bill. diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index befd2d68c9..fdac7dfa63 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -149,7 +149,8 @@ discarded: std.AutoHashMapUnmanaged(SymbolLoc, SymbolLoc) = .{}, /// into the final binary. resolved_symbols: std.AutoArrayHashMapUnmanaged(SymbolLoc, void) = .{}, /// Symbols that remain undefined after symbol resolution. -undefs: std.StringArrayHashMapUnmanaged(SymbolLoc) = .{}, +/// Note: The key represents an offset into the string table, rather than the actual string. +undefs: std.AutoArrayHashMapUnmanaged(u32, SymbolLoc) = .{}, /// Maps a symbol's location to an atom. This can be used to find meta /// data of a symbol, such as its size, or its offset to perform a relocation. /// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped. @@ -514,6 +515,10 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm { /// Leaves index undefined and the default flags (0).
fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !SymbolLoc { const name_offset = try wasm.string_table.put(wasm.base.allocator, name); + return wasm.createSyntheticSymbolOffset(name_offset, tag); +} + +fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !SymbolLoc { const sym_index = @intCast(u32, wasm.symbols.items.len); const loc: SymbolLoc = .{ .index = sym_index, .file = null }; try wasm.symbols.append(wasm.base.allocator, .{ @@ -691,7 +696,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void { try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, location, {}); if (symbol.isUndefined()) { - try wasm.undefs.putNoClobber(wasm.base.allocator, sym_name, location); + try wasm.undefs.putNoClobber(wasm.base.allocator, sym_name_index, location); } continue; } @@ -801,7 +806,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void { try wasm.resolved_symbols.put(wasm.base.allocator, location, {}); assert(wasm.resolved_symbols.swapRemove(existing_loc)); if (existing_sym.isUndefined()) { - _ = wasm.undefs.swapRemove(sym_name); + _ = wasm.undefs.swapRemove(sym_name_index); } } } @@ -812,15 +817,16 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void { log.debug("Resolving symbols in archives", .{}); var index: u32 = 0; undef_loop: while (index < wasm.undefs.count()) { - const sym_name = wasm.undefs.keys()[index]; + const sym_name_index = wasm.undefs.keys()[index]; for (wasm.archives.items) |archive| { + const sym_name = wasm.string_table.get(sym_name_index); + log.debug("Detected symbol '{s}' in archive '{s}', parsing objects..", .{ sym_name, archive.name }); const offset = archive.toc.get(sym_name) orelse { // symbol does not exist in this archive continue; }; - log.debug("Detected symbol '{s}' in archive '{s}', parsing objects..", .{ sym_name, archive.name }); // Symbol is found in unparsed object file within current archive. // Parse object and resolve symbols again before we check remaining // undefined symbols. @@ -1191,28 +1197,36 @@ fn validateFeatures( /// if one or multiple undefined references exist. When none exist, the symbol will /// not be created, ensuring we don't unnecessarily emit unreferenced symbols. fn resolveLazySymbols(wasm: *Wasm) !void { - if (wasm.undefs.fetchSwapRemove("__heap_base")) |kv| { - const loc = try wasm.createSyntheticSymbol("__heap_base", .data); - try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations. + if (wasm.string_table.getOffset("__heap_base")) |name_offset| { + if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { + const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data); + try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + _ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
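
Keying `undefs` by a `u32` string-table offset instead of the name bytes works because the string table deduplicates: every distinct name maps to exactly one offset, so offset equality is name equality. A simplified model of such a table (the real `StringTable` keys on its own copy of the bytes):

const std = @import("std");

const StringTable = struct {
    buffer: std.ArrayListUnmanaged(u8) = .{},
    dedup: std.StringHashMapUnmanaged(u32) = .{},

    fn put(st: *StringTable, gpa: std.mem.Allocator, name: []const u8) !u32 {
        if (st.dedup.get(name)) |existing| return existing; // same name, same offset
        const offset = @intCast(u32, st.buffer.items.len);
        try st.buffer.appendSlice(gpa, name);
        try st.buffer.append(gpa, 0); // names are stored 0-terminated
        // Simplification: this keys the map on the caller's slice; a real
        // table would key on the copy in `buffer`.
        try st.dedup.put(gpa, name, offset);
        return offset;
    }
};
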
+ } } - if (wasm.undefs.fetchSwapRemove("__heap_end")) |kv| { - const loc = try wasm.createSyntheticSymbol("__heap_end", .data); - try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(loc); + if (wasm.string_table.getOffset("__heap_end")) |name_offset| { + if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { + const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data); + try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + _ = wasm.resolved_symbols.swapRemove(loc); + } } if (!wasm.base.options.shared_memory) { - if (wasm.undefs.fetchSwapRemove("__tls_base")) |kv| { - const loc = try wasm.createSyntheticSymbol("__tls_base", .global); - try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + if (wasm.string_table.getOffset("__tls_base")) |name_offset| { + if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { + const loc = try wasm.createSyntheticSymbolOffset(name_offset, .global); + try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + } } } - if (wasm.undefs.fetchSwapRemove("__zig_errors_len")) |kv| { - const loc = try wasm.createSyntheticSymbol("__zig_errors_len", .data); - try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(kv.value); + if (wasm.string_table.getOffset("__zig_errors_len")) |name_offset| { + if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { + const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data); + try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + _ = wasm.resolved_symbols.swapRemove(kv.value); + } } } @@ -1324,17 +1338,18 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 { return index; } -pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const atom_index = try wasm.getOrCreateAtomForDecl(decl_index); @@ -1348,7 +1363,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes defer code_writer.deinit(); // const result = try codegen.generateFunction( // &wasm.base, - // decl.srcLoc(), + // decl.srcLoc(mod), // func, // air, // liveness, @@ -1357,8 +1372,8 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes // ); const result = try codegen.generateFunction( &wasm.base, - decl.srcLoc(), - func, + decl.srcLoc(mod), + func_index, air, liveness, &code_writer, @@ -1403,9 +1418,9 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi defer tracy.end(); const decl = mod.declPtr(decl_index); - if (decl.val.castTag(.function)) |_| { + if (decl.val.getFunction(mod)) |_| { return; - } else if (decl.val.castTag(.extern_fn)) |_| { + } else if (decl.val.getExternFunc(mod)) |_| { return; } @@ -1413,19 +1428,20 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi const 
atom = wasm.getAtomPtr(atom_index); atom.clear(); - if (decl.isExtern()) { - const variable = decl.getVariable().?; - const name = mem.sliceTo(decl.name, 0); - return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null); + if (decl.isExtern(mod)) { + const variable = decl.getOwnedVariable(mod).?; + const name = mod.intern_pool.stringToSlice(decl.name); + const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name); + return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null); } - const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; var code_writer = std.ArrayList(u8).init(wasm.base.allocator); defer code_writer.deinit(); const res = try codegen.generateSymbol( &wasm.base, - decl.srcLoc(), + decl.srcLoc(mod), .{ .ty = decl.ty, .val = val }, &code_writer, .none, @@ -1451,8 +1467,7 @@ pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.I defer tracy.end(); const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl }); try dw.updateDeclLineNumber(mod, decl_index); @@ -1465,15 +1480,14 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 const atom_index = wasm.decls.get(decl_index).?; const atom = wasm.getAtomPtr(atom_index); const symbol = &wasm.symbols.items[atom.sym_index]; - const full_name = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(full_name); + const full_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); symbol.name = try wasm.string_table.put(wasm.base.allocator, full_name); try atom.code.appendSlice(wasm.base.allocator, code); try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {}); atom.size = @intCast(u32, code.len); if (code.len == 0) return; - atom.alignment = decl.ty.abiAlignment(wasm.base.options.target); + atom.alignment = decl.ty.abiAlignment(mod); } /// From a given symbol location, returns its `wasm.GlobalType`. @@ -1523,9 +1537,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type { /// Returns the symbol index of the local /// The given `decl` is the parent decl which owns the constant.
pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { - assert(tv.ty.zigTypeTag() != .Fn); // cannot create local symbols for functions - const mod = wasm.base.options.module.?; + assert(tv.ty.zigTypeTag(mod) != .Fn); // cannot create local symbols for functions const decl = mod.declPtr(decl_index); // Create and initialize a new local symbol and atom @@ -1534,16 +1547,17 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const parent_atom = wasm.getAtomPtr(parent_atom_index); const local_index = parent_atom.locals.items.len; try parent_atom.locals.append(wasm.base.allocator, atom_index); - const fqdn = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(fqdn); - const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index }); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); + const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ + fqn, local_index, + }); defer wasm.base.allocator.free(name); var value_bytes = std.ArrayList(u8).init(wasm.base.allocator); defer value_bytes.deinit(); const code = code: { const atom = wasm.getAtomPtr(atom_index); - atom.alignment = tv.ty.abiAlignment(wasm.base.options.target); + atom.alignment = tv.ty.abiAlignment(mod); wasm.symbols.items[atom.sym_index] = .{ .name = try wasm.string_table.put(wasm.base.allocator, name), .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL), @@ -1555,7 +1569,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const result = try codegen.generateSymbol( &wasm.base, - decl.srcLoc(), + decl.srcLoc(mod), tv, &value_bytes, .none, @@ -1611,7 +1625,7 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u3 wasm.symbols.items[sym_index] = symbol; gop.value_ptr.* = .{ .index = sym_index, .file = null }; try wasm.resolved_symbols.put(wasm.base.allocator, gop.value_ptr.*, {}); - try wasm.undefs.putNoClobber(wasm.base.allocator, name, gop.value_ptr.*); + try wasm.undefs.putNoClobber(wasm.base.allocator, name_index, gop.value_ptr.*); return sym_index; } @@ -1632,7 +1646,7 @@ pub fn getDeclVAddr( const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?; const atom = wasm.getAtomPtr(atom_index); const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { assert(reloc_info.addend == 0); // addend not allowed for function relocations // We found a function pointer, so add it to our table, // as function pointers are not allowed to be stored inside the data section. 
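
The function-pointer branch of `getDeclVAddr` above reflects a wasm constraint: code has no data-section address, so a "pointer" to a function is an index into the indirect function table. A toy model of that indirection, not the patch's implementation:

const std = @import("std");

const FunctionTable = struct {
    entries: std.ArrayListUnmanaged(u32) = .{},

    // The returned table index is what gets written wherever a function
    // pointer value is needed.
    fn addFunction(table: *FunctionTable, gpa: std.mem.Allocator, func_sym: u32) !u32 {
        const index = @intCast(u32, table.entries.items.len);
        try table.entries.append(gpa, func_sym);
        return index;
    }
};
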
@@ -1689,36 +1703,37 @@ pub fn updateDeclExports( const decl = mod.declPtr(decl_index); const atom_index = try wasm.getOrCreateAtomForDecl(decl_index); const atom = wasm.getAtom(atom_index); + const gpa = mod.gpa; for (exports) |exp| { - if (exp.options.section) |section| { - try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, - decl.srcLoc(), + if (mod.intern_pool.stringToSliceUnwrap(exp.opts.section)) |section| { + try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", .{section}, )); continue; } - const export_name = try wasm.string_table.put(wasm.base.allocator, exp.options.name); + const export_name = try wasm.string_table.put(wasm.base.allocator, mod.intern_pool.stringToSlice(exp.opts.name)); if (wasm.globals.getPtr(export_name)) |existing_loc| { if (existing_loc.index == atom.sym_index) continue; const existing_sym: Symbol = existing_loc.getSymbol(wasm).*; - const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak; + const exp_is_weak = exp.opts.linkage == .Internal or exp.opts.linkage == .Weak; // When both the to-be-exported symbol and the already existing symbol // are strong symbols, we have a linker error. // In the other case we replace one with the other. if (!exp_is_weak and !existing_sym.isWeak()) { - try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, - decl.srcLoc(), - \\LinkError: symbol '{s}' defined multiple times + try mod.failed_exports.put(gpa, exp, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(mod), + \\LinkError: symbol '{}' defined multiple times \\ first definition in '{s}' \\ next definition in '{s}' , - .{ exp.options.name, wasm.name, wasm.name }, + .{ exp.opts.name.fmt(&mod.intern_pool), wasm.name, wasm.name }, )); continue; } else if (exp_is_weak) { @@ -1735,7 +1750,7 @@ pub fn updateDeclExports( const exported_atom = wasm.getAtom(exported_atom_index); const sym_loc = exported_atom.symbolLoc(); const symbol = sym_loc.getSymbol(wasm); - switch (exp.options.linkage) { + switch (exp.opts.linkage) { .Internal => { symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); }, @@ -1744,9 +1759,9 @@ pub fn updateDeclExports( }, .Strong => {}, // symbols are strong by default .LinkOnce => { - try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, - decl.srcLoc(), + try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(mod), "Unimplemented: LinkOnce", .{}, )); @@ -1754,7 +1769,7 @@ pub fn updateDeclExports( }, } // Ensure the symbol will be exported using the given name - if (!mem.eql(u8, exp.options.name, sym_loc.getName(wasm))) { + if (!mod.intern_pool.stringEqlSlice(exp.opts.name, sym_loc.getName(wasm))) { try wasm.export_names.put(wasm.base.allocator, sym_loc, export_name); } @@ -1768,7 +1783,7 @@ pub fn updateDeclExports( // if the symbol was previously undefined, remove it as an import _ = wasm.imports.remove(sym_loc); - _ = wasm.undefs.swapRemove(exp.options.name); + _ = wasm.undefs.swapRemove(export_name); } } @@ -1792,7 +1807,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void { assert(wasm.symbol_atom.remove(local_atom.symbolLoc())); } - if (decl.isExtern()) { + if (decl.isExtern(mod)) { _ = wasm.imports.remove(atom.symbolLoc()); } _ = wasm.resolved_symbols.swapRemove(atom.symbolLoc()); @@ -1853,7 +1868,7 @@ pub fn addOrUpdateImport( /// Symbol index that is external symbol_index: u32, /// 
Optional library name (i.e. `extern "c" fn foo() void`) - lib_name: ?[*:0]const u8, + lib_name: ?[:0]const u8, /// The index of the type that represents the function signature /// when the extern is a function. When this is null, a data-symbol /// is asserted instead. @@ -1864,7 +1879,7 @@ pub fn addOrUpdateImport( // Also mangle the name when the lib name is set and not equal to "c" so imports with the same // name but different module can be resolved correctly. const mangle_name = lib_name != null and - !std.mem.eql(u8, std.mem.sliceTo(lib_name.?, 0), "c"); + !std.mem.eql(u8, lib_name.?, "c"); const full_name = if (mangle_name) full_name: { break :full_name try std.fmt.allocPrint(wasm.base.allocator, "{s}|{s}", .{ name, lib_name.? }); } else name; @@ -1884,13 +1899,13 @@ pub fn addOrUpdateImport( const loc: SymbolLoc = .{ .file = null, .index = symbol_index }; global_gop.value_ptr.* = loc; try wasm.resolved_symbols.put(wasm.base.allocator, loc, {}); - try wasm.undefs.putNoClobber(wasm.base.allocator, full_name, loc); + try wasm.undefs.putNoClobber(wasm.base.allocator, decl_name_index, loc); } if (type_index) |ty_index| { const gop = try wasm.imports.getOrPut(wasm.base.allocator, .{ .index = symbol_index, .file = null }); const module_name = if (lib_name) |l_name| blk: { - break :blk mem.sliceTo(l_name, 0); + break :blk l_name; } else wasm.host_name; if (!gop.found_existing) { gop.value_ptr.* = .{ @@ -2932,8 +2947,9 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - atom.alignment = slice_ty.abiAlignment(wasm.base.options.target); + const slice_ty = Type.slice_const_u8_sentinel_0; + const mod = wasm.base.options.module.?; + atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table"); @@ -2985,10 +3001,11 @@ fn populateErrorNameTable(wasm: *Wasm) !void { // Addend for each relocation to the table var addend: u32 = 0; const mod = wasm.base.options.module.?; - for (mod.error_name_list.items) |error_name| { + for (mod.global_error_set.keys()) |error_name_nts| { + const error_name = mod.intern_pool.stringToSlice(error_name_nts); const len = @intCast(u32, error_name.len + 1); // names are 0-terminated - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.slice_const_u8_sentinel_0; const offset = @intCast(u32, atom.code.items.len); // first we create the data for the slice of the name try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated @@ -3000,7 +3017,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { .offset = offset, .addend = @intCast(i32, addend), }); - atom.size += @intCast(u32, slice_ty.abiSize(wasm.base.options.target)); + atom.size += @intCast(u32, slice_ty.abiSize(mod)); addend += len; // as we updated the error name table, we now store the actual name within the names atom @@ -3366,15 +3383,15 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod var decl_it = wasm.decls.iterator(); while (decl_it.next()) |entry| { const decl = mod.declPtr(entry.key_ptr.*); - if (decl.isExtern()) continue; + if (decl.isExtern(mod)) continue; const atom_index = entry.value_ptr.*; const atom = wasm.getAtomPtr(atom_index); - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); - } else if 
(decl.getVariable()) |variable| { - if (!variable.is_mutable) { + } else if (decl.getOwnedVariable(mod)) |variable| { + if (variable.is_const) { try wasm.parseAtom(atom_index, .{ .data = .read_only }); - } else if (variable.init.isUndefDeep()) { + } else if (variable.init.toValue().isUndefDeep(mod)) { // for safe build modes, we store the atom in the data segment, // whereas for unsafe build modes we store it in bss. const is_initialized = wasm.base.options.optimize_mode == .Debug or diff --git a/src/main.zig b/src/main.zig index 4acd305ce7..5d666840c0 100644 --- a/src/main.zig +++ b/src/main.zig @@ -569,6 +569,7 @@ const usage_build_generic = \\ --verbose-link Display linker invocations \\ --verbose-cc Display C compiler invocations \\ --verbose-air Enable compiler debug output for Zig AIR + \\ --verbose-intern-pool Enable compiler debug output for InternPool \\ --verbose-llvm-ir[=path] Enable compiler debug output for unoptimized LLVM IR \\ --verbose-llvm-bc=[path] Enable compiler debug output for unoptimized LLVM BC \\ --verbose-cimport Enable compiler debug output for C imports @@ -735,6 +736,7 @@ fn buildOutputType( var verbose_link = (builtin.os.tag != .wasi or builtin.link_libc) and std.process.hasEnvVarConstant("ZIG_VERBOSE_LINK"); var verbose_cc = (builtin.os.tag != .wasi or builtin.link_libc) and std.process.hasEnvVarConstant("ZIG_VERBOSE_CC"); var verbose_air = false; + var verbose_intern_pool = false; var verbose_llvm_ir: ?[]const u8 = null; var verbose_llvm_bc: ?[]const u8 = null; var verbose_cimport = false; @@ -1460,6 +1462,8 @@ fn buildOutputType( verbose_cc = true; } else if (mem.eql(u8, arg, "--verbose-air")) { verbose_air = true; + } else if (mem.eql(u8, arg, "--verbose-intern-pool")) { + verbose_intern_pool = true; } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) { verbose_llvm_ir = "-"; } else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) { @@ -3156,6 +3160,7 @@ fn buildOutputType( .verbose_cc = verbose_cc, .verbose_link = verbose_link, .verbose_air = verbose_air, + .verbose_intern_pool = verbose_intern_pool, .verbose_llvm_ir = verbose_llvm_ir, .verbose_llvm_bc = verbose_llvm_bc, .verbose_cimport = verbose_cimport, diff --git a/src/print_air.zig b/src/print_air.zig index 2e8ab1a642..f963ecdd95 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -7,6 +7,7 @@ const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); +const InternPool = @import("InternPool.zig"); pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) void { const instruction_bytes = air.instructions.len * @@ -14,12 +15,11 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo // the debug safety tag but we want to measure release size. 
(@sizeOf(Air.Inst.Tag) + 8); const extra_bytes = air.extra.len * @sizeOf(u32); - const values_bytes = air.values.len * @sizeOf(Value); const tomb_bytes = if (liveness) |l| l.tomb_bits.len * @sizeOf(usize) else 0; const liveness_extra_bytes = if (liveness) |l| l.extra.len * @sizeOf(u32) else 0; const liveness_special_bytes = if (liveness) |l| l.special.count() * 8 else 0; const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes + - values_bytes + @sizeOf(Liveness) + liveness_extra_bytes + + @sizeOf(Liveness) + liveness_extra_bytes + liveness_special_bytes + tomb_bytes; // zig fmt: off @@ -27,7 +27,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo \\# Total AIR+Liveness bytes: {} \\# AIR Instructions: {d} ({}) \\# AIR Extra Data: {d} ({}) - \\# AIR Values Bytes: {d} ({}) \\# Liveness tomb_bits: {} \\# Liveness Extra Data: {d} ({}) \\# Liveness special table: {d} ({}) @@ -36,7 +35,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo fmtIntSizeBin(total_bytes), air.instructions.len, fmtIntSizeBin(instruction_bytes), air.extra.len, fmtIntSizeBin(extra_bytes), - air.values.len, fmtIntSizeBin(values_bytes), fmtIntSizeBin(tomb_bytes), if (liveness) |l| l.extra.len else 0, fmtIntSizeBin(liveness_extra_bytes), if (liveness) |l| l.special.count() else 0, fmtIntSizeBin(liveness_special_bytes), @@ -92,14 +90,10 @@ const Writer = struct { fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void { for (w.air.instructions.items(.tag), 0..) |tag, i| { + if (tag != .interned) continue; const inst = @intCast(Air.Inst.Index, i); - switch (tag) { - .constant, .const_ty => { - try w.writeInst(s, inst); - try s.writeByte('\n'); - }, - else => continue, - } + try w.writeInst(s, inst); + try s.writeByte('\n'); } } @@ -225,7 +219,6 @@ const Writer = struct { .save_err_return_trace_index, => try w.writeNoOp(s, inst), - .const_ty, .alloc, .ret_ptr, .err_return_trace, @@ -304,7 +297,9 @@ const Writer = struct { .struct_field_ptr => try w.writeStructField(s, inst), .struct_field_val => try w.writeStructField(s, inst), - .constant => try w.writeConstant(s, inst), + .inferred_alloc => @panic("TODO"), + .inferred_alloc_comptime => @panic("TODO"), + .interned => try w.writeInterned(s, inst), .assembly => try w.writeAssembly(s, inst), .dbg_stmt => try w.writeDbgStmt(s, inst), @@ -364,13 +359,7 @@ const Writer = struct { } fn writeType(w: *Writer, s: anytype, ty: Type) !void { - const t = ty.tag(); - switch (t) { - .inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"), - .inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"), - .generic_poison => try s.writeAll("(generic_poison)"), - else => try ty.print(s, w.module), - } + return ty.print(s, w.module); } fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -432,9 +421,10 @@ const Writer = struct { } fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const vector_ty = w.air.getRefType(ty_pl.ty); - const len = @intCast(usize, vector_ty.arrayLen()); + const len = @intCast(usize, vector_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]); try w.writeType(s, vector_ty); @@ -511,10 +501,11 @@ const Writer = struct { } fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; const pl_op = 
w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Bin, pl_op.payload).data; - const elem_ty = w.air.typeOfIndex(inst).childType(); + const elem_ty = w.typeOfIndex(inst).childType(mod); try w.writeType(s, elem_ty); try s.writeAll(", "); try w.writeOperand(s, inst, 0, pl_op.operand); @@ -605,12 +596,12 @@ const Writer = struct { try s.print(", {d}", .{extra.field_index}); } - fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const val = w.air.values[ty_pl.payload]; - const ty = w.air.getRefType(ty_pl.ty); + fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; + const ip_index = w.air.instructions.items(.data)[inst].interned; + const ty = mod.intern_pool.indexToKey(ip_index).typeOf().toType(); try w.writeType(s, ty); - try s.print(", {}", .{val.fmtValue(ty, w.module)}); + try s.print(", {}", .{ip_index.toValue().fmtValue(ty, mod)}); } fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -621,7 +612,7 @@ const Writer = struct { var extra_i: usize = extra.end; var op_index: usize = 0; - const ret_ty = w.air.typeOfIndex(inst); + const ret_ty = w.typeOfIndex(inst); try w.writeType(s, ret_ty); if (is_volatile) { @@ -692,17 +683,17 @@ const Writer = struct { } fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const function = w.air.values[ty_pl.payload].castTag(.function).?.data; - const owner_decl = w.module.declPtr(function.owner_decl); - try s.print("{s}", .{owner_decl.name}); + const ty_fn = w.air.instructions.items(.data)[inst].ty_fn; + const func_index = ty_fn.func; + const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl); + try s.print("{}", .{owner_decl.name.fmt(&w.module.intern_pool)}); } fn writeDbgVar(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[inst].pl_op; try w.writeOperand(s, inst, 0, pl_op.operand); const name = w.air.nullTerminatedString(pl_op.payload); - try s.print(", {s}", .{name}); + try s.print(", \"{}\"", .{std.zig.fmtEscapes(name)}); } fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -965,14 +956,13 @@ const Writer = struct { operand: Air.Inst.Ref, dies: bool, ) @TypeOf(s).Error!void { - var i: usize = @enumToInt(operand); + const i = @enumToInt(operand); - if (i < Air.Inst.Ref.typed_value_map.len) { + if (i < InternPool.static_len) { return s.print("@{}", .{operand}); } - i -= Air.Inst.Ref.typed_value_map.len; - return w.writeInstIndex(s, @intCast(Air.Inst.Index, i), dies); + return w.writeInstIndex(s, i - InternPool.static_len, dies); } fn writeInstIndex( @@ -985,4 +975,9 @@ const Writer = struct { try s.print("%{d}", .{inst}); if (dies) try s.writeByte('!'); } + + fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type { + const mod = w.module; + return w.air.typeOfIndex(inst, &mod.intern_pool); + } }; diff --git a/src/print_zir.zig b/src/print_zir.zig index cfa68424d0..6c371b8b8d 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -3,6 +3,7 @@ const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; const Ast = std.zig.Ast; +const InternPool = @import("InternPool.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); @@ -1191,7 +1192,7 @@ const Writer = struct { 
.field => { const field_name = self.code.nullTerminatedString(extra.data.field_name_start); try self.writeInstRef(stream, extra.data.obj_ptr); - try stream.print(", {}", .{std.zig.fmtId(field_name)}); + try stream.print(", \"{}\"", .{std.zig.fmtEscapes(field_name)}); }, } try stream.writeAll(", ["); @@ -2468,14 +2469,9 @@ const Writer = struct { } fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void { - var i: usize = @enumToInt(ref); - - if (i < Zir.Inst.Ref.typed_value_map.len) { - return stream.print("@{}", .{ref}); - } - i -= Zir.Inst.Ref.typed_value_map.len; - - return self.writeInstIndex(stream, @intCast(Zir.Inst.Index, i)); + const i = @enumToInt(ref); + if (i < InternPool.static_len) return stream.print("@{}", .{@intToEnum(InternPool.Index, i)}); + return self.writeInstIndex(stream, i - InternPool.static_len); } fn writeInstIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { diff --git a/src/target.zig b/src/target.zig index 5e66c8f417..ac78d27c1a 100644 --- a/src/target.zig +++ b/src/target.zig @@ -512,134 +512,6 @@ pub fn needUnwindTables(target: std.Target) bool { return target.os.tag == .windows; } -pub const AtomicPtrAlignmentError = error{ - FloatTooBig, - IntTooBig, - BadType, -}; - -pub const AtomicPtrAlignmentDiagnostics = struct { - bits: u16 = undefined, - max_bits: u16 = undefined, -}; - -/// If ABI alignment of `ty` is OK for atomic operations, returns 0. -/// Otherwise returns the alignment required on a pointer for the target -/// to perform atomic operations. -// TODO this function does not take into account CPU features, which can affect -// this value. Audit this! -pub fn atomicPtrAlignment( - target: std.Target, - ty: Type, - diags: *AtomicPtrAlignmentDiagnostics, -) AtomicPtrAlignmentError!u32 { - const max_atomic_bits: u16 = switch (target.cpu.arch) { - .avr, - .msp430, - .spu_2, - => 16, - - .arc, - .arm, - .armeb, - .hexagon, - .m68k, - .le32, - .mips, - .mipsel, - .nvptx, - .powerpc, - .powerpcle, - .r600, - .riscv32, - .sparc, - .sparcel, - .tce, - .tcele, - .thumb, - .thumbeb, - .x86, - .xcore, - .amdil, - .hsail, - .spir, - .kalimba, - .lanai, - .shave, - .wasm32, - .renderscript32, - .csky, - .spirv32, - .dxil, - .loongarch32, - .xtensa, - => 32, - - .amdgcn, - .bpfel, - .bpfeb, - .le64, - .mips64, - .mips64el, - .nvptx64, - .powerpc64, - .powerpc64le, - .riscv64, - .sparc64, - .s390x, - .amdil64, - .hsail64, - .spir64, - .wasm64, - .renderscript64, - .ve, - .spirv64, - .loongarch64, - => 64, - - .aarch64, - .aarch64_be, - .aarch64_32, - => 128, - - .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, - }; - - var buffer: Type.Payload.Bits = undefined; - - const int_ty = switch (ty.zigTypeTag()) { - .Int => ty, - .Enum => ty.intTagType(&buffer), - .Float => { - const bit_count = ty.floatBits(target); - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.FloatTooBig; - } - return 0; - }, - .Bool => return 0, - else => { - if (ty.isPtrAtRuntime()) return 0; - return error.BadType; - }, - }; - - const bit_count = int_ty.intInfo(target).bits; - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.IntTooBig; - } - - return 0; -} - pub fn defaultAddressSpace( target: std.Target, context: enum { @@ -777,3 +649,14 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 { else => "o", // Non-standard }; } + +pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: 
std.builtin.CallingConvention) bool { + return switch (cc) { + .Unspecified, .Async, .Inline => true, + // For now we want to allow PTX kernels to use Zig objects, even if + // we end up exposing the ABI. The goal is to experiment with more + // integrated CPU/GPU code. + .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64, + else => false, + }; +} diff --git a/src/type.zig b/src/type.zig index e5b41e717b..4269ee56d3 100644 --- a/src/type.zig +++ b/src/type.zig @@ -9,176 +9,42 @@ const log = std.log.scoped(.Type); const target_util = @import("target.zig"); const TypedValue = @import("TypedValue.zig"); const Sema = @import("Sema.zig"); +const InternPool = @import("InternPool.zig"); -const file_struct = @This(); +/// Both types and values are canonically represented by a single 32-bit integer +/// which is an index into an `InternPool` data structure. +/// This struct abstracts over this storage by providing methods only +/// applicable to types rather than values in general. +pub const Type = struct { + ip_index: InternPool.Index, -/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication. -/// It's important for this type to be small. -/// Types are not de-duplicated, which helps with multi-threading since it obviates the requirement -/// of obtaining a lock on a global type table, as well as making the -/// garbage collection bookkeeping simpler. -/// This union takes advantage of the fact that the first page of memory -/// is unmapped, giving us 4096 possible enum tags that have no payload. -pub const Type = extern union { - /// If the tag value is less than Tag.no_payload_count, then no pointer - /// dereference is needed. - tag_if_small_enough: Tag, - ptr_otherwise: *Payload, - - pub fn zigTypeTag(ty: Type) std.builtin.TypeId { - return ty.zigTypeTagOrPoison() catch unreachable; + pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { + return ty.zigTypeTagOrPoison(mod) catch unreachable; } - pub fn zigTypeTagOrPoison(ty: Type) error{GenericPoison}!std.builtin.TypeId { - switch (ty.tag()) { - .generic_poison => return error.GenericPoison, - - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .int_signed, - .int_unsigned, - => return .Int, - - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - => return .Float, - - .error_set, - .error_set_single, - .anyerror, - .error_set_inferred, - .error_set_merged, - => return .ErrorSet, - - .anyopaque, .@"opaque" => return .Opaque, - .bool => return .Bool, - .void => return .Void, - .type => return .Type, - .comptime_int => return .ComptimeInt, - .comptime_float => return .ComptimeFloat, - .noreturn => return .NoReturn, - .null => return .Null, - .undefined => return .Undefined, - - .fn_noreturn_no_args => return .Fn, - .fn_void_no_args => return .Fn, - .fn_naked_noreturn_no_args => return .Fn, - .fn_ccc_void_no_args => return .Fn, - .function => return .Fn, - - .array, - .array_u8_sentinel_0, - .array_u8, - .array_sentinel, - => return .Array, - - .vector => return .Vector, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .pointer, - .inferred_alloc_const, - .inferred_alloc_mut, - .manyptr_u8, 
.manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return .Pointer, - - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => return .Optional, - .enum_literal => return .EnumLiteral, - - .anyerror_void_error_union, .error_union => return .ErrorUnion, - - .anyframe_T, .@"anyframe" => return .AnyFrame, - - .empty_struct, - .empty_struct_literal, - .@"struct", - .prefetch_options, - .export_options, - .extern_options, - .tuple, - .anon_struct, - => return .Struct, - - .enum_full, - .enum_nonexhaustive, - .enum_simple, - .enum_numbered, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - => return .Enum, - - .@"union", - .union_safety_tagged, - .union_tagged, - .type_info, - => return .Union, - } + pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { + return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern()); } - pub fn baseZigTypeTag(self: Type) std.builtin.TypeId { - return switch (self.zigTypeTag()) { - .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(), + pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { + return switch (self.zigTypeTag(mod)) { + .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), .Optional => { - var buf: Payload.ElemType = undefined; - return self.optionalChild(&buf).baseZigTypeTag(); + return self.optionalChild(mod).baseZigTypeTag(mod); }, else => |t| t, }; } - pub fn isSelfComparable(ty: Type, is_equality_cmp: bool) bool { - return switch (ty.zigTypeTag()) { + pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { + return switch (ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, => true, - .Vector => ty.elemType2().isSelfComparable(is_equality_cmp), + .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), .Bool, .Type, @@ -201,1317 +67,70 @@ pub const Type = extern union { .Frame, => false, - .Pointer => !ty.isSlice() and (is_equality_cmp or ty.isCPtr()), + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), .Optional => { if (!is_equality_cmp) return false; - var buf: Payload.ElemType = undefined; - return ty.optionalChild(&buf).isSelfComparable(is_equality_cmp); + return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); }, }; } - pub fn initTag(comptime small_tag: Tag) Type { - comptime assert(@enumToInt(small_tag) < Tag.no_payload_count); - return .{ .tag_if_small_enough = small_tag }; - } - - pub fn initPayload(payload: *Payload) Type { - assert(@enumToInt(payload.tag) >= Tag.no_payload_count); - return .{ .ptr_otherwise = payload }; - } - - pub fn tag(self: Type) Tag { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return self.tag_if_small_enough; - } else { - return self.ptr_otherwise.tag; - } - } - - /// Prefer `castTag` to this. 
- pub fn cast(self: Type, comptime T: type) ?*T { - if (@hasField(T, "base_tag")) { - return self.castTag(T.base_tag); - } - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return null; - } - inline for (@typeInfo(Tag).Enum.fields) |field| { - if (field.value < Tag.no_payload_count) - continue; - const t = @intToEnum(Tag, field.value); - if (self.ptr_otherwise.tag == t) { - if (T == t.Type()) { - return @fieldParentPtr(T, "base", self.ptr_otherwise); - } - return null; - } - } - unreachable; - } - - pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) - return null; - - if (self.ptr_otherwise.tag == t) - return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise); - - return null; - } - - pub fn castPointer(self: Type) ?*Payload.ElemType { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => self.cast(Payload.ElemType), - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - else => null, - }; - } - /// If it is a function pointer, returns the function type. Otherwise returns null. - pub fn castPtrToFn(ty: Type) ?Type { - if (ty.zigTypeTag() != .Pointer) return null; - const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() != .Fn) return null; + pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { + if (ty.zigTypeTag(mod) != .Pointer) return null; + const elem_ty = ty.childType(mod); + if (elem_ty.zigTypeTag(mod) != .Fn) return null; return elem_ty; } - pub fn ptrIsMutable(ty: Type) bool { - return switch (ty.tag()) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .many_const_pointer, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .c_const_pointer, - .const_slice, - => false, - - .single_mut_pointer, - .many_mut_pointer, - .manyptr_u8, - .c_mut_pointer, - .mut_slice, - => true, - - .pointer => ty.castTag(.pointer).?.data.mutable, - - else => unreachable, - }; + /// Asserts the type is a pointer. 
+ pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { + return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const; } - pub const ArrayInfo = struct { elem_type: Type, sentinel: ?Value = null, len: u64 }; - pub fn arrayInfo(self: Type) ArrayInfo { + pub const ArrayInfo = struct { + elem_type: Type, + sentinel: ?Value = null, + len: u64, + }; + + pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { return .{ - .len = self.arrayLen(), - .sentinel = self.sentinel(), - .elem_type = self.elemType(), + .len = self.arrayLen(mod), + .sentinel = self.sentinel(mod), + .elem_type = self.childType(mod), }; } - pub fn ptrInfo(self: Type) Payload.Pointer { - switch (self.tag()) { - .single_const_pointer_to_comptime_int => return .{ .data = .{ - .pointee_type = Type.initTag(.comptime_int), - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .const_slice_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .const_slice_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .many_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .many_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - 
.c_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = false, - .@"volatile" = false, - .size = .C, - } }, - .c_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = true, - .@"volatile" = false, - .size = .C, - } }, - .const_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .mut_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Slice, - } }, - - .pointer => return self.castTag(.pointer).?.*, - - .optional_single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .optional_single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrInfo(); + pub fn ptrInfoIp(ip: *const InternPool, ty: InternPool.Index) InternPool.Key.PtrType { + return switch (ip.indexToKey(ty)) { + .ptr_type => |p| p, + .opt_type => |child| switch (ip.indexToKey(child)) { + .ptr_type => |p| p, + else => unreachable, }, - else => unreachable, - } + }; } - pub fn eql(a: Type, b: Type, mod: *Module) bool { - // As a shortcut, if the small tags / addresses match, we're done. - if (a.tag_if_small_enough == b.tag_if_small_enough) return true; - - switch (a.tag()) { - .generic_poison => unreachable, - - // Detect that e.g. u64 != usize, even if the bits match on a particular target. - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - - .bool, - .void, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .anyopaque, - .@"anyframe", - .enum_literal, - => |a_tag| { - assert(a_tag != b.tag()); // because of the comparison at the top of the function. - return false; - }, - - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .int_signed, - .int_unsigned, - => { - if (b.zigTypeTag() != .Int) return false; - if (b.isNamedInt()) return false; - - // Arbitrary sized integers. The target will not be branched upon, - // because we handled target-dependent cases above. 
- const info_a = a.intInfo(@as(Target, undefined)); - const info_b = b.intInfo(@as(Target, undefined)); - return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits; - }, - - .error_set_inferred => { - // Inferred error sets are only equal if both are inferred - // and they share the same pointer. - const a_ies = a.castTag(.error_set_inferred).?.data; - const b_ies = (b.castTag(.error_set_inferred) orelse return false).data; - return a_ies == b_ies; - }, - - .anyerror => { - return b.tag() == .anyerror; - }, - - .error_set, - .error_set_single, - .error_set_merged, - => { - switch (b.tag()) { - .error_set, .error_set_single, .error_set_merged => {}, - else => return false, - } - - // Two resolved sets match if their error set names match. - // Since they are pre-sorted we compare them element-wise. - const a_set = a.errorSetNames(); - const b_set = b.errorSetNames(); - if (a_set.len != b_set.len) return false; - for (a_set, 0..) |a_item, i| { - const b_item = b_set[i]; - if (!std.mem.eql(u8, a_item, b_item)) return false; - } - return true; - }, - - .@"opaque" => { - const opaque_obj_a = a.castTag(.@"opaque").?.data; - const opaque_obj_b = (b.castTag(.@"opaque") orelse return false).data; - return opaque_obj_a == opaque_obj_b; - }, - - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => { - if (b.zigTypeTag() != .Fn) return false; - - const a_info = a.fnInfo(); - const b_info = b.fnInfo(); - - if (a_info.return_type.tag() != .generic_poison and - b_info.return_type.tag() != .generic_poison and - !eql(a_info.return_type, b_info.return_type, mod)) - return false; - - if (a_info.is_var_args != b_info.is_var_args) - return false; - - if (a_info.is_generic != b_info.is_generic) - return false; - - if (a_info.is_noinline != b_info.is_noinline) - return false; - - if (a_info.noalias_bits != b_info.noalias_bits) - return false; - - if (!a_info.cc_is_generic and a_info.cc != b_info.cc) - return false; - - if (!a_info.align_is_generic and a_info.alignment != b_info.alignment) - return false; - - if (a_info.param_types.len != b_info.param_types.len) - return false; - - for (a_info.param_types, 0..) 
|a_param_ty, i| { - const b_param_ty = b_info.param_types[i]; - if (a_info.comptime_params[i] != b_info.comptime_params[i]) - return false; - - if (a_param_ty.tag() == .generic_poison) continue; - if (b_param_ty.tag() == .generic_poison) continue; - - if (!eql(a_param_ty, b_param_ty, mod)) - return false; - } - - return true; - }, - - .array, - .array_u8_sentinel_0, - .array_u8, - .array_sentinel, - .vector, - => { - if (a.zigTypeTag() != b.zigTypeTag()) return false; - - if (a.arrayLen() != b.arrayLen()) - return false; - const elem_ty = a.elemType(); - if (!elem_ty.eql(b.elemType(), mod)) - return false; - const sentinel_a = a.sentinel(); - const sentinel_b = b.sentinel(); - if (sentinel_a) |sa| { - if (sentinel_b) |sb| { - return sa.eql(sb, elem_ty, mod); - } else { - return false; - } - } else { - return sentinel_b == null; - } - }, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .pointer, - .inferred_alloc_const, - .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => { - if (b.zigTypeTag() != .Pointer) return false; - - const info_a = a.ptrInfo().data; - const info_b = b.ptrInfo().data; - if (!info_a.pointee_type.eql(info_b.pointee_type, mod)) - return false; - if (info_a.@"align" != info_b.@"align") - return false; - if (info_a.@"addrspace" != info_b.@"addrspace") - return false; - if (info_a.bit_offset != info_b.bit_offset) - return false; - if (info_a.host_size != info_b.host_size) - return false; - if (info_a.vector_index != info_b.vector_index) - return false; - if (info_a.@"allowzero" != info_b.@"allowzero") - return false; - if (info_a.mutable != info_b.mutable) - return false; - if (info_a.@"volatile" != info_b.@"volatile") - return false; - if (info_a.size != info_b.size) - return false; - - const sentinel_a = info_a.sentinel; - const sentinel_b = info_b.sentinel; - if (sentinel_a) |sa| { - if (sentinel_b) |sb| { - if (!sa.eql(sb, info_a.pointee_type, mod)) - return false; - } else { - return false; - } - } else { - if (sentinel_b != null) - return false; - } - - return true; - }, - - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - if (b.zigTypeTag() != .Optional) return false; - - var buf_a: Payload.ElemType = undefined; - var buf_b: Payload.ElemType = undefined; - return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), mod); - }, - - .anyerror_void_error_union, .error_union => { - if (b.zigTypeTag() != .ErrorUnion) return false; - - const a_set = a.errorUnionSet(); - const b_set = b.errorUnionSet(); - if (!a_set.eql(b_set, mod)) return false; - - const a_payload = a.errorUnionPayload(); - const b_payload = b.errorUnionPayload(); - if (!a_payload.eql(b_payload, mod)) return false; - - return true; - }, - - .anyframe_T => { - if (b.zigTypeTag() != .AnyFrame) return false; - return a.elemType2().eql(b.elemType2(), mod); - }, - - .empty_struct => { - const a_namespace = a.castTag(.empty_struct).?.data; - const b_namespace = (b.castTag(.empty_struct) orelse return false).data; - return a_namespace == b_namespace; - }, - .@"struct" => { - const a_struct_obj = a.castTag(.@"struct").?.data; - const b_struct_obj = (b.castTag(.@"struct") orelse return false).data; - return a_struct_obj == b_struct_obj; - }, - .tuple, .empty_struct_literal => { - if (!b.isSimpleTuple()) return false; - - const a_tuple 
= a.tupleFields(); - const b_tuple = b.tupleFields(); - - if (a_tuple.types.len != b_tuple.types.len) return false; - - for (a_tuple.types, 0..) |a_ty, i| { - const b_ty = b_tuple.types[i]; - if (!eql(a_ty, b_ty, mod)) return false; - } - - for (a_tuple.values, 0..) |a_val, i| { - const ty = a_tuple.types[i]; - const b_val = b_tuple.values[i]; - if (a_val.tag() == .unreachable_value) { - if (b_val.tag() == .unreachable_value) { - continue; - } else { - return false; - } - } else { - if (b_val.tag() == .unreachable_value) { - return false; - } else { - if (!Value.eql(a_val, b_val, ty, mod)) return false; - } - } - } - - return true; - }, - .anon_struct => { - const a_struct_obj = a.castTag(.anon_struct).?.data; - const b_struct_obj = (b.castTag(.anon_struct) orelse return false).data; - - if (a_struct_obj.types.len != b_struct_obj.types.len) return false; - - for (a_struct_obj.names, 0..) |a_name, i| { - const b_name = b_struct_obj.names[i]; - if (!std.mem.eql(u8, a_name, b_name)) return false; - } - - for (a_struct_obj.types, 0..) |a_ty, i| { - const b_ty = b_struct_obj.types[i]; - if (!eql(a_ty, b_ty, mod)) return false; - } - - for (a_struct_obj.values, 0..) |a_val, i| { - const ty = a_struct_obj.types[i]; - const b_val = b_struct_obj.values[i]; - if (a_val.tag() == .unreachable_value) { - if (b_val.tag() == .unreachable_value) { - continue; - } else { - return false; - } - } else { - if (b_val.tag() == .unreachable_value) { - return false; - } else { - if (!Value.eql(a_val, b_val, ty, mod)) return false; - } - } - } - - return true; - }, - - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .@"struct" but b was one of these tags. - .prefetch_options, - .export_options, - .extern_options, - => unreachable, // needed to resolve the type before now - - .enum_full, .enum_nonexhaustive => { - const a_enum_obj = a.cast(Payload.EnumFull).?.data; - const b_enum_obj = (b.cast(Payload.EnumFull) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, - .enum_simple => { - const a_enum_obj = a.cast(Payload.EnumSimple).?.data; - const b_enum_obj = (b.cast(Payload.EnumSimple) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, - .enum_numbered => { - const a_enum_obj = a.cast(Payload.EnumNumbered).?.data; - const b_enum_obj = (b.cast(Payload.EnumNumbered) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .enum_simple but b was one of these tags. - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - => unreachable, // needed to resolve the type before now - - .@"union", .union_safety_tagged, .union_tagged => { - const a_union_obj = a.cast(Payload.Union).?.data; - const b_union_obj = (b.cast(Payload.Union) orelse return false).data; - return a_union_obj == b_union_obj; - }, - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .union_tagged but b was one of these tags. 
- .type_info => unreachable, // needed to resolve the type before now - - } + pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { + return Payload.Pointer.Data.fromKey(ptrInfoIp(&mod.intern_pool, ty.toIntern())); } - pub fn hash(self: Type, mod: *Module) u64 { - var hasher = std.hash.Wyhash.init(0); - self.hashWithHasher(&hasher, mod); - return hasher.final(); + pub fn eql(a: Type, b: Type, mod: *const Module) bool { + _ = mod; // TODO: remove this parameter + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. + return a.toIntern() == b.toIntern(); } - pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - switch (ty.tag()) { - .generic_poison => unreachable, - - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - => |ty_tag| { - std.hash.autoHash(hasher, std.builtin.TypeId.Int); - std.hash.autoHash(hasher, ty_tag); - }, - - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - => |ty_tag| { - std.hash.autoHash(hasher, std.builtin.TypeId.Float); - std.hash.autoHash(hasher, ty_tag); - }, - - .bool => std.hash.autoHash(hasher, std.builtin.TypeId.Bool), - .void => std.hash.autoHash(hasher, std.builtin.TypeId.Void), - .type => std.hash.autoHash(hasher, std.builtin.TypeId.Type), - .comptime_int => std.hash.autoHash(hasher, std.builtin.TypeId.ComptimeInt), - .comptime_float => std.hash.autoHash(hasher, std.builtin.TypeId.ComptimeFloat), - .noreturn => std.hash.autoHash(hasher, std.builtin.TypeId.NoReturn), - .null => std.hash.autoHash(hasher, std.builtin.TypeId.Null), - .undefined => std.hash.autoHash(hasher, std.builtin.TypeId.Undefined), - - .anyopaque => { - std.hash.autoHash(hasher, std.builtin.TypeId.Opaque); - std.hash.autoHash(hasher, Tag.anyopaque); - }, - - .@"anyframe" => { - std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - std.hash.autoHash(hasher, Tag.@"anyframe"); - }, - - .enum_literal => { - std.hash.autoHash(hasher, std.builtin.TypeId.EnumLiteral); - std.hash.autoHash(hasher, Tag.enum_literal); - }, - - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .int_signed, - .int_unsigned, - => { - // Arbitrary sized integers. The target will not be branched upon, - // because we handled target-dependent cases above. 
- std.hash.autoHash(hasher, std.builtin.TypeId.Int); - const info = ty.intInfo(@as(Target, undefined)); - std.hash.autoHash(hasher, info.signedness); - std.hash.autoHash(hasher, info.bits); - }, - - .error_set, - .error_set_single, - .error_set_merged, - => { - // all are treated like an "error set" for hashing - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.error_set); - - const names = ty.errorSetNames(); - std.hash.autoHash(hasher, names.len); - assert(std.sort.isSorted([]const u8, names, u8, std.mem.lessThan)); - for (names) |name| hasher.update(name); - }, - - .anyerror => { - // anyerror is distinct from other error sets - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.anyerror); - }, - - .error_set_inferred => { - // inferred error sets are compared using their data pointer - const ies: *Module.Fn.InferredErrorSet = ty.castTag(.error_set_inferred).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.error_set_inferred); - std.hash.autoHash(hasher, ies); - }, - - .@"opaque" => { - std.hash.autoHash(hasher, std.builtin.TypeId.Opaque); - const opaque_obj = ty.castTag(.@"opaque").?.data; - std.hash.autoHash(hasher, opaque_obj); - }, - - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => { - std.hash.autoHash(hasher, std.builtin.TypeId.Fn); - - const fn_info = ty.fnInfo(); - if (fn_info.return_type.tag() != .generic_poison) { - hashWithHasher(fn_info.return_type, hasher, mod); - } - if (!fn_info.align_is_generic) { - std.hash.autoHash(hasher, fn_info.alignment); - } - if (!fn_info.cc_is_generic) { - std.hash.autoHash(hasher, fn_info.cc); - } - std.hash.autoHash(hasher, fn_info.is_var_args); - std.hash.autoHash(hasher, fn_info.is_generic); - std.hash.autoHash(hasher, fn_info.is_noinline); - std.hash.autoHash(hasher, fn_info.noalias_bits); - - std.hash.autoHash(hasher, fn_info.param_types.len); - for (fn_info.param_types, 0..) 
|param_ty, i| { - std.hash.autoHash(hasher, fn_info.paramIsComptime(i)); - if (param_ty.tag() == .generic_poison) continue; - hashWithHasher(param_ty, hasher, mod); - } - }, - - .array, - .array_u8_sentinel_0, - .array_u8, - .array_sentinel, - => { - std.hash.autoHash(hasher, std.builtin.TypeId.Array); - - const elem_ty = ty.elemType(); - std.hash.autoHash(hasher, ty.arrayLen()); - hashWithHasher(elem_ty, hasher, mod); - hashSentinel(ty.sentinel(), elem_ty, hasher, mod); - }, - - .vector => { - std.hash.autoHash(hasher, std.builtin.TypeId.Vector); - - const elem_ty = ty.elemType(); - std.hash.autoHash(hasher, ty.vectorLen()); - hashWithHasher(elem_ty, hasher, mod); - }, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .pointer, - .inferred_alloc_const, - .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => { - std.hash.autoHash(hasher, std.builtin.TypeId.Pointer); - - const info = ty.ptrInfo().data; - hashWithHasher(info.pointee_type, hasher, mod); - hashSentinel(info.sentinel, info.pointee_type, hasher, mod); - std.hash.autoHash(hasher, info.@"align"); - std.hash.autoHash(hasher, info.@"addrspace"); - std.hash.autoHash(hasher, info.bit_offset); - std.hash.autoHash(hasher, info.host_size); - std.hash.autoHash(hasher, info.vector_index); - std.hash.autoHash(hasher, info.@"allowzero"); - std.hash.autoHash(hasher, info.mutable); - std.hash.autoHash(hasher, info.@"volatile"); - std.hash.autoHash(hasher, info.size); - }, - - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - std.hash.autoHash(hasher, std.builtin.TypeId.Optional); - - var buf: Payload.ElemType = undefined; - hashWithHasher(ty.optionalChild(&buf), hasher, mod); - }, - - .anyerror_void_error_union, .error_union => { - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion); - - const set_ty = ty.errorUnionSet(); - hashWithHasher(set_ty, hasher, mod); - - const payload_ty = ty.errorUnionPayload(); - hashWithHasher(payload_ty, hasher, mod); - }, - - .anyframe_T => { - std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - hashWithHasher(ty.childType(), hasher, mod); - }, - - .empty_struct => { - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - const namespace: *const Module.Namespace = ty.castTag(.empty_struct).?.data; - std.hash.autoHash(hasher, namespace); - }, - .@"struct" => { - const struct_obj: *const Module.Struct = ty.castTag(.@"struct").?.data; - std.hash.autoHash(hasher, struct_obj); - }, - .tuple, .empty_struct_literal => { - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - - const tuple = ty.tupleFields(); - std.hash.autoHash(hasher, tuple.types.len); - - for (tuple.types, 0..) |field_ty, i| { - hashWithHasher(field_ty, hasher, mod); - const field_val = tuple.values[i]; - if (field_val.tag() == .unreachable_value) continue; - field_val.hash(field_ty, hasher, mod); - } - }, - .anon_struct => { - const struct_obj = ty.castTag(.anon_struct).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - std.hash.autoHash(hasher, struct_obj.types.len); - - for (struct_obj.types, 0..) 
|field_ty, i| { - const field_name = struct_obj.names[i]; - const field_val = struct_obj.values[i]; - hasher.update(field_name); - hashWithHasher(field_ty, hasher, mod); - if (field_val.tag() == .unreachable_value) continue; - field_val.hash(field_ty, hasher, mod); - } - }, - - // we can't hash these based on tags because they wouldn't match the expanded version. - .prefetch_options, - .export_options, - .extern_options, - => unreachable, // needed to resolve the type before now - - .enum_full, .enum_nonexhaustive => { - const enum_obj: *const Module.EnumFull = ty.cast(Payload.EnumFull).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, - .enum_simple => { - const enum_obj: *const Module.EnumSimple = ty.cast(Payload.EnumSimple).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, - .enum_numbered => { - const enum_obj: *const Module.EnumNumbered = ty.cast(Payload.EnumNumbered).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, - // we can't hash these based on tags because they wouldn't match the expanded version. - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - => unreachable, // needed to resolve the type before now - - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj: *const Module.Union = ty.cast(Payload.Union).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Union); - std.hash.autoHash(hasher, union_obj); - }, - // we can't hash these based on tags because they wouldn't match the expanded version. - .type_info => unreachable, // needed to resolve the type before now - - } - } - - fn hashSentinel(opt_val: ?Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - if (opt_val) |s| { - std.hash.autoHash(hasher, true); - s.hash(ty, hasher, mod); - } else { - std.hash.autoHash(hasher, false); - } - } - - pub const HashContext64 = struct { - mod: *Module, - - pub fn hash(self: @This(), t: Type) u64 { - return t.hash(self.mod); - } - pub fn eql(self: @This(), a: Type, b: Type) bool { - return a.eql(b, self.mod); - } - }; - - pub const HashContext32 = struct { - mod: *Module, - - pub fn hash(self: @This(), t: Type) u32 { - return @truncate(u32, t.hash(self.mod)); - } - pub fn eql(self: @This(), a: Type, b: Type, b_index: usize) bool { - _ = b_index; - return a.eql(b, self.mod); - } - }; - - pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return Type{ .tag_if_small_enough = self.tag_if_small_enough }; - } else switch (self.ptr_otherwise.tag) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, - .bool, - .void, - .type, - .anyerror, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .enum_literal, - .anyerror_void_error_union, - .inferred_alloc_const, - .inferred_alloc_mut, - .empty_struct_literal, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .atomic_order, - 
.atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", - .generic_poison, - => unreachable, - - .array_u8, - .array_u8_sentinel_0, - => return self.copyPayloadShallow(allocator, Payload.Len), - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .anyframe_T, - => { - const payload = self.cast(Payload.ElemType).?; - const new_payload = try allocator.create(Payload.ElemType); - new_payload.* = .{ - .base = .{ .tag = payload.base.tag }, - .data = try payload.data.copy(allocator), - }; - return Type{ .ptr_otherwise = &new_payload.base }; - }, - - .int_signed, - .int_unsigned, - => return self.copyPayloadShallow(allocator, Payload.Bits), - - .vector => { - const payload = self.castTag(.vector).?.data; - return Tag.vector.create(allocator, .{ - .len = payload.len, - .elem_type = try payload.elem_type.copy(allocator), - }); - }, - .array => { - const payload = self.castTag(.array).?.data; - return Tag.array.create(allocator, .{ - .len = payload.len, - .elem_type = try payload.elem_type.copy(allocator), - }); - }, - .array_sentinel => { - const payload = self.castTag(.array_sentinel).?.data; - return Tag.array_sentinel.create(allocator, .{ - .len = payload.len, - .sentinel = try payload.sentinel.copy(allocator), - .elem_type = try payload.elem_type.copy(allocator), - }); - }, - .tuple => { - const payload = self.castTag(.tuple).?.data; - const types = try allocator.alloc(Type, payload.types.len); - const values = try allocator.alloc(Value, payload.values.len); - for (payload.types, 0..) |ty, i| { - types[i] = try ty.copy(allocator); - } - for (payload.values, 0..) |val, i| { - values[i] = try val.copy(allocator); - } - return Tag.tuple.create(allocator, .{ - .types = types, - .values = values, - }); - }, - .anon_struct => { - const payload = self.castTag(.anon_struct).?.data; - const names = try allocator.alloc([]const u8, payload.names.len); - const types = try allocator.alloc(Type, payload.types.len); - const values = try allocator.alloc(Value, payload.values.len); - for (payload.names, 0..) |name, i| { - names[i] = try allocator.dupe(u8, name); - } - for (payload.types, 0..) |ty, i| { - types[i] = try ty.copy(allocator); - } - for (payload.values, 0..) |val, i| { - values[i] = try val.copy(allocator); - } - return Tag.anon_struct.create(allocator, .{ - .names = names, - .types = types, - .values = values, - }); - }, - .function => { - const payload = self.castTag(.function).?.data; - const param_types = try allocator.alloc(Type, payload.param_types.len); - for (payload.param_types, 0..) 
|param_ty, i| { - param_types[i] = try param_ty.copy(allocator); - } - const other_comptime_params = payload.comptime_params[0..payload.param_types.len]; - const comptime_params = try allocator.dupe(bool, other_comptime_params); - return Tag.function.create(allocator, .{ - .return_type = try payload.return_type.copy(allocator), - .param_types = param_types, - .cc = payload.cc, - .alignment = payload.alignment, - .is_var_args = payload.is_var_args, - .is_generic = payload.is_generic, - .is_noinline = payload.is_noinline, - .comptime_params = comptime_params.ptr, - .align_is_generic = payload.align_is_generic, - .cc_is_generic = payload.cc_is_generic, - .section_is_generic = payload.section_is_generic, - .addrspace_is_generic = payload.addrspace_is_generic, - .noalias_bits = payload.noalias_bits, - }); - }, - .pointer => { - const payload = self.castTag(.pointer).?.data; - const sent: ?Value = if (payload.sentinel) |some| - try some.copy(allocator) - else - null; - return Tag.pointer.create(allocator, .{ - .pointee_type = try payload.pointee_type.copy(allocator), - .sentinel = sent, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = payload.size, - }); - }, - .error_union => { - const payload = self.castTag(.error_union).?.data; - return Tag.error_union.create(allocator, .{ - .error_set = try payload.error_set.copy(allocator), - .payload = try payload.payload.copy(allocator), - }); - }, - .error_set_merged => { - const names = self.castTag(.error_set_merged).?.data.keys(); - var duped_names = Module.ErrorSet.NameMap{}; - try duped_names.ensureTotalCapacity(allocator, names.len); - for (names) |name| { - duped_names.putAssumeCapacityNoClobber(name, {}); - } - return Tag.error_set_merged.create(allocator, duped_names); - }, - .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), - .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), - .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), - .empty_struct => return self.copyPayloadShallow(allocator, Payload.ContainerScope), - .@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct), - .@"union", .union_safety_tagged, .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union), - .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), - .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), - .enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull), - .@"opaque" => return self.copyPayloadShallow(allocator, Payload.Opaque), - } - } - - fn copyPayloadShallow(self: Type, allocator: Allocator, comptime T: type) error{OutOfMemory}!Type { - const payload = self.cast(T).?; - const new_payload = try allocator.create(T); - new_payload.* = payload.*; - return Type{ .ptr_otherwise = &new_payload.base }; + pub fn hash(ty: Type, mod: *const Module) u32 { + _ = mod; // TODO: remove this parameter + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. 
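// ---------------------------------------------------------------------------
// [Editor's note — illustrative sketch, not part of the patch.] The deleted
// `copy`/`copyPayloadShallow` machinery above and the recursive hashing it
// supported both become unnecessary once types are interned: structurally
// equal types always receive the same InternPool.Index, so equality is an
// index comparison and hashing mixes a single u32, exactly as the new
// `hash` below does. A toy version of that invariant (all names here —
// `ToyPool`, `Key`, `intern` — are hypothetical, not the real InternPool API):
const std = @import("std");

const ToyPool = struct {
    const Key = struct { signedness: std.builtin.Signedness, bits: u16 };

    map: std.AutoArrayHashMap(Key, void),

    /// Structurally equal keys always map to the same index.
    fn intern(pool: *ToyPool, key: Key) !u32 {
        const gop = try pool.map.getOrPut(key);
        return @intCast(u32, gop.index);
    }
};

test "equal keys intern to the same index" {
    var pool = ToyPool{ .map = std.AutoArrayHashMap(ToyPool.Key, void).init(std.testing.allocator) };
    defer pool.map.deinit();

    const a = try pool.intern(.{ .signedness = .unsigned, .bits = 32 });
    const b = try pool.intern(.{ .signedness = .unsigned, .bits = 32 });
    const c = try pool.intern(.{ .signedness = .signed, .bits = 32 });

    try std.testing.expect(a == b); // structural equality => index equality
    try std.testing.expect(a != c);
    // Hashing mirrors Type.hash below: mix the 32-bit index, no recursion.
    try std.testing.expect(std.hash.uint32(a) == std.hash.uint32(b));
}
// ---------------------------------------------------------------------------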
+ return std.hash.uint32(@enumToInt(ty.toIntern())); } pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { @@ -1550,7 +169,7 @@ pub const Type = extern union { } /// This is a debug function. In order to print types in a meaningful way - /// we also need access to the target. + /// we also need access to the module. pub fn dump( start_type: Type, comptime unused_format_string: []const u8, @@ -1559,372 +178,7 @@ pub const Type = extern union { ) @TypeOf(writer).Error!void { _ = options; comptime assert(unused_format_string.len == 0); - if (true) { - // This is disabled to work around a bug where this function - // recursively causes more generic function instantiations - // resulting in an infinite loop in the compiler. - try writer.writeAll("[TODO fix internal compiler bug regarding dump]"); - return; - } - var ty = start_type; - while (true) { - const t = ty.tag(); - switch (t) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, - .bool, - .void, - .type, - .anyerror, - .@"anyframe", - .comptime_int, - .comptime_float, - .noreturn, - => return writer.writeAll(@tagName(t)), - - .enum_literal => return writer.writeAll("@Type(.EnumLiteral)"), - .null => return writer.writeAll("@Type(.Null)"), - .undefined => return writer.writeAll("@Type(.Undefined)"), - - .empty_struct, .empty_struct_literal => return writer.writeAll("struct {}"), - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), struct_obj.owner_decl, - }); - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), union_obj.owner_decl, - }); - }, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_full.owner_decl, - }); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_simple.owner_decl, - }); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_numbered.owner_decl, - }); - }, - .@"opaque" => { - const opaque_obj = ty.castTag(.@"opaque").?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), opaque_obj.owner_decl, - }); - }, - - .anyerror_void_error_union => return writer.writeAll("anyerror!void"), - .const_slice_u8 => return writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"), - .fn_noreturn_no_args => return writer.writeAll("fn() noreturn"), - .fn_void_no_args => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args => return writer.writeAll("fn() callconv(.C) void"), - .single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"), - .manyptr_u8 => return writer.writeAll("[*]u8"), - .manyptr_const_u8 => return writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0 => return writer.writeAll("[*:0]const u8"), - .atomic_order => return 
writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op => return writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention => return writer.writeAll("std.builtin.CallingConvention"), - .address_space => return writer.writeAll("std.builtin.AddressSpace"), - .float_mode => return writer.writeAll("std.builtin.FloatMode"), - .reduce_op => return writer.writeAll("std.builtin.ReduceOp"), - .modifier => return writer.writeAll("std.builtin.CallModifier"), - .prefetch_options => return writer.writeAll("std.builtin.PrefetchOptions"), - .export_options => return writer.writeAll("std.builtin.ExportOptions"), - .extern_options => return writer.writeAll("std.builtin.ExternOptions"), - .type_info => return writer.writeAll("std.builtin.Type"), - .function => { - const payload = ty.castTag(.function).?.data; - try writer.writeAll("fn("); - for (payload.param_types, 0..) |param_type, i| { - if (i != 0) try writer.writeAll(", "); - try param_type.dump("", .{}, writer); - } - if (payload.is_var_args) { - if (payload.param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (payload.alignment != 0) { - try writer.print("align({d}) ", .{payload.alignment}); - } - if (payload.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(payload.cc)); - try writer.writeAll(") "); - } - ty = payload.return_type; - continue; - }, - - .anyframe_T => { - const return_type = ty.castTag(.anyframe_T).?.data; - try writer.print("anyframe->", .{}); - ty = return_type; - continue; - }, - .array_u8 => { - const len = ty.castTag(.array_u8).?.data; - return writer.print("[{d}]u8", .{len}); - }, - .array_u8_sentinel_0 => { - const len = ty.castTag(.array_u8_sentinel_0).?.data; - return writer.print("[{d}:0]u8", .{len}); - }, - .vector => { - const payload = ty.castTag(.vector).?.data; - try writer.print("@Vector({d}, ", .{payload.len}); - try payload.elem_type.dump("", .{}, writer); - return writer.writeAll(")"); - }, - .array => { - const payload = ty.castTag(.array).?.data; - try writer.print("[{d}]", .{payload.len}); - ty = payload.elem_type; - continue; - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - try writer.print("[{d}:{}]", .{ - payload.len, - payload.sentinel.fmtDebug(), - }); - ty = payload.elem_type; - continue; - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - try writer.writeAll("tuple{"); - for (tuple.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = tuple.values[i]; - if (val.tag() != .unreachable_value) { - try writer.writeAll("comptime "); - } - try field_ty.dump("", .{}, writer); - if (val.tag() != .unreachable_value) { - try writer.print(" = {}", .{val.fmtDebug()}); - } - } - try writer.writeAll("}"); - return; - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - try writer.writeAll("struct{"); - for (anon_struct.types, 0..) 
|field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = anon_struct.values[i]; - if (val.tag() != .unreachable_value) { - try writer.writeAll("comptime "); - } - try writer.writeAll(anon_struct.names[i]); - try writer.writeAll(": "); - try field_ty.dump("", .{}, writer); - if (val.tag() != .unreachable_value) { - try writer.print(" = {}", .{val.fmtDebug()}); - } - } - try writer.writeAll("}"); - return; - }, - .single_const_pointer => { - const pointee_type = ty.castTag(.single_const_pointer).?.data; - try writer.writeAll("*const "); - ty = pointee_type; - continue; - }, - .single_mut_pointer => { - const pointee_type = ty.castTag(.single_mut_pointer).?.data; - try writer.writeAll("*"); - ty = pointee_type; - continue; - }, - .many_const_pointer => { - const pointee_type = ty.castTag(.many_const_pointer).?.data; - try writer.writeAll("[*]const "); - ty = pointee_type; - continue; - }, - .many_mut_pointer => { - const pointee_type = ty.castTag(.many_mut_pointer).?.data; - try writer.writeAll("[*]"); - ty = pointee_type; - continue; - }, - .c_const_pointer => { - const pointee_type = ty.castTag(.c_const_pointer).?.data; - try writer.writeAll("[*c]const "); - ty = pointee_type; - continue; - }, - .c_mut_pointer => { - const pointee_type = ty.castTag(.c_mut_pointer).?.data; - try writer.writeAll("[*c]"); - ty = pointee_type; - continue; - }, - .const_slice => { - const pointee_type = ty.castTag(.const_slice).?.data; - try writer.writeAll("[]const "); - ty = pointee_type; - continue; - }, - .mut_slice => { - const pointee_type = ty.castTag(.mut_slice).?.data; - try writer.writeAll("[]"); - ty = pointee_type; - continue; - }, - .int_signed => { - const bits = ty.castTag(.int_signed).?.data; - return writer.print("i{d}", .{bits}); - }, - .int_unsigned => { - const bits = ty.castTag(.int_unsigned).?.data; - return writer.print("u{d}", .{bits}); - }, - .optional => { - const child_type = ty.castTag(.optional).?.data; - try writer.writeByte('?'); - ty = child_type; - continue; - }, - .optional_single_const_pointer => { - const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; - try writer.writeAll("?*const "); - ty = pointee_type; - continue; - }, - .optional_single_mut_pointer => { - const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; - try writer.writeAll("?*"); - ty = pointee_type; - continue; - }, - - .pointer => { - const payload = ty.castTag(.pointer).?.data; - if (payload.sentinel) |some| switch (payload.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{some.fmtDebug()}), - .Slice => try writer.print("[:{}]", .{some.fmtDebug()}), - } else switch (payload.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (payload.@"align" != 0 or payload.host_size != 0 or payload.vector_index != .none) { - try writer.print("align({d}", .{payload.@"align"}); - - if (payload.bit_offset != 0 or payload.host_size != 0) { - try writer.print(":{d}:{d}", .{ payload.bit_offset, payload.host_size }); - } - if (payload.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (payload.vector_index != .none) { - try writer.print(":{d}", .{@enumToInt(payload.vector_index)}); - } - try writer.writeAll(") "); - } - if (payload.@"addrspace" != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(payload.@"addrspace")}); - } - if (!payload.mutable) try writer.writeAll("const "); - if (payload.@"volatile") try 
writer.writeAll("volatile "); - if (payload.@"allowzero" and payload.size != .C) try writer.writeAll("allowzero "); - - ty = payload.pointee_type; - continue; - }, - .error_union => { - const payload = ty.castTag(.error_union).?.data; - try payload.error_set.dump("", .{}, writer); - try writer.writeAll("!"); - ty = payload.payload; - continue; - }, - .error_set => { - const names = ty.castTag(.error_set).?.data.names.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - return; - }, - .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data.func; - return writer.print("({s} func={d})", .{ - @tagName(t), func.owner_decl, - }); - }, - .error_set_merged => { - const names = ty.castTag(.error_set_merged).?.data.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - return; - }, - .error_set_single => { - const name = ty.castTag(.error_set_single).?.data; - return writer.print("error{{{s}}}", .{name}); - }, - .inferred_alloc_const => return writer.writeAll("(inferred_alloc_const)"), - .inferred_alloc_mut => return writer.writeAll("(inferred_alloc_mut)"), - .generic_poison => return writer.writeAll("(generic poison)"), - } - unreachable; - } + return writer.print("{any}", .{start_type.ip_index}); } pub const nameAllocArena = nameAlloc; @@ -1938,253 +192,16 @@ pub const Type = extern union { /// Prints a name suitable for `@typeName`. pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { - const t = ty.tag(); - switch (t) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, - - // TODO get rid of these Type.Tag values. 
- .atomic_order => unreachable, - .atomic_rmw_op => unreachable, - .calling_convention => unreachable, - .address_space => unreachable, - .float_mode => unreachable, - .reduce_op => unreachable, - .modifier => unreachable, - .prefetch_options => unreachable, - .export_options => unreachable, - .extern_options => unreachable, - .type_info => unreachable, - - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, - .bool, - .void, - .type, - .anyerror, - .@"anyframe", - .comptime_int, - .comptime_float, - .noreturn, - => try writer.writeAll(@tagName(t)), - - .enum_literal => try writer.writeAll("@TypeOf(.enum_literal)"), - .null => try writer.writeAll("@TypeOf(null)"), - .undefined => try writer.writeAll("@TypeOf(undefined)"), - .empty_struct_literal => try writer.writeAll("@TypeOf(.{})"), - - .empty_struct => { - const namespace = ty.castTag(.empty_struct).?.data; - try namespace.renderFullyQualifiedName(mod, "", writer); + switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - const decl = mod.declPtr(struct_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - const decl = mod.declPtr(union_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - const decl = mod.declPtr(enum_full.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const decl = mod.declPtr(enum_simple.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - const decl = mod.declPtr(enum_numbered.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .@"opaque" => { - const opaque_obj = ty.cast(Payload.Opaque).?.data; - const decl = mod.declPtr(opaque_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - - .anyerror_void_error_union => try writer.writeAll("anyerror!void"), - .const_slice_u8 => try writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"), - .fn_noreturn_no_args => try writer.writeAll("fn() noreturn"), - .fn_void_no_args => try writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args => try writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args => try writer.writeAll("fn() callconv(.C) void"), - .single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"), - .manyptr_u8 => try writer.writeAll("[*]u8"), - .manyptr_const_u8 => try writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0 => try writer.writeAll("[*:0]const u8"), - - .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data.func; - - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(func.owner_decl); - try owner_decl.renderFullyQualifiedName(mod, writer); 
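// ---------------------------------------------------------------------------
// [Editor's note — illustrative aside, not part of the patch.] The name being
// assembled here for an inferred error set is exactly the expression user
// code would write to denote that set, since it has no declared name. A
// self-contained example (`mayFail` is a hypothetical function):
const std = @import("std");

fn mayFail(x: u32) !u32 {
    if (x == 0) return error.Zero;
    return x;
}

// `!u32` infers its error set from the function body; this @typeInfo chain
// is the canonical way to name it, matching the string printed above.
const InferredSet = @typeInfo(@typeInfo(@TypeOf(mayFail)).Fn.return_type.?).ErrorUnion.error_set;

comptime {
    std.debug.assert(@typeInfo(InferredSet) == .ErrorSet);
}
// ---------------------------------------------------------------------------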
- try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); - }, - - .function => { - const fn_info = ty.fnInfo(); - if (fn_info.is_noinline) { - try writer.writeAll("noinline "); - } - try writer.writeAll("fn("); - for (fn_info.param_types, 0..) |param_ty, i| { - if (i != 0) try writer.writeAll(", "); - if (fn_info.paramIsComptime(i)) { - try writer.writeAll("comptime "); - } - if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) { - try writer.writeAll("noalias "); - }; - if (param_ty.tag() == .generic_poison) { - try writer.writeAll("anytype"); - } else { - try print(param_ty, writer, mod); - } - } - if (fn_info.is_var_args) { - if (fn_info.param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (fn_info.alignment != 0) { - try writer.print("align({d}) ", .{fn_info.alignment}); - } - if (fn_info.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(fn_info.cc)); - try writer.writeAll(") "); - } - if (fn_info.return_type.tag() == .generic_poison) { - try writer.writeAll("anytype"); - } else { - try print(fn_info.return_type, writer, mod); - } - }, - - .error_union => { - const error_union = ty.castTag(.error_union).?.data; - try print(error_union.error_set, writer, mod); - try writer.writeAll("!"); - try print(error_union.payload, writer, mod); - }, - - .array_u8 => { - const len = ty.castTag(.array_u8).?.data; - try writer.print("[{d}]u8", .{len}); - }, - .array_u8_sentinel_0 => { - const len = ty.castTag(.array_u8_sentinel_0).?.data; - try writer.print("[{d}:0]u8", .{len}); - }, - .vector => { - const payload = ty.castTag(.vector).?.data; - try writer.print("@Vector({d}, ", .{payload.len}); - try print(payload.elem_type, writer, mod); - try writer.writeAll(")"); - }, - .array => { - const payload = ty.castTag(.array).?.data; - try writer.print("[{d}]", .{payload.len}); - try print(payload.elem_type, writer, mod); - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - try writer.print("[{d}:{}]", .{ - payload.len, - payload.sentinel.fmtValue(payload.elem_type, mod), - }); - try print(payload.elem_type, writer, mod); - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - - try writer.writeAll("tuple{"); - for (tuple.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = tuple.values[i]; - if (val.tag() != .unreachable_value) { - try writer.writeAll("comptime "); - } - try print(field_ty, writer, mod); - if (val.tag() != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); - } - } - try writer.writeAll("}"); - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - - try writer.writeAll("struct{"); - for (anon_struct.types, 0..) 
|field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = anon_struct.values[i]; - if (val.tag() != .unreachable_value) { - try writer.writeAll("comptime "); - } - try writer.writeAll(anon_struct.names[i]); - try writer.writeAll(": "); - - try print(field_ty, writer, mod); - - if (val.tag() != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); - } - } - try writer.writeAll("}"); - }, - - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const info = ty.ptrInfo().data; + .ptr_type => { + const info = ty.ptrInfo(mod); if (info.sentinel) |s| switch (info.size) { .One, .C => unreachable, @@ -2200,7 +217,7 @@ pub const Type = extern union { if (info.@"align" != 0) { try writer.print("align({d}", .{info.@"align"}); } else { - const alignment = info.pointee_type.abiAlignment(mod.getTarget()); + const alignment = info.pointee_type.abiAlignment(mod); try writer.print("align({d}", .{alignment}); } @@ -2222,127 +239,228 @@ pub const Type = extern union { if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); try print(info.pointee_type, writer, mod); + return; + }, + .array_type => |array_type| { + if (array_type.sentinel == .none) { + try writer.print("[{d}]", .{array_type.len}); + try print(array_type.child.toType(), writer, mod); + } else { + try writer.print("[{d}:{}]", .{ + array_type.len, + array_type.sentinel.toValue().fmtValue(array_type.child.toType(), mod), + }); + try print(array_type.child.toType(), writer, mod); + } + return; + }, + .vector_type => |vector_type| { + try writer.print("@Vector({d}, ", .{vector_type.len}); + try print(vector_type.child.toType(), writer, mod); + try writer.writeAll(")"); + return; + }, + .opt_type => |child| { + try writer.writeByte('?'); + return print(child.toType(), writer, mod); + }, + .error_union_type => |error_union_type| { + try print(error_union_type.error_set_type.toType(), writer, mod); + try writer.writeByte('!'); + try print(error_union_type.payload_type.toType(), writer, mod); + return; + }, + .inferred_error_set_type => |index| { + const ies = mod.inferredErrorSetPtr(index); + const func = ies.func; + + try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); + const owner_decl = mod.declPtr(mod.funcPtr(func).owner_decl); + try owner_decl.renderFullyQualifiedName(mod, writer); + try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + }, + .error_set_type => |error_set_type| { + const names = error_set_type.names; + try writer.writeAll("error{"); + for (names, 0..) 
|name, i| { + if (i != 0) try writer.writeByte(','); + try writer.print("{}", .{name.fmt(&mod.intern_pool)}); + } + try writer.writeAll("}"); + }, + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, + => return writer.writeAll(@tagName(s)), + + .null, + .undefined, + => try writer.print("@TypeOf({s})", .{@tagName(s)}), + + .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), + .atomic_order => try writer.writeAll("std.builtin.AtomicOrder"), + .atomic_rmw_op => try writer.writeAll("std.builtin.AtomicRmwOp"), + .calling_convention => try writer.writeAll("std.builtin.CallingConvention"), + .address_space => try writer.writeAll("std.builtin.AddressSpace"), + .float_mode => try writer.writeAll("std.builtin.FloatMode"), + .reduce_op => try writer.writeAll("std.builtin.ReduceOp"), + .call_modifier => try writer.writeAll("std.builtin.CallModifier"), + .prefetch_options => try writer.writeAll("std.builtin.PrefetchOptions"), + .export_options => try writer.writeAll("std.builtin.ExportOptions"), + .extern_options => try writer.writeAll("std.builtin.ExternOptions"), + .type_info => try writer.writeAll("std.builtin.Type"), + + .generic_poison => unreachable, + }, + .struct_type => |struct_type| { + if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { + const decl = mod.declPtr(struct_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + } else if (struct_type.namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try namespace.renderFullyQualifiedName(mod, .empty, writer); + } else { + try writer.writeAll("@TypeOf(.{})"); + } + }, + .anon_struct_type => |anon_struct| { + if (anon_struct.types.len == 0) { + return writer.writeAll("@TypeOf(.{})"); + } + try writer.writeAll("struct{"); + for (anon_struct.types, anon_struct.values, 0..) 
|field_ty, val, i| { + if (i != 0) try writer.writeAll(", "); + if (val != .none) { + try writer.writeAll("comptime "); + } + if (anon_struct.names.len != 0) { + try writer.print("{}: ", .{anon_struct.names[i].fmt(&mod.intern_pool)}); + } + + try print(field_ty.toType(), writer, mod); + + if (val != .none) { + try writer.print(" = {}", .{val.toValue().fmtValue(field_ty.toType(), mod)}); + } + } + try writer.writeAll("}"); }, - .int_signed => { - const bits = ty.castTag(.int_signed).?.data; - return writer.print("i{d}", .{bits}); + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + const decl = mod.declPtr(union_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); }, - .int_unsigned => { - const bits = ty.castTag(.int_unsigned).?.data; - return writer.print("u{d}", .{bits}); + .opaque_type => |opaque_type| { + const decl = mod.declPtr(opaque_type.decl); + try decl.renderFullyQualifiedName(mod, writer); }, - .optional => { - const child_type = ty.castTag(.optional).?.data; - try writer.writeByte('?'); - try print(child_type, writer, mod); + .enum_type => |enum_type| { + const decl = mod.declPtr(enum_type.decl); + try decl.renderFullyQualifiedName(mod, writer); }, - .optional_single_mut_pointer => { - const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; - try writer.writeAll("?*"); - try print(pointee_type, writer, mod); - }, - .optional_single_const_pointer => { - const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; - try writer.writeAll("?*const "); - try print(pointee_type, writer, mod); - }, - .anyframe_T => { - const return_type = ty.castTag(.anyframe_T).?.data; - try writer.print("anyframe->", .{}); - try print(return_type, writer, mod); - }, - .error_set => { - const names = ty.castTag(.error_set).?.data.names.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); + .func_type => |fn_info| { + if (fn_info.is_noinline) { + try writer.writeAll("noinline "); } - try writer.writeAll("}"); - }, - .error_set_single => { - const name = ty.castTag(.error_set_single).?.data; - return writer.print("error{{{s}}}", .{name}); - }, - .error_set_merged => { - const names = ty.castTag(.error_set_merged).?.data.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); + try writer.writeAll("fn("); + for (fn_info.param_types, 0..) 
|param_ty, i| { + if (i != 0) try writer.writeAll(", "); + if (std.math.cast(u5, i)) |index| { + if (fn_info.paramIsComptime(index)) { + try writer.writeAll("comptime "); + } + if (fn_info.paramIsNoalias(index)) { + try writer.writeAll("noalias "); + } + } + if (param_ty == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(param_ty.toType(), writer, mod); + } + } + if (fn_info.is_var_args) { + if (fn_info.param_types.len != 0) { + try writer.writeAll(", "); + } + try writer.writeAll("..."); + } + try writer.writeAll(") "); + if (fn_info.alignment.toByteUnitsOptional()) |a| { + try writer.print("align({d}) ", .{a}); + } + if (fn_info.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(fn_info.cc)); + try writer.writeAll(") "); + } + if (fn_info.return_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(fn_info.return_type.toType(), writer, mod); } - try writer.writeAll("}"); }, + .anyframe_type => |child| { + if (child == .none) return writer.writeAll("anyframe"); + try writer.writeAll("anyframe->"); + return print(child.toType(), writer, mod); + }, + + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, } } - pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { - switch (self.tag()) { - .u1 => return Value.initTag(.u1_type), - .u8 => return Value.initTag(.u8_type), - .i8 => return Value.initTag(.i8_type), - .u16 => return Value.initTag(.u16_type), - .u29 => return Value.initTag(.u29_type), - .i16 => return Value.initTag(.i16_type), - .u32 => return Value.initTag(.u32_type), - .i32 => return Value.initTag(.i32_type), - .u64 => return Value.initTag(.u64_type), - .i64 => return Value.initTag(.i64_type), - .usize => return Value.initTag(.usize_type), - .isize => return Value.initTag(.isize_type), - .c_char => return Value.initTag(.c_char_type), - .c_short => return Value.initTag(.c_short_type), - .c_ushort => return Value.initTag(.c_ushort_type), - .c_int => return Value.initTag(.c_int_type), - .c_uint => return Value.initTag(.c_uint_type), - .c_long => return Value.initTag(.c_long_type), - .c_ulong => return Value.initTag(.c_ulong_type), - .c_longlong => return Value.initTag(.c_longlong_type), - .c_ulonglong => return Value.initTag(.c_ulonglong_type), - .c_longdouble => return Value.initTag(.c_longdouble_type), - .anyopaque => return Value.initTag(.anyopaque_type), - .f16 => return Value.initTag(.f16_type), - .f32 => return Value.initTag(.f32_type), - .f64 => return Value.initTag(.f64_type), - .f80 => return Value.initTag(.f80_type), - .f128 => return Value.initTag(.f128_type), - .bool => return Value.initTag(.bool_type), - .void => return Value.initTag(.void_type), - .type => return Value.initTag(.type_type), - .anyerror => return Value.initTag(.anyerror_type), - .@"anyframe" => return Value.initTag(.anyframe_type), - .comptime_int => return Value.initTag(.comptime_int_type), - .comptime_float => return Value.initTag(.comptime_float_type), - .noreturn => return Value.initTag(.noreturn_type), - .null => return Value.initTag(.null_type), - .undefined => return Value.initTag(.undefined_type), - .fn_noreturn_no_args => return Value.initTag(.fn_noreturn_no_args_type), - .fn_void_no_args => return 
Value.initTag(.fn_void_no_args_type), - .fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type), - .fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type), - .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), - .const_slice_u8 => return Value.initTag(.const_slice_u8_type), - .const_slice_u8_sentinel_0 => return Value.initTag(.const_slice_u8_sentinel_0_type), - .enum_literal => return Value.initTag(.enum_literal_type), - .manyptr_u8 => return Value.initTag(.manyptr_u8_type), - .manyptr_const_u8 => return Value.initTag(.manyptr_const_u8_type), - .manyptr_const_u8_sentinel_0 => return Value.initTag(.manyptr_const_u8_sentinel_0_type), - .atomic_order => return Value.initTag(.atomic_order_type), - .atomic_rmw_op => return Value.initTag(.atomic_rmw_op_type), - .calling_convention => return Value.initTag(.calling_convention_type), - .address_space => return Value.initTag(.address_space_type), - .float_mode => return Value.initTag(.float_mode_type), - .reduce_op => return Value.initTag(.reduce_op_type), - .modifier => return Value.initTag(.modifier_type), - .prefetch_options => return Value.initTag(.prefetch_options_type), - .export_options => return Value.initTag(.export_options_type), - .extern_options => return Value.initTag(.extern_options_type), - .type_info => return Value.initTag(.type_info_type), - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - else => return Value.Tag.ty.create(allocator, self), - } + pub fn toIntern(ty: Type) InternPool.Index { + assert(ty.ip_index != .none); + return ty.ip_index; + } + + pub fn toValue(self: Type) Value { + return self.toIntern().toValue(); } const RuntimeBitsError = Module.CompileError || error{NeedLazy}; @@ -2360,365 +478,319 @@ pub const Type = extern union { /// may return false positives. pub fn hasRuntimeBitsAdvanced( ty: Type, + mod: *Module, ignore_comptime_only: bool, strat: AbiAlignmentAdvancedStrat, ) RuntimeBitsError!bool { - switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, - .bool, - .anyerror, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .array_u8_sentinel_0, - .anyerror_void_error_union, - .error_set_inferred, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .@"anyframe", - .anyopaque, - .@"opaque", - .type_info, - .error_set_single, - .error_union, - .error_set, - .error_set_merged, - => return true, - - // Pointers to zero-bit types still have a runtime address; however, pointers - // to comptime-only types do not, with the exception of function pointers. 
- .anyframe_T, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .pointer, - => { - if (ignore_comptime_only) { - return true; - } else if (ty.childType().zigTypeTag() == .Fn) { - return !ty.childType().fnInfo().is_generic; - } else if (strat == .sema) { - return !(try strat.sema.typeRequiresComptime(ty)); - } else { - return !comptimeOnly(ty); - } - }, - - // These are false because they are comptime-only types. - .single_const_pointer_to_comptime_int, - .void, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .empty_struct, - .empty_struct_literal, - // These are function *bodies*, not pointers. - // Special exceptions have to be made when emitting functions due to - // this returning false. - .function, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - => return false, - - .optional => { - var buf: Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); - if (child_ty.isNoReturn()) { - // Then the optional is comptime-known to be null. - return false; - } - if (ignore_comptime_only) { - return true; - } else if (strat == .sema) { - return !(try strat.sema.typeRequiresComptime(child_ty)); - } else { - return !comptimeOnly(child_ty); - } - }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (struct_obj.status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - struct_obj.assumed_runtime_bits = true; - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(struct_obj.haveFieldTypes()), - .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy, - } - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try field.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + return switch (ty.toIntern()) { + // False because it is a comptime-only type. + .empty_struct_type => false, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.bits != 0, + .ptr_type => |ptr_type| { + // Pointers to zero-bit types still have a runtime address; however, pointers + // to comptime-only types do not, with the exception of function pointers. + if (ignore_comptime_only) return true; + const child_ty = ptr_type.child.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) return !mod.typeToFunc(child_ty).?.is_generic; + if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); + return !comptimeOnly(ty, mod); + }, + .anyframe_type => true, + .array_type => |array_type| { + if (array_type.sentinel != .none) { + return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } else { + return array_type.len > 0 and + try array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } + }, + .vector_type => |vector_type| { + return vector_type.len > 0 and + try vector_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + }, + .opt_type => |child| { + const child_ty = child.toType(); + if (child_ty.isNoReturn(mod)) { + // Then the optional is comptime-known to be null. 
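// ---------------------------------------------------------------------------
// [Editor's note — illustrative aside, not part of the patch.] A `noreturn`
// payload can never be constructed, so such an optional is comptime-known to
// be null and contributes no runtime bits — hence the `return false` that
// follows. Contrast this with a merely zero-bit payload, which is still
// constructible and therefore keeps the null flag at runtime:
const std = @import("std");

test "optionals of zero-bit payloads keep only the null flag" {
    // `void` is zero-bit but constructible: one byte remains for the flag.
    try std.testing.expectEqual(@as(usize, 1), @sizeOf(?void));
    const maybe: ?void = {};
    try std.testing.expect(maybe != null);
}
// ---------------------------------------------------------------------------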
+ return false; + } + if (ignore_comptime_only) { return true; - } else { - return false; - } - }, + } else if (strat == .sema) { + return !(try strat.sema.typeRequiresComptime(child_ty)); + } else { + return !comptimeOnly(child_ty, mod); + } + }, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + => true, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - return enum_full.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.fields.count() >= 2; - }, - .enum_numbered, .enum_nonexhaustive => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat); - }, + // These are function *bodies*, not pointers. + // They return false here because they are comptime-only types. + // Special exceptions have to be made when emitting functions due to + // this returning false. + .func_type => false, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - if (union_obj.status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - union_obj.assumed_runtime_bits = true; - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(union_obj.haveFieldTypes()), - .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, - } - for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .anyerror, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => true, + + // These are false because they are comptime-only types. + .void, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + => false, + + .generic_poison => unreachable, + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { + // This struct has no fields. + return false; + }; + if (struct_obj.status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. 
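// ---------------------------------------------------------------------------
// [Editor's note — illustrative aside, not part of the patch.] The
// `field_types_wip` guess that follows exists to break cycles in
// self-referential types: while a struct's fields are still being resolved,
// asking whether the struct itself has runtime bits would recurse forever,
// so the compiler optimistically answers "yes", records the assumption in
// `assumed_runtime_bits`, and emits a compile error later only if the guess
// turns out wrong. The kind of type that exercises this path:
const std = @import("std");

// Resolving `Node` requires resolving `?*Node`, whose answer (a pointer
// always has runtime bits) does not depend on finishing `Node` itself.
const Node = struct {
    value: u32,
    next: ?*Node,
};

test "self-referential struct resolves via the optimistic guess" {
    var tail = Node{ .value = 2, .next = null };
    const head = Node{ .value = 1, .next = &tail };
    try std.testing.expectEqual(@as(u32, 2), head.next.?.value);
}
// ---------------------------------------------------------------------------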
+ struct_obj.assumed_runtime_bits = true; return true; - } else { + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(struct_obj.haveFieldTypes()), + .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy, + } + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + if (val != .none) continue; // comptime field + if (try field_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; + } return false; - } + }, + + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_type.runtime_tag) { + .none => { + if (union_obj.status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. + union_obj.assumed_runtime_bits = true; + return true; + } + }, + .safety, .tagged => { + if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) { + return true; + } + }, + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(union_obj.haveFieldTypes()), + .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, + } + for (union_obj.fields.values()) |value| { + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + + .opaque_type => true, + .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) { - return true; - } - - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(union_obj.haveFieldTypes()), - .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, - } - for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - - .array, .vector => return ty.arrayLen() != 0 and - try ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat), - .array_u8 => return ty.arrayLen() != 0, - .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat), - - .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0, - - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) 
|field_ty, i| { - const val = tuple.values[i]; - if (val.tag() != .unreachable_value) continue; // comptime field - if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true; - } - return false; - }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, - } + }; } /// true if and only if the type has a well-defined memory layout /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout - pub fn hasWellDefinedLayout(ty: Type) bool { - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, - .bool, - .void, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .array_u8, - .array_u8_sentinel_0, - .int_signed, - .int_unsigned, - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, - .enum_numbered, - .vector, - .optional_single_mut_pointer, - .optional_single_const_pointer, + pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type, + .vector_type, => true, - .anyopaque, - .anyerror, - .noreturn, - .null, - .@"anyframe", - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .generic_poison, - .type, - .comptime_int, - .comptime_float, - .enum_literal, - .type_info, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + .anon_struct_type, + .opaque_type, + .anyframe_type, // These are function bodies, not function pointers. 
- .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .enum_simple, - .error_union, - .anyerror_void_error_union, - .anyframe_T, - .tuple, - .anon_struct, - .empty_struct_literal, - .empty_struct, + .func_type, => false, - .enum_full, - .enum_nonexhaustive, - => !ty.cast(Payload.EnumFull).?.data.tag_ty_inferred, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), + .opt_type => ty.isPtrLikeOptional(mod), + .ptr_type => |ptr_type| ptr_type.flags.size != .Slice, - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .void, + => true, - .array, - .array_sentinel, - => ty.childType().hasWellDefinedLayout(), + .anyerror, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + .generic_poison, + => false, + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { + // Struct with no fields has a well-defined layout of no bits. + return true; + }; + return struct_obj.layout != .Auto; + }, + .union_type => |union_type| switch (union_type.runtime_tag) { + .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, + .tagged => false, + }, + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .auto => false, + .explicit, .nonexhaustive => true, + }, - .optional => ty.isPtrLikeOptional(), - .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, - .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, - .union_tagged => false, + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }; } - pub fn hasRuntimeBits(ty: Type) bool { - return hasRuntimeBitsAdvanced(ty, false, .eager) catch unreachable; + pub fn hasRuntimeBits(ty: Type, mod: *Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; } - pub fn hasRuntimeBitsIgnoreComptime(ty: Type) bool { - return hasRuntimeBitsAdvanced(ty, true, .eager) catch unreachable; + pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; } - pub fn isFnOrHasRuntimeBits(ty: Type) bool { - switch (ty.zigTypeTag()) { + pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { + switch (ty.zigTypeTag(mod)) { .Fn => { - const fn_info = ty.fnInfo(); + const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; switch (fn_info.cc) { @@ -2727,131 +799,66 @@ pub const Type = extern union { .Inline => return false, else => {}, } - if (fn_info.return_type.comptimeOnly()) return false; + if (fn_info.return_type.toType().comptimeOnly(mod)) return false; return true; }, - else => return ty.hasRuntimeBits(), + 
else => return ty.hasRuntimeBits(mod), } } /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. - pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { .Fn => true, - else => return ty.hasRuntimeBitsIgnoreComptime(), + else => return ty.hasRuntimeBitsIgnoreComptime(mod), }; } - /// TODO add enums with no fields here - pub fn isNoReturn(ty: Type) bool { - switch (ty.tag()) { - .noreturn => return true, - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - const names = err_set_obj.names.keys(); - return names.len == 0; - }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - const names = name_map.keys(); - return names.len == 0; - }, - else => return false, - } + pub fn isNoReturn(ty: Type, mod: *Module) bool { + return mod.intern_pool.isNoReturn(ty.toIntern()); } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. - pub fn ptrAlignment(ty: Type, target: Target) u32 { - return ptrAlignmentAdvanced(ty, target, null) catch unreachable; + pub fn ptrAlignment(ty: Type, mod: *Module) u32 { + return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; } - pub fn ptrAlignmentAdvanced(ty: Type, target: Target, opt_sema: ?*Sema) !u32 { - switch (ty.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - const child_type = ty.cast(Payload.ElemType).?.data; - if (opt_sema) |sema| { - const res = try child_type.abiAlignmentAdvanced(target, .{ .sema = sema }); - return res.scalar; - } - return (child_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; - }, - - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return 1, - - .pointer => { - const ptr_info = ty.castTag(.pointer).?.data; - if (ptr_info.@"align" != 0) { - return ptr_info.@"align"; + pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| { + if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| { + return @intCast(u32, a); } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(target, .{ .sema = sema }); + const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } }, - .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(target, opt_sema), - + .opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema), else => unreachable, - } + }; } - pub fn ptrAddressSpace(self: Type) std.builtin.AddressSpace { - return switch (self.tag()) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .inferred_alloc_const, - .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - 
=> .generic, - - .pointer => self.castTag(.pointer).?.data.@"addrspace", - - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrAddressSpace(); - }, - + pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.address_space, + .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space, else => unreachable, }; } /// Returns 0 for 0-bit types. - pub fn abiAlignment(ty: Type, target: Target) u32 { - return (ty.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + pub fn abiAlignment(ty: Type, mod: *Module) u32 { + return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } /// May capture a reference to `ty`. - pub fn lazyAbiAlignment(ty: Type, target: Target, arena: Allocator) !Value { - switch (try ty.abiAlignmentAdvanced(target, .{ .lazy = arena })) { + /// Returned value has type `comptime_int`. + pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { .val => |val| return val, - .scalar => |x| return Value.Tag.int_u64.create(arena, x), + .scalar => |x| return mod.intValue(Type.comptime_int, x), } } @@ -2862,7 +869,7 @@ pub const Type = extern union { pub const AbiAlignmentAdvancedStrat = union(enum) { eager, - lazy: Allocator, + lazy, sema: *Sema, }; @@ -2874,314 +881,322 @@ pub const Type = extern union { /// necessary, possibly returning a CompileError. pub fn abiAlignmentAdvanced( ty: Type, - target: Target, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const opt_sema = switch (strat) { .sema => |sema| sema, else => null, }; - switch (ty.tag()) { - .u1, - .u8, - .i8, - .bool, - .array_u8_sentinel_0, - .array_u8, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .@"opaque", - .anyopaque, - => return AbiAlignmentAdvanced{ .scalar = 1 }, - .fn_noreturn_no_args, // represents machine code; not a pointer - .fn_void_no_args, // represents machine code; not a pointer - .fn_naked_noreturn_no_args, // represents machine code; not a pointer - .fn_ccc_void_no_args, // represents machine code; not a pointer - => return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }, - - // represents machine code; not a pointer - .function => { - const alignment = ty.castTag(.function).?.data.alignment; - if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; - return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; - }, - - .isize, - .usize, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .@"anyframe", - .anyframe_T, - => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, - .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, - 
.c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, - .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, - .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, - .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, - .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, - .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, - .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, - .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - - .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, - .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) }, - .f64 => switch (target.c_type_bit_size(.double)) { - 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) }, - else => return AbiAlignmentAdvanced{ .scalar = 8 }, - }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - else => { - var payload: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = 80, - }; - const u80_ty = initPayload(&payload.base); - return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) }; + switch (ty.toIntern()) { + .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; + return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; + }, + .ptr_type, .anyframe_type => { + return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + }, + .array_type => |array_type| { + return array_type.child.toType().abiAlignmentAdvanced(mod, strat); + }, + .vector_type => |vector_type| { + const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema); + const bits = @intCast(u32, bits_u64); + const bytes = ((bits * vector_type.len) + 7) / 8; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return AbiAlignmentAdvanced{ .scalar = alignment }; }, - }, - .f128 => switch (target.c_type_bit_size(.longdouble)) { - 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - else => return AbiAlignmentAdvanced{ .scalar = 16 }, - }, - // TODO revisit this when we have the concept of the error tag type - .anyerror_void_error_union, - .anyerror, - .error_set_inferred, - .error_set_single, - .error_set, - .error_set_merged, - => return AbiAlignmentAdvanced{ .scalar = 2 }, + .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), + .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, info.payload_type.toType()), - .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(target, strat), + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return AbiAlignmentAdvanced{ .scalar = 2 }, - .vector => { - const len = ty.arrayLen(); - const bits = try bitSizeAdvanced(ty.elemType(), target, opt_sema); - const bytes = ((bits * len) + 7) / 8; - const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes); - return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; - }, + // represents machine code; not a pointer + .func_type => 
|func_type| return AbiAlignmentAdvanced{ + .scalar = if (func_type.alignment.toByteUnitsOptional()) |a| + @intCast(u32, a) + else + target_util.defaultFunctionAlignment(target), + }, - .i16, .u16 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(16, target) }, - .u29 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(29, target) }, - .i32, .u32 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(32, target) }, - .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) }, - .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) }, + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .anyopaque, + => return AbiAlignmentAdvanced{ .scalar = 1 }, - .int_signed, .int_unsigned => { - const bits: u16 = ty.cast(Payload.Bits).?.data; - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; - return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(bits, target) }; - }, + .usize, + .isize, + .export_options, + .extern_options, + => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); + .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, + .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, + .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, + .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, + .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, + .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, + .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, + .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, + .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, + .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - switch (child_type.zigTypeTag()) { - .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat), - .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, - else => {}, - } - - switch (strat) { - .eager, .sema => { - if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, - else => |e| return e, - })) { - return AbiAlignmentAdvanced{ .scalar = 1 }; - } - return child_type.abiAlignmentAdvanced(target, strat); + .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, + .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) }, + .f64 => switch (target.c_type_bit_size(.double)) { + 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) }, + else => return AbiAlignmentAdvanced{ .scalar = 8 }, }, - .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(target, strat)) { - .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, - .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, - } - }, - - .error_union => { - // This code needs to be kept in sync with the equivalent switch 
prong - // in abiSizeAdvanced. - const data = ty.castTag(.error_union).?.data; - const code_align = abiAlignment(Type.anyerror, target); - switch (strat) { - .eager, .sema => { - if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, - else => |e| return e, - })) { - return AbiAlignmentAdvanced{ .scalar = code_align }; - } - return AbiAlignmentAdvanced{ .scalar = @max( - code_align, - (try data.payload.abiAlignmentAdvanced(target, strat)).scalar, - ) }; - }, - .lazy => |arena| { - switch (try data.payload.abiAlignmentAdvanced(target, strat)) { - .scalar => |payload_align| { - return AbiAlignmentAdvanced{ - .scalar = @max(code_align, payload_align), - }; - }, - .val => {}, - } - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - }, - } - }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (opt_sema) |sema| { - if (struct_obj.status == .field_types_wip) { - // We'll guess "pointer-aligned", if the struct has an - // underaligned pointer field then some allocations - // might require explicit alignment. - return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; - } - _ = try sema.resolveTypeFields(ty); - } - if (!struct_obj.haveFieldTypes()) switch (strat) { - .eager => unreachable, // struct layout not resolved - .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }; - if (struct_obj.layout == .Packed) { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - } + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + else => { + const u80_ty: Type = .{ .ip_index = .u80_type }; + return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; }, - .eager => {}, + }, + .f128 => switch (target.c_type_bit_size(.longdouble)) { + 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + else => return AbiAlignmentAdvanced{ .scalar = 16 }, + }, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return AbiAlignmentAdvanced{ .scalar = 2 }, + + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => return AbiAlignmentAdvanced{ .scalar = 0 }, + + .noreturn => unreachable, + .generic_poison => unreachable, + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiAlignmentAdvanced{ .scalar = 0 }; + + if (opt_sema) |sema| { + if (struct_obj.status == .field_types_wip) { + // We'll guess "pointer-aligned", if the struct has an + // underaligned pointer field then some allocations + // might require explicit alignment. 
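// Illustrative: on a 64-bit target this guess is 8 bytes. If the struct's
// real alignment later resolves lower (say its only field is a
// `*align(1) u64`), callers that relied on the guess may need an explicit
// alignment on their allocations rather than the type's natural one.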
+ return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + } + _ = try sema.resolveTypeFields(ty); } - assert(struct_obj.haveLayout()); - return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) }; - } - - const fields = ty.structFields(); - var big_align: u32 = 0; - for (fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, - else => |e| return e, - })) continue; - - const field_align = if (field.abi_align != 0) - field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(target, strat)) { - .scalar => |a| a, - .val => switch (strat) { - .eager => unreachable, // struct layout not resolved - .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, + if (!struct_obj.haveFieldTypes()) switch (strat) { + .eager => unreachable, // struct layout not resolved + .sema => unreachable, // handled above + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, }; - big_align = @max(big_align, field_align); + if (struct_obj.layout == .Packed) { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, + .eager => {}, + } + assert(struct_obj.haveLayout()); + return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; + } - // This logic is duplicated in Module.Struct.Field.alignment. - if (struct_obj.layout == .Extern or target.ofmt == .c) { - if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) { - // The C ABI requires 128 bit integer fields of structs - // to be 16-bytes aligned. - big_align = @max(big_align, 16); + const fields = ty.structFields(mod); + var big_align: u32 = 0; + for (fields.values()) |field| { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, + else => |e| return e, + })) continue; + + const field_align = if (field.abi_align != 0) + field.abi_align + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |a| a, + .val => switch (strat) { + .eager => unreachable, // struct layout not resolved + .sema => unreachable, // handled above + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, + }, + }; + big_align = @max(big_align, field_align); + + // This logic is duplicated in Module.Struct.Field.alignment. + if (struct_obj.layout == .Extern or target.ofmt == .c) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { + // The C ABI requires 128 bit integer fields of structs + // to be 16-bytes aligned. 
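// Illustrative (x86_64 SysV): `extern struct { a: u8, b: u128 }` must get
// alignment 16 to match C's `__int128`, so `big_align` is bumped to 16 here
// even where the integer type's standalone ABI alignment is smaller.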
+ big_align = @max(big_align, 16); + } } } - } - return AbiAlignmentAdvanced{ .scalar = big_align }; - }, + return AbiAlignmentAdvanced{ .scalar = big_align }; + }, + .anon_struct_type => |tuple| { + var big_align: u32 = 0; + for (tuple.types, tuple.values) |field_ty, val| { + if (val != .none) continue; // comptime field + if (!(field_ty.toType().hasRuntimeBits(mod))) continue; - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - var big_align: u32 = 0; - for (tuple.types, 0..) |field_ty, i| { - const val = tuple.values[i]; - if (val.tag() != .unreachable_value) continue; // comptime field - if (!(field_ty.hasRuntimeBits())) continue; - - switch (try field_ty.abiAlignmentAdvanced(target, strat)) { - .scalar => |field_align| big_align = @max(big_align, field_align), - .val => switch (strat) { - .eager => unreachable, // field type alignment not resolved - .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, + switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) { + .scalar => |field_align| big_align = @max(big_align, field_align), + .val => switch (strat) { + .eager => unreachable, // field type alignment not resolved + .sema => unreachable, // passed to abiAlignmentAdvanced above + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, + }, + } } + return AbiAlignmentAdvanced{ .scalar = big_align }; + }, + + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); + }, + .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, + .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) }, + + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + } + } + + fn abiAlignmentAdvancedErrorUnion( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, + payload_ty: Type, + ) Module.CompileError!AbiAlignmentAdvanced { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. 
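// Worked example (illustrative, 64-bit target): for `anyerror!u64`,
// code_align is 2 (anyerror is currently a 16-bit tag) and the payload
// alignment is 8, so the error union's alignment is @max(2, 8) = 8.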
+ const code_align = abiAlignment(Type.anyerror, mod); + switch (strat) { + .eager, .sema => { + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, + else => |e| return e, + })) { + return AbiAlignmentAdvanced{ .scalar = code_align }; } - return AbiAlignmentAdvanced{ .scalar = big_align }; + return AbiAlignmentAdvanced{ .scalar = @max( + code_align, + (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, + ) }; }, - - .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(target) }; + .lazy => { + switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |payload_align| { + return AbiAlignmentAdvanced{ + .scalar = @max(code_align, payload_align), + }; + }, + .val => {}, + } + return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }; }, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, false); + } + } + + fn abiAlignmentAdvancedOptional( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, + ) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const child_type = ty.optionalChild(mod); + + switch (child_type.zigTypeTag(mod)) { + .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), + .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, + else => {}, + } + + switch (strat) { + .eager, .sema => { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, + else => |e| return e, + })) { + return AbiAlignmentAdvanced{ .scalar = 1 }; + } + return child_type.abiAlignmentAdvanced(mod, strat); }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, true); + .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, + .val => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, }, - - .empty_struct, - .void, - .empty_struct_literal, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - .type_info, - => return AbiAlignmentAdvanced{ .scalar = 0 }, - - .noreturn, - .inferred_alloc_const, - .inferred_alloc_mut, - => unreachable, - - .generic_poison => unreachable, } } pub fn abiAlignmentAdvancedUnion( ty: Type, - target: Target, + mod: *Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -3195,6 +1210,7 @@ pub const Type = extern union { // We'll guess "pointer-aligned", if the union has an // underaligned pointer field then some allocations // might require explicit alignment. 
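// Same pointer-width guess as for structs above: illustratively, 8 bytes on
// a 64-bit target and 4 on a 32-bit one.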
+ const target = mod.getTarget(); return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; } _ = try sema.resolveTypeFields(ty); @@ -3202,32 +1218,41 @@ pub const Type = extern union { if (!union_obj.haveFieldTypes()) switch (strat) { .eager => unreachable, // union layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, }; if (union_obj.fields.count() == 0) { if (have_tag) { - return abiAlignmentAdvanced(union_obj.tag_ty, target, strat); + return abiAlignmentAdvanced(union_obj.tag_ty, mod, strat); } else { return AbiAlignmentAdvanced{ .scalar = @boolToInt(union_obj.layout == .Extern) }; } } var max_align: u32 = 0; - if (have_tag) max_align = union_obj.tag_ty.abiAlignment(target); + if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod); for (union_obj.fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, else => |e| return e, })) continue; const field_align = if (field.abi_align != 0) field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(target, strat)) { + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })).toValue() }, }, }; max_align = @max(max_align, field_align); @@ -3236,17 +1261,17 @@ pub const Type = extern union { } /// May capture a reference to `ty`. - pub fn lazyAbiSize(ty: Type, target: Target, arena: Allocator) !Value { - switch (try ty.abiSizeAdvanced(target, .{ .lazy = arena })) { + pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { + switch (try ty.abiSizeAdvanced(mod, .lazy)) { .val => |val| return val, - .scalar => |x| return Value.Tag.int_u64.create(arena, x), + .scalar => |x| return mod.intValue(Type.comptime_int, x), } } /// Asserts the type has the ABI size already resolved. /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, target: Target) u64 { - return (abiSizeAdvanced(ty, target, .eager) catch unreachable).scalar; + pub fn abiSize(ty: Type, mod: *Module) u64 { + return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; } const AbiSizeAdvanced = union(enum) { @@ -3262,315 +1287,310 @@ pub const Type = extern union { /// necessary, possibly returning a CompileError. 
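/// Illustrative behavior of the three strategies: `.eager` asserts that the
/// layout is already resolved, `.{ .sema = sema }` resolves it on demand, and
/// `.lazy` may return a `.val` holding an interned `comptime_int` with
/// `lazy_size` storage instead of a concrete `.scalar`.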
pub fn abiSizeAdvanced( ty: Type, - target: Target, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiSizeAdvanced { - switch (ty.tag()) { - .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_void_no_args => unreachable, // represents machine code; not a pointer - .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer - .function => unreachable, // represents machine code; not a pointer - .@"opaque" => unreachable, // no size available - .noreturn => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, - .modifier => unreachable, // missing call to resolveTypeFields - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - .type_info => unreachable, // missing call to resolveTypeFields + const target = mod.getTarget(); - .anyopaque, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - .single_const_pointer_to_comptime_int, - .empty_struct_literal, - .empty_struct, - .void, - => return AbiSizeAdvanced{ .scalar = 0 }, + switch (ty.toIntern()) { + .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, - .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { - .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, - .eager => {}, - } - assert(struct_obj.haveLayout()); - return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) }; + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; }, - else => { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + }, + .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + switch (try array_type.child.toType().abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| return .{ .scalar = len * elem_size }, + .val => switch (strat) { + .sema, .eager => unreachable, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, + }, + } + }, + .vector_type => |vector_type| { + const opt_sema = switch (strat) { + .sema => |sema| sema, + .eager => null, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, + }; + const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); + const elem_bits = @intCast(u32, elem_bits_u64); + const total_bits = elem_bits * vector_type.len; + const total_bytes = (total_bits + 7) / 8; + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| x, + .val => return 
.{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, + }; + const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + return AbiSizeAdvanced{ .scalar = result }; + }, + + .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), + + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return AbiSizeAdvanced{ .scalar = 2 }, + + .error_union_type => |error_union_type| { + const payload_ty = error_union_type.payload_type.toType(); + // This code needs to be kept in sync with the equivalent switch prong + // in abiAlignmentAdvanced. + const code_size = abiSize(Type.anyerror, mod); + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, + else => |e| return e, + })) { + // Same as anyerror. + return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(payload_ty, mod); + const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, + }, + }; + + var size: u64 = 0; + if (code_align > payload_align) { + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + } else { + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + } + return AbiSizeAdvanced{ .scalar = size }; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + => return AbiSizeAdvanced{ .scalar = 1 }, + + .f16 => return AbiSizeAdvanced{ .scalar = 2 }, + .f32 => return AbiSizeAdvanced{ .scalar = 4 }, + .f64 => return AbiSizeAdvanced{ .scalar = 8 }, + .f128 => return AbiSizeAdvanced{ .scalar = 16 }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + else => { + const u80_ty: Type = .{ .ip_index = .u80_type }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; + }, + }, + + .usize, + .isize, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, + .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, + .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, + .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, + .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, + .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, + .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, + .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, + 
.c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, + .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + + .anyopaque, + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return AbiSizeAdvanced{ .scalar = 0 }, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return AbiSizeAdvanced{ .scalar = 2 }, + + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + + .type_info => unreachable, + .noreturn => unreachable, + .generic_poison => unreachable, + }, + .struct_type => |struct_type| switch (ty.containerLayout(mod)) { + .Packed => { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiSizeAdvanced{ .scalar = 0 }; + + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, + .eager => {}, + } + assert(struct_obj.haveLayout()); + return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; + }, + else => { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiSizeAdvanced{ .scalar = 0 }; + if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }; + }, + .eager => {}, + } + const field_count = ty.structFieldCount(mod); + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + }, + }, + .anon_struct_type => |tuple| { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - } - }, - .eager => {}, + .lazy, .eager => {}, } - const field_count = ty.structFieldCount(); + const field_count = tuple.types.len; if (field_count == 0) { return AbiSizeAdvanced{ .scalar = 0 }; } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, target) }; + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, - }, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(target) }; - }, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - return abiSizeAdvancedUnion(ty, target, strat, union_obj, false); - }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return abiSizeAdvancedUnion(ty, target, strat, union_obj, true); - }, - - .u1, - .u8, - .i8, - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - => return AbiSizeAdvanced{ .scalar = 1 }, - - .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data }, - .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, - .array 
=> { - const payload = ty.castTag(.array).?.data; - switch (try payload.elem_type.abiSizeAdvanced(target, strat)) { - .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size }, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - } - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - switch (try payload.elem_type.abiSizeAdvanced(target, strat)) { - .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size }, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - } - }, - - .vector => { - const payload = ty.castTag(.vector).?.data; - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, - .lazy => |arena| return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(arena, ty), - }, - }; - const elem_bits = try payload.elem_type.bitSizeAdvanced(target, opt_sema); - const total_bits = elem_bits * payload.len; - const total_bytes = (total_bits + 7) / 8; - const alignment = switch (try ty.abiAlignmentAdvanced(target, strat)) { - .scalar => |x| x, - .val => return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(strat.lazy, ty), - }, - }; - const result = std.mem.alignForwardGeneric(u64, total_bytes, alignment); - return AbiSizeAdvanced{ .scalar = result }; - }, - - .isize, - .usize, - .@"anyframe", - .anyframe_T, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - }, - - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - - .f16 => return AbiSizeAdvanced{ .scalar = 2 }, - .f32 => return AbiSizeAdvanced{ .scalar = 4 }, - .f64 => return AbiSizeAdvanced{ .scalar = 8 }, - .f128 => return AbiSizeAdvanced{ .scalar = 16 }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - else => { - var payload: 
Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = 80, - }; - const u80_ty = initPayload(&payload.base); - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) }; + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); }, - }, + .opaque_type => unreachable, // no size available + .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) }, - // TODO revisit this when we have the concept of the error tag type - .anyerror_void_error_union, - .anyerror, - .error_set_inferred, - .error_set, - .error_set_merged, - .error_set_single, - => return AbiSizeAdvanced{ .scalar = 2 }, - - .i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) }, - .u29 => return AbiSizeAdvanced{ .scalar = intAbiSize(29, target) }, - .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) }, - .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) }, - .u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) }, - .int_signed, .int_unsigned => { - const bits: u16 = ty.cast(Payload.Bits).?.data; - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target) }; - }, - - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); - - if (child_type.isNoReturn()) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - - if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, - else => |e| return e, - })) return AbiSizeAdvanced{ .scalar = 1 }; - - if (ty.optionalReprIsPayload()) { - return abiSizeAdvanced(child_type, target, strat); - } - - const payload_size = switch (try child_type.abiSizeAdvanced(target, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - }; - - // Optional types are represented as a struct with the child type as the first - // field and a boolean as the second. Since the child type's abi alignment is - // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal - // to the child type's ABI alignment. - return AbiSizeAdvanced{ - .scalar = child_type.abiAlignment(target) + payload_size, - }; - }, - - .error_union => { - // This code needs to be kept in sync with the equivalent switch prong - // in abiAlignmentAdvanced. - const data = ty.castTag(.error_union).?.data; - const code_size = abiSize(Type.anyerror, target); - if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, - else => |e| return e, - })) { - // Same as anyerror. 
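// Illustrative: `anyerror!void` has a zero-bit payload, so its ABI size is
// just the 2-byte error code, exactly like `anyerror` itself.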
- return AbiSizeAdvanced{ .scalar = code_size }; - } - const code_align = abiAlignment(Type.anyerror, target); - const payload_align = abiAlignment(data.payload, target); - const payload_size = switch (try data.payload.abiSizeAdvanced(target, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - }; - - var size: u64 = 0; - if (code_align > payload_align) { - size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); - } else { - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); - size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); - } - return AbiSizeAdvanced{ .scalar = size }; + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, } } pub fn abiSizeAdvancedUnion( ty: Type, - target: Target, + mod: *Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, ) Module.CompileError!AbiSizeAdvanced { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!union_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, + .lazy => if (!union_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, .eager => {}, } - return AbiSizeAdvanced{ .scalar = union_obj.abiSize(target, have_tag) }; + return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) }; + } + + fn abiSizeAdvancedOptional( + ty: Type, + mod: *Module, + strat: AbiAlignmentAdvancedStrat, + ) Module.CompileError!AbiSizeAdvanced { + const child_ty = ty.optionalChild(mod); + + if (child_ty.isNoReturn(mod)) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + + if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, + else => |e| return e, + })) return AbiSizeAdvanced{ .scalar = 1 }; + + if (ty.optionalReprIsPayload(mod)) { + return abiSizeAdvanced(child_ty, mod, strat); + } + + const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } })).toValue() }, + }, + }; + + // Optional types are represented as a struct with the child type as the first + // field and a boolean as the second. Since the child type's abi alignment is + // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal + // to the child type's ABI alignment. 
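// Worked example (illustrative, 64-bit target): `?u64` has payload_size 8
// and child alignment 8, giving ABI size 16; `?u8` gives 1 + 1 = 2.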
+ return AbiSizeAdvanced{ + .scalar = child_ty.abiAlignment(mod) + payload_size, + }; } fn intAbiSize(bits: u16, target: Target) u64 { @@ -3585,8 +1605,8 @@ pub const Type = extern union { ); } - pub fn bitSize(ty: Type, target: Target) u64 { - return bitSizeAdvanced(ty, target, null) catch unreachable; + pub fn bitSize(ty: Type, mod: *Module) u64 { + return bitSizeAdvanced(ty, mod, null) catch unreachable; } /// If you pass `opt_sema`, any recursive type resolutions will happen if @@ -3594,568 +1614,318 @@ pub const Type = extern union { /// the type is fully resolved, and there will be no error, guaranteed. pub fn bitSizeAdvanced( ty: Type, - target: Target, + mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!u64 { + const target = mod.getTarget(); + const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - switch (ty.tag()) { - .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_void_no_args => unreachable, // represents machine code; not a pointer - .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer - .function => unreachable, // represents machine code; not a pointer - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .comptime_float => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, - .single_const_pointer_to_comptime_int => unreachable, - .empty_struct => unreachable, - .empty_struct_literal => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - .@"opaque" => unreachable, - .generic_poison => unreachable, - .void => return 0, - .bool, .u1 => return 1, - .u8, .i8 => return 8, - .i16, .u16, .f16 => return 16, - .u29 => return 29, - .i32, .u32, .f32 => return 32, - .i64, .u64, .f64 => return 64, - .f80 => return 80, - .u128, .i128, .f128 => return 128, + switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| return int_type.bits, + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), + }, + .anyframe_type => return target.ptrBitWidth(), - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + if (len == 0) return 0; + const elem_ty = array_type.child.toType(); + const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, + .vector_type => |vector_type| { + const child_ty = vector_type.child.toType(); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + }, + .opt_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return 16, + + .error_union_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. 
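// Illustrative: bitSize(?u64) on a 64-bit target is therefore
// abiSize(?u64) * 8 = 16 * 8 = 128 bits, not the 65 "useful" bits.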
+ return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, + + .usize, + .isize, + => return target.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return 16, + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + + .atomic_order => unreachable, // missing call to resolveTypeFields + .atomic_rmw_op => unreachable, // missing call to resolveTypeFields + .calling_convention => unreachable, // missing call to resolveTypeFields + .address_space => unreachable, // missing call to resolveTypeFields + .float_mode => unreachable, // missing call to resolveTypeFields + .reduce_op => unreachable, // missing call to resolveTypeFields + .call_modifier => unreachable, // missing call to resolveTypeFields + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + .type_info => unreachable, // missing call to resolveTypeFields + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; if (struct_obj.layout != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); assert(struct_obj.haveLayout()); - return try struct_obj.backing_int_ty.bitSizeAdvanced(target, opt_sema); + return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); }, - .tuple, .anon_struct => { + .anon_struct_type => { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; - } - var total: u64 = 0; - for (ty.tupleFields().types) |field_ty| { - total += try bitSizeAdvanced(field_ty, target, opt_sema); - } - return total; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; }, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return try bitSizeAdvanced(int_tag_ty, target, opt_sema); - }, - - .@"union", .union_safety_tagged, .union_tagged => { + .union_type => |union_type| { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + if (ty.containerLayout(mod) != .Packed) { + return (try ty.abiSizeAdvanced(mod, 
strat)).scalar * 8; } - const union_obj = ty.cast(Payload.Union).?.data; + const union_obj = mod.unionPtr(union_type.index); assert(union_obj.haveFieldTypes()); var size: u64 = 0; for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, target, opt_sema)); + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); } return size; }, + .opaque_type => unreachable, + .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), - .vector => { - const payload = ty.castTag(.vector).?.data; - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); - return elem_bit_size * payload.len; - }, - .array_u8 => return 8 * ty.castTag(.array_u8).?.data, - .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1), - .array => { - const payload = ty.castTag(.array).?.data; - const elem_size = std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target)); - if (elem_size == 0 or payload.len == 0) - return @as(u64, 0); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); - return (payload.len - 1) * 8 * elem_size + elem_bit_size; - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - const elem_size = std.math.max( - payload.elem_type.abiAlignment(target), - payload.elem_type.abiSize(target), - ); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); - return payload.len * 8 * elem_size + elem_bit_size; - }, - - .isize, - .usize, - .@"anyframe", - .anyframe_T, - => return target.ptrBitWidth(), - - .const_slice, - .mut_slice, - => return target.ptrBitWidth() * 2, - - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return target.ptrBitWidth() * 2, - - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - return target.ptrBitWidth(); - }, - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => { - return target.ptrBitWidth(); - }, - - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth(), - }, - - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return target.ptrBitWidth(), - - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), - - .error_set, - .error_set_single, - .anyerror_void_error_union, - .anyerror, - .error_set_inferred, - .error_set_merged, - => return 16, // TODO revisit this when we have the concept of the error tag type - - .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data, - - .optional, .error_union => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. 
- return (try abiSizeAdvanced(ty, target, strat)).scalar * 8; - }, - - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => @panic("TODO at some point we gotta resolve builtin types"), + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, } } /// Returns true if the type's layout is already resolved and it is safe /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type) bool { - switch (ty.zigTypeTag()) { + pub fn layoutIsResolved(ty: Type, mod: *Module) bool { + switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.castTag(.@"struct")) |struct_ty| { - return struct_ty.data.haveLayout(); + if (mod.typeToStruct(ty)) |struct_obj| { + return struct_obj.haveLayout(); } return true; }, .Union => { - if (ty.cast(Payload.Union)) |union_ty| { - return union_ty.data.haveLayout(); + if (mod.typeToUnion(ty)) |union_obj| { + return union_obj.haveLayout(); } return true; }, .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return true; - return ty.childType().layoutIsResolved(); + if (ty.arrayLenIncludingSentinel(mod) == 0) return true; + return ty.childType(mod).layoutIsResolved(mod); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); - return payload_ty.layoutIsResolved(); + const payload_ty = ty.optionalChild(mod); + return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - return payload_ty.layoutIsResolved(); + const payload_ty = ty.errorUnionPayload(mod); + return payload_ty.layoutIsResolved(mod); }, else => return true, } } - pub fn isSinglePointer(self: Type) bool { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, - .inferred_alloc_const, - .inferred_alloc_mut, - => true, - - .pointer => self.castTag(.pointer).?.data.size == .One, - + pub fn isSinglePointer(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_info| ptr_info.flags.size == .One, else => false, }; } /// Asserts `ty` is a pointer. - pub fn ptrSize(ty: Type) std.builtin.Type.Pointer.Size { - return ptrSizeOrNull(ty).?; + pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { + return ptrSizeOrNull(ty, mod).?; } /// Returns `null` if `ty` is not a pointer. 
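/// Illustrative: `*u8` yields `.One`, `[]u8` yields `.Slice`, `[*]u8` yields
/// `.Many`, `[*c]u8` yields `.C`, and a non-pointer such as `u8` yields `null`.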
- pub fn ptrSizeOrNull(ty: Type) ?std.builtin.Type.Pointer.Size { - return switch (ty.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => .Slice, - - .many_const_pointer, - .many_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => .Many, - - .c_const_pointer, - .c_mut_pointer, - => .C, - - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, - .inferred_alloc_const, - .inferred_alloc_mut, - => .One, - - .pointer => ty.castTag(.pointer).?.data.size, - + pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_info| ptr_info.flags.size, else => null, }; } - pub fn isSlice(self: Type) bool { - return switch (self.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => true, - - .pointer => self.castTag(.pointer).?.data.size == .Slice, - + pub fn isSlice(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .Slice, else => false, }; } - pub const SlicePtrFieldTypeBuffer = union { - elem_type: Payload.ElemType, - pointer: Payload.Pointer, - }; - - pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type { - switch (self.tag()) { - .const_slice_u8 => return Type.initTag(.manyptr_const_u8), - .const_slice_u8_sentinel_0 => return Type.initTag(.manyptr_const_u8_sentinel_0), - - .const_slice => { - const elem_type = self.castTag(.const_slice).?.data; - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_const_pointer }, - .data = elem_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - }, - .mut_slice => { - const elem_type = self.castTag(.mut_slice).?.data; - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = elem_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - }, - - .pointer => { - const payload = self.castTag(.pointer).?.data; - assert(payload.size == .Slice); - - if (payload.sentinel != null or - payload.@"align" != 0 or - payload.@"addrspace" != .generic or - payload.bit_offset != 0 or - payload.host_size != 0 or - payload.vector_index != .none or - payload.@"allowzero" or - payload.@"volatile") - { - buffer.* = .{ - .pointer = .{ - .data = .{ - .pointee_type = payload.pointee_type, - .sentinel = payload.sentinel, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = .Many, - }, - }, - }; - return Type.initPayload(&buffer.pointer.base); - } else if (payload.mutable) { - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = payload.pointee_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - } else { - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_const_pointer }, - .data = payload.pointee_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - } - }, - - else => unreachable, - } + pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { + return mod.intern_pool.slicePtrType(ty.toIntern()).toType(); } - pub fn isConstPtr(self: Type) bool { - return switch (self.tag()) { - .single_const_pointer, - .many_const_pointer, - .c_const_pointer, - 
.single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => true, - - .pointer => !self.castTag(.pointer).?.data.mutable, - + pub fn isConstPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_const, else => false, }; } - pub fn isVolatilePtr(self: Type) bool { - return switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - return payload.@"volatile"; - }, + pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { + return isVolatilePtrIp(ty, &mod.intern_pool); + } + + pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { + return switch (ip.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_volatile, else => false, }; } - pub fn isAllowzeroPtr(self: Type) bool { - return switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - return payload.@"allowzero"; - }, - else => return self.zigTypeTag() == .Optional, + pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_allowzero, + .opt_type => true, + else => false, }; } - pub fn isCPtr(self: Type) bool { - return switch (self.tag()) { - .c_const_pointer, - .c_mut_pointer, - => return true, - - .pointer => self.castTag(.pointer).?.data.size == .C, - - else => return false, + pub fn isCPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + else => false, }; } - pub fn isPtrAtRuntime(self: Type) bool { - switch (self.tag()) { - .c_const_pointer, - .c_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .manyptr_u8, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .single_const_pointer, - .single_const_pointer_to_comptime_int, - .single_mut_pointer, - => return true, - - .pointer => switch (self.castTag(.pointer).?.data.size) { - .Slice => return false, - .One, .Many, .C => return true, + pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => false, + .One, .Many, .C => true, }, - - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - if (child_type.zigTypeTag() != .Pointer) return false; - const info = child_type.ptrInfo().data; - switch (info.size) { - .Slice, .C => return false, - .Many, .One => return !info.@"allowzero", - } + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| switch (p.flags.size) { + .Slice, .C => false, + .Many, .One => !p.flags.is_allowzero, + }, + else => false, }, - - else => return false, - } + else => false, + }; } /// For pointer-like optionals, returns true, otherwise returns the allowzero property /// of pointers. - pub fn ptrAllowsZero(ty: Type) bool { - if (ty.isPtrLikeOptional()) { + pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { + if (ty.isPtrLikeOptional(mod)) { return true; } - return ty.ptrInfo().data.@"allowzero"; + return ty.ptrInfo(mod).@"allowzero"; } /// See also `isPtrLikeOptional`. 
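The `is_allowzero` flag consulted by `isAllowzeroPtr` and `isPtrAtRuntime` above is what decides whether address zero is a legal pointer value, which in turn decides whether an optional wrapper can reuse 0 as its null state. A small illustrative check of that layout consequence (user-level Zig, not part of the patch):

const std = @import("std");

// Illustrative only: a plain single pointer can use 0 as the null
// sentinel, but an allowzero pointer cannot, so its optional needs
// extra state. This is the property isPtrAtRuntime encodes when it
// returns !p.flags.is_allowzero for optional pointer children.
test "allowzero affects optional pointer layout" {
    try std.testing.expect(@sizeOf(?*u8) == @sizeOf(*u8));
    try std.testing.expect(@sizeOf(?*allowzero u8) > @sizeOf(*allowzero u8));
}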
- pub fn optionalReprIsPayload(ty: Type) bool { - switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return true, - - .optional => { - const child_ty = ty.castTag(.optional).?.data; - switch (child_ty.zigTypeTag()) { - .Pointer => { - const info = child_ty.ptrInfo().data; - switch (info.size) { - .C => return false, - .Slice, .Many, .One => return !info.@"allowzero", - } - }, - .ErrorSet => return true, - else => return false, - } + pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) { + .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero, + .error_set_type => true, + else => false, }, - - .pointer => return ty.castTag(.pointer).?.data.size == .C, - - else => return false, - } + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + else => false, + }; } /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. - pub fn isPtrLikeOptional(self: Type) bool { - switch (self.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return true, - - .optional => { - const child_ty = self.castTag(.optional).?.data; - if (child_ty.zigTypeTag() != .Pointer) return false; - const info = child_ty.ptrInfo().data; - switch (info.size) { - .Slice, .C => return false, - .Many, .One => return !info.@"allowzero", - } + /// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. + pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice, .C => false, + .Many, .One => !ptr_type.flags.is_allowzero, + }, + else => false, }, - - .pointer => return self.castTag(.pointer).?.data.size == .C, - - else => return false, - } + else => false, + }; } /// For *[N]T, returns [N]T. /// For *T, returns T. /// For [*]T, returns T. - pub fn childType(ty: Type) Type { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.elem_type, - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => ty.castPointer().?.data, - - .array_u8, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => Type.u8, - - .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), - .pointer => ty.castTag(.pointer).?.data.pointee_type, - - else => unreachable, - }; + pub fn childType(ty: Type, mod: *const Module) Type { + return childTypeIp(ty, &mod.intern_pool); } - /// Asserts the type is a pointer or array type. - /// TODO this is deprecated in favor of `childType`. - pub const elemType = childType; + pub fn childTypeIp(ty: Type, ip: *const InternPool) Type { + return ip.childType(ty.toIntern()).toType(); + } /// For *[N]T, returns T. 
/// For ?*T, returns T. @@ -4166,283 +1936,178 @@ pub const Type = extern union { /// For [N]T, returns T. /// For []T, returns T. /// For anyframe->T, returns T. - pub fn elemType2(ty: Type) Type { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.elem_type, - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => ty.castPointer().?.data, - - .single_const_pointer, - .single_mut_pointer, - => ty.castPointer().?.data.shallowElemType(), - - .array_u8, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => Type.u8, - - .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), - .pointer => { - const info = ty.castTag(.pointer).?.data; - const child_ty = info.pointee_type; - if (info.size == .One) { - return child_ty.shallowElemType(); - } else { - return child_ty; - } + pub fn elemType2(ty: Type, mod: *const Module) Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One => ptr_type.child.toType().shallowElemType(mod), + .Many, .C, .Slice => ptr_type.child.toType(), }, - .optional => ty.castTag(.optional).?.data.childType(), - .optional_single_mut_pointer => ty.castPointer().?.data, - .optional_single_const_pointer => ty.castPointer().?.data, - - .anyframe_T => ty.castTag(.anyframe_T).?.data, - .@"anyframe" => Type.void, - + .anyframe_type => |child| { + assert(child != .none); + return child.toType(); + }, + .vector_type => |vector_type| vector_type.child.toType(), + .array_type => |array_type| array_type.child.toType(), + .opt_type => |child| mod.intern_pool.childType(child).toType(), else => unreachable, }; } - fn shallowElemType(child_ty: Type) Type { - return switch (child_ty.zigTypeTag()) { - .Array, .Vector => child_ty.childType(), + fn shallowElemType(child_ty: Type, mod: *const Module) Type { + return switch (child_ty.zigTypeTag(mod)) { + .Array, .Vector => child_ty.childType(mod), else => child_ty, }; } /// For vectors, returns the element type. Otherwise returns self. - pub fn scalarType(ty: Type) Type { - return switch (ty.zigTypeTag()) { - .Vector => ty.childType(), + pub fn scalarType(ty: Type, mod: *Module) Type { + return switch (ty.zigTypeTag(mod)) { + .Vector => ty.childType(mod), else => ty, }; } /// Asserts that the type is an optional. - /// Resulting `Type` will have inner memory referencing `buf`. /// Note that for C pointers this returns the type unmodified. 
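Note the asymmetry the new `elemType2` preserves: `childType` of `*[N]T` is `[N]T`, while `elemType2` looks through the single-item pointer and yields `T`. The standard library exposes the same "deep" unwrapping, which makes for a convenient illustration (user-level sketch, not part of the patch):

const std = @import("std");

// Illustrative only: std.meta.Elem mirrors elemType2, including the
// *[N]T case where a single-item pointer to an array yields T.
test "element type unwrapping" {
    try std.testing.expect(std.meta.Elem(*[4]u8) == u8);
    try std.testing.expect(std.meta.Elem([]const f32) == f32);
    try std.testing.expect(std.meta.Elem(@Vector(4, u16)) == u16);
}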
- pub fn optionalChild(ty: Type, buf: *Payload.ElemType) Type { - return switch (ty.tag()) { - .optional => ty.castTag(.optional).?.data, - .optional_single_mut_pointer => { - buf.* = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty.castPointer().?.data, - }; - return Type.initPayload(&buf.base); + pub fn optionalChild(ty: Type, mod: *const Module) Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child| child.toType(), + .ptr_type => |ptr_type| b: { + assert(ptr_type.flags.size == .C); + break :b ty; }, - .optional_single_const_pointer => { - buf.* = .{ - .base = .{ .tag = .single_const_pointer }, - .data = ty.castPointer().?.data, - }; - return Type.initPayload(&buf.base); - }, - - .pointer, // here we assume it is a C pointer - .c_const_pointer, - .c_mut_pointer, - => return ty, - else => unreachable, }; } - /// Asserts that the type is an optional. - /// Same as `optionalChild` but allocates the buffer if needed. - pub fn optionalChildAlloc(ty: Type, allocator: Allocator) !Type { - switch (ty.tag()) { - .optional => return ty.castTag(.optional).?.data, - .optional_single_mut_pointer => { - return Tag.single_mut_pointer.create(allocator, ty.castPointer().?.data); - }, - .optional_single_const_pointer => { - return Tag.single_const_pointer.create(allocator, ty.castPointer().?.data); - }, - .pointer, // here we assume it is a C pointer - .c_const_pointer, - .c_mut_pointer, - => return ty, - - else => unreachable, - } - } - /// Returns the tag type of a union, if the type is a union and it has a tag type. /// Otherwise, returns `null`. - pub fn unionTagType(ty: Type) ?Type { - return switch (ty.tag()) { - .union_tagged => { - const union_obj = ty.castTag(.union_tagged).?.data; - assert(union_obj.haveFieldTypes()); - return union_obj.tag_ty; + pub fn unionTagType(ty: Type, mod: *Module) ?Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .union_type => |union_type| switch (union_type.runtime_tag) { + .tagged => { + const union_obj = mod.unionPtr(union_type.index); + assert(union_obj.haveFieldTypes()); + return union_obj.tag_ty; + }, + else => null, }, - - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // needed to call resolveTypeFields first - else => null, }; } /// Same as `unionTagType` but includes safety tag. /// Codegen should use this version. - pub fn unionTagTypeSafety(ty: Type) ?Type { - return switch (ty.tag()) { - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; + pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .union_type => |union_type| { + if (!union_type.hasTag()) return null; + const union_obj = mod.unionPtr(union_type.index); assert(union_obj.haveFieldTypes()); return union_obj.tag_ty; }, - - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // needed to call resolveTypeFields first - else => null, }; } /// Asserts the type is a union; returns the tag type, even if the tag will /// not be stored at runtime. 
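The tag type that `unionTagType` extracts from a `union_type` key with `runtime_tag == .tagged` is the same enum that user code reaches through reflection. An illustrative cross-check (user-level Zig, not part of the patch):

const std = @import("std");

// Illustrative only: for a tagged union, std.meta.Tag and
// @typeInfo agree on the tag enum, matching what unionTagType
// reports inside the compiler.
test "union tag type" {
    const U = union(enum) { a: u8, b: void };
    try std.testing.expect(std.meta.Tag(U) == @typeInfo(U).Union.tag_type.?);
}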
- pub fn unionTagTypeHypothetical(ty: Type) Type { - const union_obj = ty.cast(Payload.Union).?.data; + pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type { + const union_obj = mod.typeToUnion(ty).?; assert(union_obj.haveFieldTypes()); return union_obj.tag_ty; } - pub fn unionFields(ty: Type) Module.Union.Fields { - const union_obj = ty.cast(Payload.Union).?.data; + pub fn unionFields(ty: Type, mod: *Module) Module.Union.Fields { + const union_obj = mod.typeToUnion(ty).?; assert(union_obj.haveFieldTypes()); return union_obj.fields; } pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) Type { - const union_obj = ty.cast(Payload.Union).?.data; + const union_obj = mod.typeToUnion(ty).?; const index = ty.unionTagFieldIndex(enum_tag, mod).?; assert(union_obj.haveFieldTypes()); return union_obj.fields.values()[index].ty; } pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize { - const union_obj = ty.cast(Payload.Union).?.data; + const union_obj = mod.typeToUnion(ty).?; const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod) orelse return null; - const name = union_obj.tag_ty.enumFieldName(index); + const name = union_obj.tag_ty.enumFieldName(index, mod); return union_obj.fields.getIndex(name); } - pub fn unionHasAllZeroBitFieldTypes(ty: Type) bool { - return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(); + pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { + const union_obj = mod.typeToUnion(ty).?; + return union_obj.hasAllZeroBitFieldTypes(mod); } - pub fn unionGetLayout(ty: Type, target: Target) Module.Union.Layout { - switch (ty.tag()) { - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - return union_obj.getLayout(target, false); - }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.getLayout(target, true); - }, - else => unreachable, - } + pub fn unionGetLayout(ty: Type, mod: *Module) Module.Union.Layout { + const union_type = mod.intern_pool.indexToKey(ty.toIntern()).union_type; + const union_obj = mod.unionPtr(union_type.index); + return union_obj.getLayout(mod, union_type.hasTag()); } - pub fn containerLayout(ty: Type) std.builtin.Type.ContainerLayout { - return switch (ty.tag()) { - .tuple, .empty_struct_literal, .anon_struct => .Auto, - .@"struct" => ty.castTag(.@"struct").?.data.layout, - .@"union" => ty.castTag(.@"union").?.data.layout, - .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.layout, - .union_tagged => ty.castTag(.union_tagged).?.data.layout, + pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; + return struct_obj.layout; + }, + .anon_struct_type => .Auto, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.layout; + }, else => unreachable, }; } /// Asserts that the type is an error union. 
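`containerLayout` above collapses anonymous structs to `.Auto` and otherwise defers to the struct or union object. The three layouts it distinguishes are visible from user code as well (illustrative sketch, not part of the patch):

const std = @import("std");

// Illustrative only: the three container layouts containerLayout
// distinguishes, observed via builtin reflection.
test "container layouts" {
    const A = struct { x: u32 };
    const E = extern struct { x: u32 };
    const P = packed struct { lo: u4, hi: u4 };
    try std.testing.expect(@typeInfo(A).Struct.layout == .Auto);
    try std.testing.expect(@typeInfo(E).Struct.layout == .Extern);
    try std.testing.expect(@typeInfo(P).Struct.layout == .Packed);
}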
- pub fn errorUnionPayload(self: Type) Type { - return switch (self.tag()) { - .anyerror_void_error_union => Type.initTag(.void), - .error_union => self.castTag(.error_union).?.data.payload, - else => unreachable, - }; + pub fn errorUnionPayload(ty: Type, mod: *Module) Type { + return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type.toType(); } - pub fn errorUnionSet(self: Type) Type { - return switch (self.tag()) { - .anyerror_void_error_union => Type.initTag(.anyerror), - .error_union => self.castTag(.error_union).?.data.error_set, - else => unreachable, - }; + /// Asserts that the type is an error union. + pub fn errorUnionSet(ty: Type, mod: *Module) Type { + return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.error_set_type.toType(); } /// Returns false for unresolved inferred error sets. - pub fn errorSetIsEmpty(ty: Type) bool { - switch (ty.tag()) { - .anyerror => return false, - .error_set_inferred => { - const inferred_error_set = ty.castTag(.error_set_inferred).?.data; - // Can't know for sure. - if (!inferred_error_set.is_resolved) return false; - if (inferred_error_set.is_anyerror) return false; - return inferred_error_set.errors.count() == 0; + pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { + return switch (ty.toIntern()) { + .anyerror_type => false, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + .inferred_error_set_type => |index| { + const inferred_error_set = mod.inferredErrorSetPtr(index); + // Can't know for sure. + if (!inferred_error_set.is_resolved) return false; + if (inferred_error_set.is_anyerror) return false; + return inferred_error_set.errors.count() == 0; + }, + else => unreachable, }, - .error_set_single => return false, - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - return err_set_obj.names.count() == 0; - }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - return name_map.count() == 0; - }, - else => unreachable, - } + }; } /// Returns true if it is an error set that includes anyerror, false otherwise. /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. - pub fn isAnyError(ty: Type) bool { - return switch (ty.tag()) { - .anyerror => true, - .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror, - else => false, + pub fn isAnyError(ty: Type, mod: *Module) bool { + return switch (ty.toIntern()) { + .anyerror_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror, + else => false, + }, }; } - pub fn isError(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isError(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .ErrorUnion, .ErrorSet => true, else => false, }; @@ -4451,230 +2116,221 @@ pub const Type = extern union { /// Returns whether ty, which must be an error set, includes an error `name`. /// Might return a false negative if `ty` is an inferred error set and not fully /// resolved yet. 
- pub fn errorSetHasField(ty: Type, name: []const u8) bool { - if (ty.isAnyError()) { - return true; - } + pub fn errorSetHasFieldIp( + ip: *const InternPool, + ty: InternPool.Index, + name: InternPool.NullTerminatedString, + ) bool { + return switch (ty) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| { + return error_set_type.nameIndex(ip, name) != null; + }, + .inferred_error_set_type => |index| { + const ies = ip.inferredErrorSetPtrConst(index); + if (ies.is_anyerror) return true; + return ies.errors.contains(name); + }, + else => unreachable, + }, + }; + } - switch (ty.tag()) { - .error_set_single => { - const data = ty.castTag(.error_set_single).?.data; - return std.mem.eql(u8, data, name); + /// Returns whether ty, which must be an error set, includes an error `name`. + /// Might return a false negative if `ty` is an inferred error set and not fully + /// resolved yet. + pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |error_set_type| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return error_set_type.nameIndex(ip, field_name_interned) != null; + }, + .inferred_error_set_type => |index| { + const ies = ip.inferredErrorSetPtr(index); + if (ies.is_anyerror) return true; + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return ies.errors.contains(field_name_interned); + }, + else => unreachable, }, - .error_set_inferred => { - const data = ty.castTag(.error_set_inferred).?.data; - return data.errors.contains(name); - }, - .error_set_merged => { - const data = ty.castTag(.error_set_merged).?.data; - return data.contains(name); - }, - .error_set => { - const data = ty.castTag(.error_set).?.data; - return data.names.contains(name); - }, - else => unreachable, - } + }; } /// Asserts the type is an array or vector or struct. 
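The `getString(name).unwrap() orelse return false` shortcut above leans on an interning invariant: every error name that exists anywhere in the program has been interned, so a string absent from the pool cannot name a member of any error set. The question itself is the same one user code can ask with reflection (illustrative sketch, not part of the patch):

const std = @import("std");

// Illustrative only: membership in an error set, checked the
// user-level way that errorSetHasField answers internally.
test "error set membership" {
    const Set = error{ FileNotFound, AccessDenied };
    var found = false;
    inline for (@typeInfo(Set).ErrorSet.?) |e| {
        if (comptime std.mem.eql(u8, e.name, "FileNotFound")) found = true;
    }
    try std.testing.expect(found);
}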
- pub fn arrayLen(ty: Type) u64 { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.len, - .array => ty.castTag(.array).?.data.len, - .array_sentinel => ty.castTag(.array_sentinel).?.data.len, - .array_u8 => ty.castTag(.array_u8).?.data, - .array_u8_sentinel_0 => ty.castTag(.array_u8_sentinel_0).?.data, - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), - .empty_struct, .empty_struct_literal => 0, + pub fn arrayLen(ty: Type, mod: *const Module) u64 { + return arrayLenIp(ty, &mod.intern_pool); + } + + pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 { + return switch (ip.indexToKey(ty.toIntern())) { + .vector_type => |vector_type| vector_type.len, + .array_type => |array_type| array_type.len, + .struct_type => |struct_type| { + const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0; + return struct_obj.fields.count(); + }, + .anon_struct_type => |tuple| tuple.types.len, else => unreachable, }; } - pub fn arrayLenIncludingSentinel(ty: Type) u64 { - return ty.arrayLen() + @boolToInt(ty.sentinel() != null); + pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { + return ty.arrayLen(mod) + @boolToInt(ty.sentinel(mod) != null); } - pub fn vectorLen(ty: Type) u32 { - return switch (ty.tag()) { - .vector => @intCast(u32, ty.castTag(.vector).?.data.len), - .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), - .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), + pub fn vectorLen(ty: Type, mod: *const Module) u32 { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .vector_type => |vector_type| vector_type.len, + .anon_struct_type => |tuple| @intCast(u32, tuple.types.len), else => unreachable, }; } /// Asserts the type is an array, pointer or vector. - pub fn sentinel(self: Type) ?Value { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, - .vector, - .array, - .array_u8, - .manyptr_u8, - .manyptr_const_u8, - .const_slice_u8, - .const_slice, - .mut_slice, - .tuple, - .empty_struct_literal, - .@"struct", - => return null, + pub fn sentinel(ty: Type, mod: *const Module) ?Value { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .vector_type, + .struct_type, + .anon_struct_type, + => null, - .pointer => return self.castTag(.pointer).?.data.sentinel, - .array_sentinel => return self.castTag(.array_sentinel).?.data.sentinel, - - .array_u8_sentinel_0, - .const_slice_u8_sentinel_0, - .manyptr_const_u8_sentinel_0, - => return Value.zero, + .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, else => unreachable, }; } /// Returns true if and only if the type is a fixed-width integer. - pub fn isInt(self: Type) bool { - return self.isSignedInt() or self.isUnsignedInt(); + pub fn isInt(self: Type, mod: *const Module) bool { + return self.isSignedInt(mod) or self.isUnsignedInt(mod); } /// Returns true if and only if the type is a fixed-width, signed integer. 
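The rewritten `sentinel` reflects the rule that only arrays and pointers can carry a sentinel; vectors, structs, and tuples never do, and `arrayLenIncludingSentinel` builds directly on it. A small illustrative check via the standard library (user-level Zig, not part of the patch):

const std = @import("std");

// Illustrative only: sentinel queries mirrored in user code.
// Sentinel-terminated slices and arrays report their sentinel;
// a plain slice reports none.
test "sentinel queries" {
    try std.testing.expect(std.meta.sentinel([:0]const u8).? == 0);
    try std.testing.expect(std.meta.sentinel([4:0]u8).? == 0);
    try std.testing.expect(std.meta.sentinel([]const u8) == null);
}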
- pub fn isSignedInt(self: Type) bool { - return switch (self.tag()) { - .int_signed, - .i8, - .isize, - .c_char, - .c_short, - .c_int, - .c_long, - .c_longlong, - .i16, - .i32, - .i64, - .i128, - => true, - - else => false, + pub fn isSignedInt(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .c_char_type, .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.signedness == .signed, + else => false, + }, }; } /// Returns true if and only if the type is a fixed-width, unsigned integer. - pub fn isUnsignedInt(self: Type) bool { - return switch (self.tag()) { - .int_unsigned, - .usize, - .c_ushort, - .c_uint, - .c_ulong, - .c_ulonglong, - .u1, - .u8, - .u16, - .u29, - .u32, - .u64, - .u128, - => true, - - else => false, + pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.signedness == .unsigned, + else => false, + }, }; } /// Returns true for integers, enums, error sets, and packed structs. /// If this function returns true, then intInfo() can be called on the type. - pub fn isAbiInt(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isAbiInt(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { .Int, .Enum, .ErrorSet => true, - .Struct => ty.containerLayout() == .Packed, + .Struct => ty.containerLayout(mod) == .Packed, else => false, }; } /// Asserts the type is an integer, enum, error set, or vector of one of them. - pub fn intInfo(self: Type, target: Target) std.builtin.Type.Int { - var ty = self; - while (true) switch (ty.tag()) { - .int_unsigned => return .{ - .signedness = .unsigned, - .bits = ty.castTag(.int_unsigned).?.data, - }, - .int_signed => return .{ - .signedness = .signed, - .bits = ty.castTag(.int_signed).?.data, - }, - .u1 => return .{ .signedness = .unsigned, .bits = 1 }, - .u8 => return .{ .signedness = .unsigned, .bits = 8 }, - .i8 => return .{ .signedness = .signed, .bits = 8 }, - .u16 => return .{ .signedness = .unsigned, .bits = 16 }, - .i16 => return .{ .signedness = .signed, .bits = 16 }, - .u29 => return .{ .signedness = .unsigned, .bits = 29 }, - .u32 => return .{ .signedness = .unsigned, .bits = 32 }, - .i32 => return .{ .signedness = .signed, .bits = 32 }, - .u64 => return .{ .signedness = .unsigned, .bits = 64 }, - .i64 => return .{ .signedness = .signed, .bits = 64 }, - .u128 => return .{ .signedness = .unsigned, .bits = 128 }, - .i128 => return .{ .signedness = .signed, .bits = 128 }, - .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, - .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, - .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, - .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, - .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, - .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, - .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, - .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, - .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, - .c_longlong => 
return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, - .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { + const target = mod.getTarget(); + var ty = starting_ty; - .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, - .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, - .enum_simple => { - const enum_obj = ty.castTag(.enum_simple).?.data; - const field_count = enum_obj.fields.count(); - if (field_count == 0) return .{ .signedness = .unsigned, .bits = 0 }; - return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) }; - }, - - .error_set, .error_set_single, .anyerror, .error_set_inferred, .error_set_merged => { + while (true) switch (ty.toIntern()) { + .anyerror_type => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; }, + .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, + .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| return int_type, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.layout == .Packed); + ty = struct_obj.backing_int_ty; + }, + .enum_type => |enum_type| ty = enum_type.tag_ty.toType(), + .vector_type => |vector_type| ty = vector_type.child.toType(), - .vector => ty = ty.castTag(.vector).?.data.elem_type, + // TODO revisit this when error sets support custom int types + .error_set_type, .inferred_error_set_type => return .{ .signedness = .unsigned, .bits = 16 }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout == .Packed); - ty = struct_obj.backing_int_ty; + .anon_struct_type => unreachable, + + .ptr_type => unreachable, + .anyframe_type => unreachable, + .array_type => unreachable, + + .opt_type => unreachable, + .error_union_type => unreachable, + .func_type => unreachable, + .simple_type => unreachable, // handled via Index enum tag above + + .union_type => unreachable, + .opaque_type => unreachable, + + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, - - else => unreachable, }; } - pub fn isNamedInt(self: Type) bool { - return switch 
(self.tag()) { - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, + pub fn isNamedInt(ty: Type) bool { + return switch (ty.toIntern()) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, => true, else => false, @@ -4682,14 +2338,14 @@ pub const Type = extern union { } /// Returns `false` for `comptime_float`. - pub fn isRuntimeFloat(self: Type) bool { - return switch (self.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, + pub fn isRuntimeFloat(ty: Type) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, => true, else => false, @@ -4697,15 +2353,15 @@ pub const Type = extern union { } /// Returns `true` for `comptime_float`. - pub fn isAnyFloat(self: Type) bool { - return switch (self.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_float, + pub fn isAnyFloat(ty: Type) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_float_type, => true, else => false, @@ -4714,431 +2370,304 @@ pub const Type = extern union { /// Asserts the type is a fixed-size float or comptime_float. /// Returns 128 for comptime_float types. - pub fn floatBits(self: Type, target: Target) u16 { - return switch (self.tag()) { - .f16 => 16, - .f32 => 32, - .f64 => 64, - .f80 => 80, - .f128, .comptime_float => 128, - .c_longdouble => target.c_type_bit_size(.longdouble), + pub fn floatBits(ty: Type, target: Target) u16 { + return switch (ty.toIntern()) { + .f16_type => 16, + .f32_type => 32, + .f64_type => 64, + .f80_type => 80, + .f128_type, .comptime_float_type => 128, + .c_longdouble_type => target.c_type_bit_size(.longdouble), else => unreachable, }; } - /// Asserts the type is a function. - pub fn fnParamLen(self: Type) usize { - return switch (self.tag()) { - .fn_noreturn_no_args => 0, - .fn_void_no_args => 0, - .fn_naked_noreturn_no_args => 0, - .fn_ccc_void_no_args => 0, - .function => self.castTag(.function).?.data.param_types.len, - - else => unreachable, - }; + /// Asserts the type is a function or a function pointer. + pub fn fnReturnType(ty: Type, mod: *Module) Type { + return fnReturnTypeIp(ty, &mod.intern_pool); } - /// Asserts the type is a function. The length of the slice must be at least the length - /// given by `fnParamLen`. - pub fn fnParamTypes(self: Type, types: []Type) void { - switch (self.tag()) { - .fn_noreturn_no_args => return, - .fn_void_no_args => return, - .fn_naked_noreturn_no_args => return, - .fn_ccc_void_no_args => return, - .function => { - const payload = self.castTag(.function).?.data; - @memcpy(types[0..payload.param_types.len], payload.param_types); - }, - + pub fn fnReturnTypeIp(ty: Type, ip: *const InternPool) Type { + return switch (ip.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ip.indexToKey(ptr_type.child).func_type.return_type, + .func_type => |func_type| func_type.return_type, else => unreachable, - } + }.toType(); } /// Asserts the type is a function. 
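The new `intInfo` loop above unwraps layers until it reaches a genuine integer: packed structs defer to their backing integer, enums to their tag type, vectors to their element type. The first two steps can be observed from user code (illustrative sketch, not part of the patch; `backing_integer` in `std.builtin.Type.Struct` is assumed to be present in this 0.11-era std):

const std = @import("std");

// Illustrative only: the wrapper-unwrapping steps of intInfo,
// seen through builtin reflection.
test "integer info of wrapper types" {
    const P = packed struct { lo: u3, hi: u5 };
    try std.testing.expect(@typeInfo(P).Struct.backing_integer.? == u8);
    const E = enum(u4) { a, b };
    try std.testing.expect(@typeInfo(@typeInfo(E).Enum.tag_type).Int.bits == 4);
}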
- pub fn fnParamType(self: Type, index: usize) Type { - switch (self.tag()) { - .function => { - const payload = self.castTag(.function).?.data; - return payload.param_types[index]; - }, - - else => unreachable, - } + pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc; } - /// Asserts the type is a function. - pub fn fnReturnType(self: Type) Type { - return switch (self.tag()) { - .fn_noreturn_no_args => Type.initTag(.noreturn), - .fn_naked_noreturn_no_args => Type.initTag(.noreturn), - - .fn_void_no_args, - .fn_ccc_void_no_args, - => Type.initTag(.void), - - .function => self.castTag(.function).?.data.return_type, - - else => unreachable, - }; - } - - /// Asserts the type is a function. - pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention { - return switch (self.tag()) { - .fn_noreturn_no_args => .Unspecified, - .fn_void_no_args => .Unspecified, - .fn_naked_noreturn_no_args => .Naked, - .fn_ccc_void_no_args => .C, - .function => self.castTag(.function).?.data.cc, - - else => unreachable, - }; - } - - /// Asserts the type is a function. - pub fn fnCallingConventionAllowsZigTypes(target: Target, cc: std.builtin.CallingConvention) bool { - return switch (cc) { - .Unspecified, .Async, .Inline => true, - // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. - // The goal is to experiment with more integrated CPU/GPU code. - .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64, - else => false, - }; - } - - pub fn isValidParamType(self: Type) bool { - return switch (self.zigTypeTagOrPoison() catch return true) { + pub fn isValidParamType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { .Undefined, .Null, .Opaque, .NoReturn => false, else => true, }; } - pub fn isValidReturnType(self: Type) bool { - return switch (self.zigTypeTagOrPoison() catch return true) { + pub fn isValidReturnType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { .Undefined, .Null, .Opaque => false, else => true, }; } /// Asserts the type is a function. 
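With the special-cased function tags (`fn_void_no_args` and friends) gone, `fnReturnType`, `fnCallingConvention`, and `fnIsVarArgs` all read fields of a single `func_type` key. Those are the same facts user code sees in a function type's reflection info (illustrative sketch, not part of the patch):

const std = @import("std");

// Illustrative only: calling convention, varargs flag, and return
// type of a function type, as extracted from @typeInfo(F).Fn.
test "function type queries" {
    const F = fn (usize) callconv(.C) void;
    const info = @typeInfo(F).Fn;
    try std.testing.expect(info.calling_convention == .C);
    try std.testing.expect(!info.is_var_args);
    try std.testing.expect(info.return_type.? == void);
}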
- pub fn fnIsVarArgs(self: Type) bool { - return switch (self.tag()) { - .fn_noreturn_no_args => false, - .fn_void_no_args => false, - .fn_naked_noreturn_no_args => false, - .fn_ccc_void_no_args => false, - .function => self.castTag(.function).?.data.is_var_args, - - else => unreachable, - }; + pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args; } - pub fn fnInfo(ty: Type) Payload.Function.Data { - return switch (ty.tag()) { - .fn_noreturn_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.noreturn), - .cc = .Unspecified, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_void_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.void), - .cc = .Unspecified, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_naked_noreturn_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.noreturn), - .cc = .Naked, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_ccc_void_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.void), - .cc = .C, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .function => ty.castTag(.function).?.data, - - else => unreachable, - }; - } - - pub fn isNumeric(self: Type) bool { - return switch (self.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_int, - .comptime_float, - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .int_unsigned, - .int_signed, + pub fn isNumeric(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_int_type, + .comptime_float_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, => true, - else => false, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => true, + else => false, + }, }; } /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which /// resolves field types rather than asserting they are already resolved. 
- pub fn onePossibleValue(starting_type: Type) ?Value { + pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { var ty = starting_type; - while (true) switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_int, - .comptime_float, - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .bool, - .type, - .anyerror, - .error_union, - .error_set_single, - .error_set, - .error_set_merged, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - .single_const_pointer_to_comptime_int, - .array_sentinel, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .anyopaque, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .enum_literal, - .anyerror_void_error_union, - .error_set_inferred, - .@"opaque", - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", - .anyframe_T, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer, - .single_mut_pointer, - .pointer, - => return null, - .optional => { - var buf: Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); - if (child_ty.isNoReturn()) { - return Value.null; - } else { - return null; - } - }, + while (true) switch (ty.toIntern()) { + .empty_struct_type => return Value.empty_struct, - .@"struct" => { - const s = ty.castTag(.@"struct").?.data; - assert(s.haveFieldTypes()); - for (s.fields.values()) |field| { - if (field.is_comptime) continue; - if (field.ty.onePossibleValue() != null) continue; - return null; - } - return Value.initTag(.empty_struct_value); - }, - - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) |val, i| { - const is_comptime = val.tag() != .unreachable_value; - if (is_comptime) continue; - if (tuple.types[i].onePossibleValue() != null) continue; - return null; - } - return Value.initTag(.empty_struct_value); - }, - - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. 
- if (enum_numbered.tag_ty.hasRuntimeBits()) { - return null; - } - assert(enum_numbered.fields.count() == 1); - return enum_numbered.values.keys()[0]; - }, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - if (enum_full.tag_ty.hasRuntimeBits()) { - return null; - } - switch (enum_full.fields.count()) { - 0 => return Value.initTag(.unreachable_value), - 1 => if (enum_full.values.count() == 0) { - return Value.zero; // auto-numbered + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return try mod.intValue(ty, 0); } else { - return enum_full.values.keys()[0]; + return null; + } + }, + + .ptr_type, + .error_union_type, + .func_type, + .anyframe_type, + .error_set_type, + .inferred_error_set_type, + => return null, + + inline .array_type, .vector_type => |seq_type, seq_tag| { + const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; + if (seq_type.len + @boolToInt(has_sentinel) == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } })).toValue(); + if (try seq_type.child.toType().onePossibleValue(mod)) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, + } })).toValue(); + } + return null; + }, + .opt_type => |child| { + if (child == .noreturn_type) { + return try mod.nullValue(ty); + } else { + return null; + } + }, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .enum_literal, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + => return null, + + .void => return Value.void, + .noreturn => return Value.@"unreachable", + .null => return Value.null, + .undefined => return Value.undef, + + .generic_poison => unreachable, + }, + .struct_type => |struct_type| { + if (mod.structPtrUnwrap(struct_type.index)) |s| { + assert(s.haveFieldTypes()); + const field_vals = try mod.gpa.alloc(InternPool.Index, s.fields.count()); + defer mod.gpa.free(field_vals); + for (field_vals, s.fields.values()) |*field_val, field| { + if (field.is_comptime) { + field_val.* = field.default_val; + continue; + } + if (try field.ty.onePossibleValue(mod)) |field_opv| { + field_val.* = try field_opv.intern(field.ty, mod); + } else return null; + } + + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); + } + + // In this case the struct has no fields at all and + // therefore has one possible value. + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } })).toValue(); + }, + + .anon_struct_type => |tuple| { + for (tuple.values) |val| { + if (val == .none) return null; + } + // In this case the struct has all comptime-known fields and + // therefore has one possible value. 
+ // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values); + defer mod.gpa.free(duped_values); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = duped_values }, + } })).toValue(); + }, + + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; + if (union_obj.fields.count() == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + } + const only_field = union_obj.fields.values()[0]; + const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; + const only = try mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), + } }); + return only.toValue(); + }, + .opaque_type => return null, + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try enum_type.tag_ty.toType().onePossibleValue(mod)) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = int_opv.toIntern(), + } }); + return only.toValue(); + } + + return null; }, - else => return null, - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.initTag(.unreachable_value), - 1 => return Value.zero, - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasRuntimeBits()) { - return Value.zero; - } else { - return null; - } - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = union_obj.tag_ty.onePossibleValue() orelse return null; - if (union_obj.fields.count() == 0) return Value.initTag(.unreachable_value); - const only_field = union_obj.fields.values()[0]; - const val_val = only_field.ty.onePossibleValue() orelse return null; - _ = tag_val; - _ = val_val; - return Value.initTag(.empty_struct_value); - }, + .auto, .explicit => { + if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; - .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .void => return Value.initTag(.void_value), - .noreturn => return Value.initTag(.unreachable_value), - .null => return Value.initTag(.null_value), - .undefined => return Value.initTag(.undef), + switch (enum_type.names.len) { + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + }, + 1 => { + if (enum_type.values.len == 0) { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return only.toValue(); + } else { + return enum_type.values[0].toValue(); + } + }, + else => return null, + } + }, + }, - .int_unsigned, .int_signed => { - if (ty.cast(Payload.Bits).?.data == 0) { - return Value.zero; - } else { - return null; - } + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, - .vector, .array, .array_u8 => { - if (ty.arrayLen() == 
0) - return Value.initTag(.empty_array); - if (ty.elemType().onePossibleValue() != null) - return Value.initTag(.the_only_possible_value); - return null; - }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, }; } @@ -5146,350 +2675,298 @@ pub const Type = extern union { /// resolves field types rather than asserting they are already resolved. /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. - pub fn comptimeOnly(ty: Type) bool { - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .null, - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, - .empty_struct_literal, - .empty_struct, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .generic_poison, - .array_u8, - .array_u8_sentinel_0, - .int_signed, - .int_unsigned, - .enum_simple, - => false, + pub fn comptimeOnly(ty: Type, mod: *Module) bool { + return switch (ty.toIntern()) { + .empty_struct_type => false, - .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .comptime_float, - .enum_literal, - .type_info, - // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => false, + .ptr_type => |ptr_type| { + const child_ty = ptr_type.child.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { + return false; + } else { + return child_ty.comptimeOnly(mod); + } + }, + .anyframe_type => |child| { + if (child == .none) return false; + return child.toType().comptimeOnly(mod); + }, + .array_type => |array_type| array_type.child.toType().comptimeOnly(mod), + .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), + .opt_type => |child| child.toType().comptimeOnly(mod), + .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod), - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, + .error_set_type, + .inferred_error_set_type, + => false, - .array, - .array_sentinel, - .vector, - => return ty.childType().comptimeOnly(), + // These are function bodies, not function pointers. 
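A type for which `onePossibleValue` returns non-null occupies no storage at runtime: there is nothing to encode, so its ABI size is zero. An illustrative user-level check of that property (not part of the patch):

const std = @import("std");

// Illustrative only: single-valued types have zero size, which is
// the observable consequence of onePossibleValue returning a value.
test "one possible value means zero size" {
    try std.testing.expect(@sizeOf(u0) == 0);
    try std.testing.expect(@sizeOf(void) == 0);
    try std.testing.expect(@sizeOf(struct {}) == 0);
    const x: u0 = 0; // the single inhabitant
    _ = x;
}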
+ .func_type => true, - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, + .struct_type => |struct_type| { + // A struct with no fields is not comptime-only. + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch (struct_obj.requires_comptime) { + .wip, .unknown => { + // Return false to avoid incorrect dependency loops. + // This will be handled correctly once merged with + // `Sema.typeRequiresComptime`. + return false; + }, + .no => return false, + .yes => return true, + } + }, + + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + const have_comptime_val = val != .none; + if (!have_comptime_val and field_ty.toType().comptimeOnly(mod)) return true; + } return false; - } else { - return child_ty.comptimeOnly(); - } - }, + }, - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return ty.optionalChild(&buf).comptimeOnly(); - }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_obj.requires_comptime) { + .wip, .unknown => { + // Return false to avoid incorrect dependency loops. + // This will be handled correctly once merged with + // `Sema.typeRequiresComptime`. + return false; + }, + .no => return false, + .yes => return true, + } + }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].tag() != .unreachable_value; - if (!have_comptime_val and field_ty.comptimeOnly()) return true; - } - return false; - }, + .opaque_type => false, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .wip, .unknown => { - // Return false to avoid incorrect dependency loops. - // This will be handled correctly once merged with - // `Sema.typeRequiresComptime`. - return false; - }, - .no => return false, - .yes => return true, - } - }, + .enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod), - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .wip, .unknown => { - // Return false to avoid incorrect dependency loops. - // This will be handled correctly once merged with - // `Sema.typeRequiresComptime`. 
- return false; - }, - .no => return false, - .yes => return true, - } - }, - - .error_union => return ty.errorUnionPayload().comptimeOnly(), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return child_ty.comptimeOnly(); - }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return tag_ty.comptimeOnly(); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return tag_ty.comptimeOnly(); + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, }; } - pub fn isArrayOrVector(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isVector(ty: Type, mod: *const Module) bool { + return ty.zigTypeTag(mod) == .Vector; + } + + pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, else => false, }; } - pub fn isIndexable(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isIndexable(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice, .Many, .C => true, - .One => ty.elemType().zigTypeTag() == .Array, + .One => ty.childType(mod).zigTypeTag(mod) == .Array, }, - .Struct => ty.isTuple(), + .Struct => ty.isTuple(mod), else => false, }; } - pub fn indexableHasLen(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn indexableHasLen(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Many, .C => false, .Slice => true, - .One => ty.elemType().zigTypeTag() == .Array, + .One => ty.childType(mod).zigTypeTag(mod) == .Array, }, - .Struct => ty.isTuple(), + .Struct => ty.isTuple(mod), else => false, }; } /// Returns null if the type has no namespace. - pub fn getNamespace(self: Type) ?*Module.Namespace { - return switch (self.tag()) { - .@"struct" => &self.castTag(.@"struct").?.data.namespace, - .enum_full => &self.castTag(.enum_full).?.data.namespace, - .enum_nonexhaustive => &self.castTag(.enum_nonexhaustive).?.data.namespace, - .empty_struct => self.castTag(.empty_struct).?.data, - .@"opaque" => &self.castTag(.@"opaque").?.data.namespace, - .@"union" => &self.castTag(.@"union").?.data.namespace, - .union_safety_tagged => &self.castTag(.union_safety_tagged).?.data.namespace, - .union_tagged => &self.castTag(.union_tagged).?.data.namespace, + pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + .struct_type => |struct_type| struct_type.namespace, + .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), + .enum_type => |enum_type| enum_type.namespace, - else => null, + else => .none, }; } - // Works for vectors and vectors of integers. 
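Two details of `comptimeOnly` above are worth noting: a pointer type is comptime-only exactly when its child is, except that a pointer to a function is a runtime value (function bodies are comptime-only, function pointers are not), and unresolved structs and unions deliberately answer `false` to avoid dependency loops. The headline cases are observable from user code (illustrative sketch, not part of the patch):

const std = @import("std");

// Illustrative only: comptime-only types report zero ABI size and
// cannot appear in runtime memory, the property comptimeOnly tracks.
test "comptime-only types" {
    try std.testing.expect(@sizeOf(type) == 0);
    try std.testing.expect(@sizeOf(comptime_int) == 0);
}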
+    /// Returns null if the type has no namespace.
+    pub fn getNamespace(ty: Type, mod: *Module) ?*Module.Namespace {
+        return if (getNamespaceIndex(ty, mod).unwrap()) |i| mod.namespacePtr(i) else null;
     }

-    /// Asserts that self.zigTypeTag() == .Int.
-    pub fn minIntScalar(ty: Type, arena: Allocator, target: Target) !Value {
-        assert(ty.zigTypeTag() == .Int);
-        const info = ty.intInfo(target);
+    // Works for integers and vectors of integers.
+    pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
+        const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
+        return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{
+            .ty = dest_ty.toIntern(),
+            .storage = .{ .repeated_elem = scalar.toIntern() },
+        } })).toValue() else scalar;
+    }

-        if (info.bits == 0) {
-            return Value.initTag(.the_only_possible_value);
-        }
-
-        if (info.signedness == .unsigned) {
-            return Value.zero;
-        }
+    /// Asserts that the type is an integer.
+    pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
+        const info = ty.intInfo(mod);
+        if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0);
+        if (info.bits == 0) return mod.intValue(dest_ty, -1);

         if (std.math.cast(u6, info.bits - 1)) |shift| {
             const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
-            return Value.Tag.int_i64.create(arena, n);
+            return mod.intValue(dest_ty, n);
         }

-        var res = try std.math.big.int.Managed.init(arena);
+        var res = try std.math.big.int.Managed.init(mod.gpa);
+        defer res.deinit();
+
         try res.setTwosCompIntLimit(.min, info.signedness, info.bits);

-        const res_const = res.toConst();
-        if (res_const.positive) {
-            return Value.Tag.int_big_positive.create(arena, res_const.limbs);
-        } else {
-            return Value.Tag.int_big_negative.create(arena, res_const.limbs);
-        }
+        return mod.intValue_big(dest_ty, res.toConst());
     }

-    // Works for vectors and vectors of integers.
+    // Works for integers and vectors of integers.
-    pub fn maxInt(ty: Type, arena: Allocator, target: Target) !Value {
-        const scalar = try maxIntScalar(ty.scalarType(), arena, target);
-        if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) {
-            return Value.Tag.repeated.create(arena, scalar);
-        } else {
-            return scalar;
-        }
+    /// The returned Value will have type dest_ty.
+    pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
+        const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
+        return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{
+            .ty = dest_ty.toIntern(),
+            .storage = .{ .repeated_elem = scalar.toIntern() },
+        } })).toValue() else scalar;
     }

-    /// Asserts that self.zigTypeTag() == .Int.
-    pub fn maxIntScalar(self: Type, arena: Allocator, target: Target) !Value {
-        assert(self.zigTypeTag() == .Int);
-        const info = self.intInfo(target);
+    /// The returned Value will have type dest_ty.
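+    /// For example, the max of `u8` is 255 and of `i8` is 127; the result is
+    /// built with `mod.intValue`, so it is interned like any other value.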
+    pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
+        const info = ty.intInfo(mod);

-        if (info.bits == 0) {
-            return Value.initTag(.the_only_possible_value);
-        }
-
-        switch (info.bits - @boolToInt(info.signedness == .signed)) {
-            0 => return Value.zero,
-            1 => return Value.one,
+        switch (info.bits) {
+            0 => return switch (info.signedness) {
+                .signed => try mod.intValue(dest_ty, -1),
+                .unsigned => try mod.intValue(dest_ty, 0),
+            },
+            1 => return switch (info.signedness) {
+                .signed => try mod.intValue(dest_ty, 0),
+                .unsigned => try mod.intValue(dest_ty, 1),
+            },
             else => {},
         }

         if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
             .signed => {
                 const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
-                return Value.Tag.int_i64.create(arena, n);
+                return mod.intValue(dest_ty, n);
             },
             .unsigned => {
                 const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
-                return Value.Tag.int_u64.create(arena, n);
+                return mod.intValue(dest_ty, n);
             },
         };

-        var res = try std.math.big.int.Managed.init(arena);
+        var res = try std.math.big.int.Managed.init(mod.gpa);
+        defer res.deinit();
+
         try res.setTwosCompIntLimit(.max, info.signedness, info.bits);

-        const res_const = res.toConst();
-        if (res_const.positive) {
-            return Value.Tag.int_big_positive.create(arena, res_const.limbs);
-        } else {
-            return Value.Tag.int_big_negative.create(arena, res_const.limbs);
-        }
+        return mod.intValue_big(dest_ty, res.toConst());
     }

     /// Asserts the type is an enum or a union.
-    pub fn intTagType(ty: Type, buffer: *Payload.Bits) Type {
-        switch (ty.tag()) {
-            .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty,
-            .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty,
-            .enum_simple => {
-                const enum_simple = ty.castTag(.enum_simple).?.data;
-                const field_count = enum_simple.fields.count();
-                const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
-                buffer.* = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = bits,
-                };
-                return Type.initPayload(&buffer.base);
-            },
-            .union_tagged => return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer),
+    pub fn intTagType(ty: Type, mod: *Module) Type {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod),
+            .enum_type => |enum_type| enum_type.tag_ty.toType(),
             else => unreachable,
-        }
+        };
     }

-    pub fn isNonexhaustiveEnum(ty: Type) bool {
-        return switch (ty.tag()) {
-            .enum_nonexhaustive => true,
+    pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .enum_type => |enum_type| switch (enum_type.tag_mode) {
+                .nonexhaustive => true,
+                .auto, .explicit => false,
+            },
             else => false,
         };
     }

     // Asserts that `ty` is an error set and not `anyerror`.
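+    // The names come back as interned `NullTerminatedString` handles; turning one
+    // back into text requires a lookup through `mod.intern_pool`.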
-    pub fn errorSetNames(ty: Type) []const []const u8 {
-        return switch (ty.tag()) {
-            .error_set_single => blk: {
-                // Work around coercion problems
-                const tmp: *const [1][]const u8 = &ty.castTag(.error_set_single).?.data;
-                break :blk tmp;
-            },
-            .error_set_merged => ty.castTag(.error_set_merged).?.data.keys(),
-            .error_set => ty.castTag(.error_set).?.data.names.keys(),
-            .error_set_inferred => {
-                const inferred_error_set = ty.castTag(.error_set_inferred).?.data;
+    pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .error_set_type => |x| x.names,
+            .inferred_error_set_type => |index| {
+                const inferred_error_set = mod.inferredErrorSetPtr(index);
                 assert(inferred_error_set.is_resolved);
                 assert(!inferred_error_set.is_anyerror);
                 return inferred_error_set.errors.keys();
@@ -5498,133 +2975,43 @@ pub const Type = extern union {
         };
     }

-    /// Merge lhs with rhs.
-    /// Asserts that lhs and rhs are both error sets and are resolved.
-    pub fn errorSetMerge(lhs: Type, arena: Allocator, rhs: Type) !Type {
-        const lhs_names = lhs.errorSetNames();
-        const rhs_names = rhs.errorSetNames();
-        var names: Module.ErrorSet.NameMap = .{};
-        try names.ensureUnusedCapacity(arena, lhs_names.len);
-        for (lhs_names) |name| {
-            names.putAssumeCapacityNoClobber(name, {});
-        }
-        for (rhs_names) |name| {
-            try names.put(arena, name, {});
-        }
-
-        // names must be sorted
-        Module.ErrorSet.sortNames(&names);
-
-        return try Tag.error_set_merged.create(arena, names);
+    pub fn enumFields(ty: Type, mod: *Module) []const InternPool.NullTerminatedString {
+        return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names;
     }

-    pub fn enumFields(ty: Type) Module.EnumFull.NameMap {
-        return switch (ty.tag()) {
-            .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.fields,
-            .enum_simple => ty.castTag(.enum_simple).?.data.fields,
-            .enum_numbered => ty.castTag(.enum_numbered).?.data.fields,
-            .atomic_order,
-            .atomic_rmw_op,
-            .calling_convention,
-            .address_space,
-            .float_mode,
-            .reduce_op,
-            .modifier,
-            .prefetch_options,
-            .export_options,
-            .extern_options,
-            => @panic("TODO resolve std.builtin types"),
-            else => unreachable,
-        };
+    pub fn enumFieldCount(ty: Type, mod: *Module) usize {
+        return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names.len;
     }

-    pub fn enumFieldCount(ty: Type) usize {
-        return ty.enumFields().count();
+    pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
+        return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names[field_index];
     }

-    pub fn enumFieldName(ty: Type, field_index: usize) []const u8 {
-        return ty.enumFields().keys()[field_index];
-    }
-
-    pub fn enumFieldIndex(ty: Type, field_name: []const u8) ?usize {
-        return ty.enumFields().getIndex(field_name);
+    pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 {
+        const ip = &mod.intern_pool;
+        const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+        return enum_type.nameIndex(ip, field_name);
     }

-    /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
-    /// an integer which represents the enum value. Returns the field index in
+    /// Asserts `ty` is an enum. `enum_tag` can either be an `enum_tag` value or
+    /// an integer which represents the enum value. Returns the field index in
     /// declaration order, or `null` if `enum_tag` does not match any field.
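+    /// For example, with `enum(u8) { a = 1, b = 2 }`, an `enum_tag` carrying the
+    /// integer 2 maps to field index 1 (`b`), and an unmatched integer yields null.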
-    pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize {
-        if (enum_tag.castTag(.enum_field_index)) |payload| {
-            return @as(usize, payload.data);
-        }
-        const S = struct {
-            fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize {
-                if (int_val.compareAllWithZero(.lt, m)) return null;
-                var end_payload: Value.Payload.U64 = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = end,
-                };
-                const end_val = Value.initPayload(&end_payload.base);
-                if (int_val.compareAll(.gte, end_val, int_ty, m)) return null;
-                return @intCast(usize, int_val.toUnsignedInt(m.getTarget()));
-            }
-        };
-        switch (ty.tag()) {
-            .enum_full, .enum_nonexhaustive => {
-                const enum_full = ty.cast(Payload.EnumFull).?.data;
-                const tag_ty = enum_full.tag_ty;
-                if (enum_full.values.count() == 0) {
-                    return S.fieldWithRange(tag_ty, enum_tag, enum_full.fields.count(), mod);
-                } else {
-                    return enum_full.values.getIndexContext(enum_tag, .{
-                        .ty = tag_ty,
-                        .mod = mod,
-                    });
-                }
-            },
-            .enum_numbered => {
-                const enum_obj = ty.castTag(.enum_numbered).?.data;
-                const tag_ty = enum_obj.tag_ty;
-                if (enum_obj.values.count() == 0) {
-                    return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count(), mod);
-                } else {
-                    return enum_obj.values.getIndexContext(enum_tag, .{
-                        .ty = tag_ty,
-                        .mod = mod,
-                    });
-                }
-            },
-            .enum_simple => {
-                const enum_simple = ty.castTag(.enum_simple).?.data;
-                const fields_len = enum_simple.fields.count();
-                const bits = std.math.log2_int_ceil(usize, fields_len);
-                var buffer: Payload.Bits = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = bits,
-                };
-                const tag_ty = Type.initPayload(&buffer.base);
-                return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod);
-            },
-            .atomic_order,
-            .atomic_rmw_op,
-            .calling_convention,
-            .address_space,
-            .float_mode,
-            .reduce_op,
-            .modifier,
-            .prefetch_options,
-            .export_options,
-            .extern_options,
-            => @panic("TODO resolve std.builtin types"),
+    pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
+        const ip = &mod.intern_pool;
+        const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
+        const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
+            .int => enum_tag.toIntern(),
+            .enum_tag => |info| info.int,
             else => unreachable,
-        }
+        };
+        assert(ip.typeOf(int_tag) == enum_type.tag_ty);
+        return enum_type.tagValueIndex(ip, int_tag);
     }

-    pub fn structFields(ty: Type) Module.Struct.Fields {
-        switch (ty.tag()) {
-            .empty_struct, .empty_struct_literal => return .{},
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields {
+        switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{};
                 assert(struct_obj.haveFieldTypes());
                 return struct_obj.fields;
             },
@@ -5632,141 +3019,122 @@ pub const Type = extern union {
         }
     }

-    pub fn structFieldName(ty: Type, field_index: usize) []const u8 {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 assert(struct_obj.haveFieldTypes());
                 return struct_obj.fields.keys()[field_index];
             },
-            .anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index],
+            .anon_struct_type => |anon_struct| anon_struct.names[field_index],
             else => unreachable,
-        }
+        };
     }

-    pub fn structFieldCount(ty: Type) usize {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn structFieldCount(ty: Type, mod: *Module) usize {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
                 assert(struct_obj.haveFieldTypes());
                 return struct_obj.fields.count();
             },
-            .empty_struct, .empty_struct_literal => return 0,
-            .tuple => return ty.castTag(.tuple).?.data.types.len,
-            .anon_struct => return ty.castTag(.anon_struct).?.data.types.len,
+            .anon_struct_type => |anon_struct| anon_struct.types.len,
             else => unreachable,
-        }
+        };
     }

     /// Supports structs and unions.
-    pub fn structFieldType(ty: Type, index: usize) Type {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 return struct_obj.fields.values()[index].ty;
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
                 return union_obj.fields.values()[index].ty;
             },
-            .tuple => return ty.castTag(.tuple).?.data.types[index],
-            .anon_struct => return ty.castTag(.anon_struct).?.data.types[index],
+            .anon_struct_type => |anon_struct| anon_struct.types[index].toType(),
             else => unreachable,
-        }
+        };
     }

-    pub fn structFieldAlign(ty: Type, index: usize, target: Target) u32 {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
+        switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 assert(struct_obj.layout != .Packed);
-                return struct_obj.fields.values()[index].alignment(target, struct_obj.layout);
+                return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                return union_obj.fields.values()[index].normalAlignment(target);
+            .anon_struct_type => |anon_struct| {
+                return anon_struct.types[index].toType().abiAlignment(mod);
             },
-            .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(target),
-            .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(target),
-            else => unreachable,
-        }
-    }
-
-    pub fn structFieldDefaultValue(ty: Type, index: usize) Value {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                return struct_obj.fields.values()[index].default_val;
-            },
-            .tuple => {
-                const tuple = ty.castTag(.tuple).?.data;
-                return tuple.values[index];
-            },
-            .anon_struct => {
-                const struct_obj = ty.castTag(.anon_struct).?.data;
-                return struct_obj.values[index];
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                return union_obj.fields.values()[index].normalAlignment(mod);
             },
             else => unreachable,
         }
     }

-    pub fn structFieldValueComptime(ty: Type, index: usize) ?Value {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
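+    // In the InternPool representation a field's `default_val` of `.none` means
+    // "runtime-known, no default"; the helpers below map that sentinel to
+    // `Value.@"unreachable"` (see the TODOs about replacing this convention).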
+    pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
+        switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                const val = struct_obj.fields.values()[index].default_val;
+                // TODO: avoid using `unreachable` to indicate this.
+                if (val == .none) return Value.@"unreachable";
+                return val.toValue();
+            },
+            .anon_struct_type => |anon_struct| {
+                const val = anon_struct.values[index];
+                // TODO: avoid using `unreachable` to indicate this.
+                if (val == .none) return Value.@"unreachable";
+                return val.toValue();
+            },
+            else => unreachable,
+        }
+    }
+
+    pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
+        switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 const field = struct_obj.fields.values()[index];
                 if (field.is_comptime) {
-                    return field.default_val;
+                    return field.default_val.toValue();
                 } else {
-                    return field.ty.onePossibleValue();
+                    return field.ty.onePossibleValue(mod);
                 }
             },
-            .tuple => {
-                const tuple = ty.castTag(.tuple).?.data;
+            .anon_struct_type => |tuple| {
                 const val = tuple.values[index];
-                if (val.tag() == .unreachable_value) {
-                    return tuple.types[index].onePossibleValue();
+                if (val == .none) {
+                    return tuple.types[index].toType().onePossibleValue(mod);
                 } else {
-                    return val;
-                }
-            },
-            .anon_struct => {
-                const anon_struct = ty.castTag(.anon_struct).?.data;
-                const val = anon_struct.values[index];
-                if (val.tag() == .unreachable_value) {
-                    return anon_struct.types[index].onePossibleValue();
-                } else {
-                    return val;
+                    return val.toValue();
                 }
             },
             else => unreachable,
         }
     }

-    pub fn structFieldIsComptime(ty: Type, index: usize) bool {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 if (struct_obj.layout == .Packed) return false;
                 const field = struct_obj.fields.values()[index];
                 return field.is_comptime;
             },
-            .tuple => {
-                const tuple = ty.castTag(.tuple).?.data;
-                const val = tuple.values[index];
-                return val.tag() != .unreachable_value;
-            },
-            .anon_struct => {
-                const anon_struct = ty.castTag(.anon_struct).?.data;
-                const val = anon_struct.values[index];
-                return val.tag() != .unreachable_value;
-            },
+            .anon_struct_type => |anon_struct| anon_struct.values[index] != .none,
             else => unreachable,
-        }
+        };
     }

-    pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, target: Target) u32 {
-        const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
+        const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
+        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
         assert(struct_obj.layout == .Packed);
         comptime assert(Type.packed_struct_layout_version == 2);
@@ -5774,9 +3142,9 @@ pub const Type = extern union {
         var elem_size_bits: u16 = undefined;
         var running_bits: u16 = 0;
         for (struct_obj.fields.values(), 0..) |f, i| {
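+            // Zero-bit fields occupy no bits in the packed layout, so they are
+            // skipped before accumulating `running_bits` below.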
-            if (!f.ty.hasRuntimeBits()) continue;
+            if (!f.ty.hasRuntimeBits(mod)) continue;

-            const field_bits = @intCast(u16, f.ty.bitSize(target));
+            const field_bits = @intCast(u16, f.ty.bitSize(mod));
             if (i == field_index) {
                 bit_offset = running_bits;
                 elem_size_bits = field_bits;
@@ -5797,9 +3165,10 @@ pub const Type = extern union {
         offset: u64 = 0,
         big_align: u32 = 0,
         struct_obj: *Module.Struct,
-        target: Target,
+        module: *Module,

         pub fn next(it: *StructOffsetIterator) ?FieldOffset {
+            const mod = it.module;
             var i = it.field;
             if (it.struct_obj.fields.count() <= i)
                 return null;
@@ -5811,35 +3180,36 @@ pub const Type = extern union {
             const field = it.struct_obj.fields.values()[i];
             it.field += 1;

-            if (field.is_comptime or !field.ty.hasRuntimeBits()) {
+            if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) {
                 return FieldOffset{ .field = i, .offset = it.offset };
             }

-            const field_align = field.alignment(it.target, it.struct_obj.layout);
+            const field_align = field.alignment(mod, it.struct_obj.layout);
             it.big_align = @max(it.big_align, field_align);
             const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
-            it.offset = field_offset + field.ty.abiSize(it.target);
+            it.offset = field_offset + field.ty.abiSize(mod);
             return FieldOffset{ .field = i, .offset = field_offset };
         }
     };

-    /// Get an iterator that iterates over all the struct field, returning the field and
+    /// Get an iterator that iterates over all the struct fields, returning the field and
     /// offset of that field. Asserts that the type is a non-packed struct.
-    pub fn iterateStructOffsets(ty: Type, target: Target) StructOffsetIterator {
-        const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator {
+        const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
+        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
         assert(struct_obj.haveLayout());
         assert(struct_obj.layout != .Packed);
-        return .{ .struct_obj = struct_obj, .target = target };
+        return .{ .struct_obj = struct_obj, .module = mod };
     }

     /// Supports structs and unions.
-    pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
+        switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 assert(struct_obj.haveLayout());
                 assert(struct_obj.layout != .Packed);
-                var it = ty.iterateStructOffsets(target);
+                var it = ty.iterateStructOffsets(mod);
                 while (it.next()) |field_offset| {
                     if (index == field_offset.field)
                         return field_offset.offset;
@@ -5848,34 +3218,32 @@ pub const Type = extern union {
                 return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
             },

-            .tuple, .anon_struct => {
-                const tuple = ty.tupleFields();
-
+            .anon_struct_type => |tuple| {
                 var offset: u64 = 0;
                 var big_align: u32 = 0;

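+                // Comptime-known fields and zero-bit fields take no space, so they
+                // report the running offset unchanged (the check below).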
-                for (tuple.types, 0..) |field_ty, i| {
-                    const field_val = tuple.values[i];
-                    if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) {
+                for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
+                    if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) {
                         // comptime field
                         if (i == index) return offset;
                         continue;
                     }

-                    const field_align = field_ty.abiAlignment(target);
+                    const field_align = field_ty.toType().abiAlignment(mod);
                     big_align = @max(big_align, field_align);
                     offset = std.mem.alignForwardGeneric(u64, offset, field_align);
                     if (i == index) return offset;
-                    offset += field_ty.abiSize(target);
+                    offset += field_ty.toType().abiSize(mod);
                 }
                 offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
                 return offset;
             },

-            .@"union" => return 0,
-            .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                const layout = union_obj.getLayout(target, true);
+            .union_type => |union_type| {
+                if (!union_type.hasTag())
+                    return 0;
+                const union_obj = mod.unionPtr(union_type.index);
+                const layout = union_obj.getLayout(mod, true);
                 if (layout.tag_align >= layout.payload_align) {
                     // {Tag, Payload}
                     return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
@@ -5884,6 +3252,7 @@ pub const Type = extern union {
                     return 0;
                 }
             },
+            else => unreachable,
         }
     }
@@ -5893,507 +3262,92 @@ pub const Type = extern union {
     }

     pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
-        switch (ty.tag()) {
-            .enum_full, .enum_nonexhaustive => {
-                const enum_full = ty.cast(Payload.EnumFull).?.data;
-                return enum_full.srcLoc(mod);
-            },
-            .enum_numbered => {
-                const enum_numbered = ty.castTag(.enum_numbered).?.data;
-                return enum_numbered.srcLoc(mod);
-            },
-            .enum_simple => {
-                const enum_simple = ty.castTag(.enum_simple).?.data;
-                return enum_simple.srcLoc(mod);
-            },
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 return struct_obj.srcLoc(mod);
             },
-            .error_set => {
-                const error_set = ty.castTag(.error_set).?.data;
-                return error_set.srcLoc(mod);
-            },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
                 return union_obj.srcLoc(mod);
             },
-            .@"opaque" => {
-                const opaque_obj = ty.cast(Payload.Opaque).?.data;
-                return opaque_obj.srcLoc(mod);
-            },
-            .atomic_order,
-            .atomic_rmw_op,
-            .calling_convention,
-            .address_space,
-            .float_mode,
-            .reduce_op,
-            .modifier,
-            .prefetch_options,
-            .export_options,
-            .extern_options,
-            .type_info,
-            => unreachable, // needed to call resolveTypeFields first
-
-            else => return null,
-        }
+            .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type),
+            .enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod),
+            else => null,
+        };
     }

-    pub fn getOwnerDecl(ty: Type) Module.Decl.Index {
-        return ty.getOwnerDeclOrNull() orelse unreachable;
+    pub fn getOwnerDecl(ty: Type, mod: *Module) Module.Decl.Index {
+        return ty.getOwnerDeclOrNull(mod) orelse unreachable;
     }

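+    // Only container types (structs, unions, opaques, enums) have an owner Decl;
+    // every other type yields null from the lookup below.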
-    pub fn getOwnerDeclOrNull(ty: Type) ?Module.Decl.Index {
-        switch (ty.tag()) {
-            .enum_full, .enum_nonexhaustive => {
-                const enum_full = ty.cast(Payload.EnumFull).?.data;
-                return enum_full.owner_decl;
-            },
-            .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl,
-            .enum_simple => {
-                const enum_simple = ty.castTag(.enum_simple).?.data;
-                return enum_simple.owner_decl;
-            },
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
+    pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null;
                 return struct_obj.owner_decl;
             },
-            .error_set => {
-                const error_set = ty.castTag(.error_set).?.data;
-                return error_set.owner_decl;
-            },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
                 return union_obj.owner_decl;
             },
-            .@"opaque" => {
-                const opaque_obj = ty.cast(Payload.Opaque).?.data;
-                return opaque_obj.owner_decl;
+            .opaque_type => |opaque_type| opaque_type.decl,
+            .enum_type => |enum_type| enum_type.decl,
+            else => null,
+        };
+    }
+
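+    // `generic_poison` is a reserved InternPool index, so this check is a single
+    // integer comparison.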
+    pub fn isGenericPoison(ty: Type) bool {
+        return ty.toIntern() == .generic_poison_type;
+    }
+
+    pub fn isTuple(ty: Type, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                return struct_obj.is_tuple;
             },
-            .atomic_order,
-            .atomic_rmw_op,
-            .calling_convention,
-            .address_space,
-            .float_mode,
-            .reduce_op,
-            .modifier,
-            .prefetch_options,
-            .export_options,
-            .extern_options,
-            .type_info,
-            => unreachable, // These need to be resolved earlier.
-
-            else => return null,
-        }
-    }
-
-    /// This enum does not directly correspond to `std.builtin.TypeId` because
-    /// it has extra enum tags in it, as a way of using less memory. For example,
-    /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
-    /// but with different alignment values, in this data structure they are represented
-    /// with different enum tags, because the the former requires more payload data than the latter.
-    /// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`.
-    pub const Tag = enum(usize) {
-        // The first section of this enum are tags that require no payload.
-        u1,
-        u8,
-        i8,
-        u16,
-        i16,
-        u29,
-        u32,
-        i32,
-        u64,
-        i64,
-        u128,
-        i128,
-        usize,
-        isize,
-        c_char,
-        c_short,
-        c_ushort,
-        c_int,
-        c_uint,
-        c_long,
-        c_ulong,
-        c_longlong,
-        c_ulonglong,
-        c_longdouble,
-        f16,
-        f32,
-        f64,
-        f80,
-        f128,
-        anyopaque,
-        bool,
-        void,
-        type,
-        anyerror,
-        comptime_int,
-        comptime_float,
-        noreturn,
-        @"anyframe",
-        null,
-        undefined,
-        enum_literal,
-        atomic_order,
-        atomic_rmw_op,
-        calling_convention,
-        address_space,
-        float_mode,
-        reduce_op,
-        modifier,
-        prefetch_options,
-        export_options,
-        extern_options,
-        type_info,
-        manyptr_u8,
-        manyptr_const_u8,
-        manyptr_const_u8_sentinel_0,
-        fn_noreturn_no_args,
-        fn_void_no_args,
-        fn_naked_noreturn_no_args,
-        fn_ccc_void_no_args,
-        single_const_pointer_to_comptime_int,
-        const_slice_u8,
-        const_slice_u8_sentinel_0,
-        anyerror_void_error_union,
-        generic_poison,
-        /// Same as `empty_struct` except it has an empty namespace.
-        empty_struct_literal,
-        /// This is a special value that tracks a set of types that have been stored
-        /// to an inferred allocation. It does not support most of the normal type queries.
-        /// However it does respond to `isConstPtr`, `ptrSize`, `zigTypeTag`, etc.
-        inferred_alloc_mut,
-        /// Same as `inferred_alloc_mut` but the local is `var` not `const`.
-        inferred_alloc_const, // See last_no_payload_tag below.
-        // After this, the tag requires a payload.
-
-        array_u8,
-        array_u8_sentinel_0,
-        array,
-        array_sentinel,
-        vector,
-        /// Possible Value tags for this: @"struct"
-        tuple,
-        /// Possible Value tags for this: @"struct"
-        anon_struct,
-        pointer,
-        single_const_pointer,
-        single_mut_pointer,
-        many_const_pointer,
-        many_mut_pointer,
-        c_const_pointer,
-        c_mut_pointer,
-        const_slice,
-        mut_slice,
-        int_signed,
-        int_unsigned,
-        function,
-        optional,
-        optional_single_mut_pointer,
-        optional_single_const_pointer,
-        error_union,
-        anyframe_T,
-        error_set,
-        error_set_single,
-        /// The type is the inferred error set of a specific function.
-        error_set_inferred,
-        error_set_merged,
-        empty_struct,
-        @"opaque",
-        @"struct",
-        @"union",
-        union_safety_tagged,
-        union_tagged,
-        enum_simple,
-        enum_numbered,
-        enum_full,
-        enum_nonexhaustive,
-
-        pub const last_no_payload_tag = Tag.inferred_alloc_const;
-        pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
-
-        pub fn Type(comptime t: Tag) type {
-            // Keep in sync with tools/stage2_pretty_printers_common.py
-            return switch (t) {
-                .u1,
-                .u8,
-                .i8,
-                .u16,
-                .i16,
-                .u29,
-                .u32,
-                .i32,
-                .u64,
-                .i64,
-                .u128,
-                .i128,
-                .usize,
-                .isize,
-                .c_char,
-                .c_short,
-                .c_ushort,
-                .c_int,
-                .c_uint,
-                .c_long,
-                .c_ulong,
-                .c_longlong,
-                .c_ulonglong,
-                .c_longdouble,
-                .f16,
-                .f32,
-                .f64,
-                .f80,
-                .f128,
-                .anyopaque,
-                .bool,
-                .void,
-                .type,
-                .anyerror,
-                .comptime_int,
-                .comptime_float,
-                .noreturn,
-                .enum_literal,
-                .null,
-                .undefined,
-                .fn_noreturn_no_args,
-                .fn_void_no_args,
-                .fn_naked_noreturn_no_args,
-                .fn_ccc_void_no_args,
-                .single_const_pointer_to_comptime_int,
-                .anyerror_void_error_union,
-                .const_slice_u8,
-                .const_slice_u8_sentinel_0,
-                .generic_poison,
-                .inferred_alloc_const,
-                .inferred_alloc_mut,
-                .empty_struct_literal,
-                .manyptr_u8,
-                .manyptr_const_u8,
-                .manyptr_const_u8_sentinel_0,
-                .atomic_order,
-                .atomic_rmw_op,
-                .calling_convention,
-                .address_space,
-                .float_mode,
-                .reduce_op,
-                .modifier,
-                .prefetch_options,
-                .export_options,
-                .extern_options,
-                .type_info,
-                .@"anyframe",
-                => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
-
-                .array_u8,
-                .array_u8_sentinel_0,
-                => Payload.Len,
-
-                .single_const_pointer,
-                .single_mut_pointer,
-                .many_const_pointer,
-                .many_mut_pointer,
-                .c_const_pointer,
-                .c_mut_pointer,
-                .const_slice,
-                .mut_slice,
-                .optional,
-                .optional_single_mut_pointer,
-                .optional_single_const_pointer,
-                .anyframe_T,
-                => Payload.ElemType,
-
-                .int_signed,
-                .int_unsigned,
-                => Payload.Bits,
-
-                .error_set => Payload.ErrorSet,
-                .error_set_inferred => Payload.ErrorSetInferred,
-                .error_set_merged => Payload.ErrorSetMerged,
-
-                .array, .vector => Payload.Array,
-                .array_sentinel => Payload.ArraySentinel,
-                .pointer => Payload.Pointer,
-                .function => Payload.Function,
-                .error_union => Payload.ErrorUnion,
-                .error_set_single => Payload.Name,
-                .@"opaque" => Payload.Opaque,
-                .@"struct" => Payload.Struct,
-                .@"union", .union_safety_tagged, .union_tagged => Payload.Union,
-                .enum_full, .enum_nonexhaustive => Payload.EnumFull,
-                .enum_simple => Payload.EnumSimple,
-                .enum_numbered => Payload.EnumNumbered,
-                .empty_struct => Payload.ContainerScope,
-                .tuple => Payload.Tuple,
-                .anon_struct => Payload.AnonStruct,
-            };
-        }
-
-        pub fn init(comptime t: Tag) file_struct.Type {
-            comptime std.debug.assert(@enumToInt(t) < Tag.no_payload_count);
-            return .{ .tag_if_small_enough = t };
-        }
-
-        pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
-            const p = try ally.create(t.Type());
-            p.* = .{
-                .base = .{ .tag = t },
-                .data = data,
-            };
-            return file_struct.Type{ .ptr_otherwise = &p.base };
-        }
-
-        pub fn Data(comptime t: Tag) type {
-            return std.meta.fieldInfo(t.Type(), .data).type;
-        }
-    };
-
-    pub fn isTuple(ty: Type) bool {
-        return switch (ty.tag()) {
-            .tuple, .empty_struct_literal => true,
-            .@"struct" => ty.castTag(.@"struct").?.data.is_tuple,
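+            // Anonymous struct types with no field names are tuples.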
+            .anon_struct_type => |anon_struct| anon_struct.names.len == 0,
             else => false,
         };
     }

-    pub fn isAnonStruct(ty: Type) bool {
-        return switch (ty.tag()) {
-            .anon_struct, .empty_struct_literal => true,
+    pub fn isAnonStruct(ty: Type, mod: *Module) bool {
+        if (ty.toIntern() == .empty_struct_type) return true;
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0,
             else => false,
         };
     }

-    pub fn isTupleOrAnonStruct(ty: Type) bool {
-        return switch (ty.tag()) {
-            .tuple, .empty_struct_literal, .anon_struct => true,
-            .@"struct" => ty.castTag(.@"struct").?.data.is_tuple,
-            else => false,
-        };
-    }
-
-    pub fn isSimpleTuple(ty: Type) bool {
-        return switch (ty.tag()) {
-            .tuple, .empty_struct_literal => true,
-            else => false,
-        };
-    }
-
-    pub fn isSimpleTupleOrAnonStruct(ty: Type) bool {
-        return switch (ty.tag()) {
-            .tuple, .empty_struct_literal, .anon_struct => true,
-            else => false,
-        };
-    }
-
-    // Only allowed for simple tuple types
-    pub fn tupleFields(ty: Type) Payload.Tuple.Data {
-        return switch (ty.tag()) {
-            .tuple => ty.castTag(.tuple).?.data,
-            .anon_struct => .{
-                .types = ty.castTag(.anon_struct).?.data.types,
-                .values = ty.castTag(.anon_struct).?.data.values,
+    pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                return struct_obj.is_tuple;
             },
-            .empty_struct_literal => .{ .types = &.{}, .values = &.{} },
-            else => unreachable,
+            .anon_struct_type => true,
+            else => false,
+        };
+    }
+
+    pub fn isSimpleTuple(ty: Type, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0,
+            else => false,
+        };
+    }
+
+    pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .anon_struct_type => true,
+            else => false,
         };
     }

-    /// The sub-types are named after what fields they contain.
     pub const Payload = struct {
-        tag: Tag,
-
-        pub const Len = struct {
-            base: Payload,
-            data: u64,
-        };
-
-        pub const Array = struct {
-            base: Payload,
-            data: struct {
-                len: u64,
-                elem_type: Type,
-            },
-        };
-
-        pub const ArraySentinel = struct {
-            pub const base_tag = Tag.array_sentinel;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: struct {
-                len: u64,
-                sentinel: Value,
-                elem_type: Type,
-            },
-        };
-
-        pub const ElemType = struct {
-            base: Payload,
-            data: Type,
-        };
-
-        pub const Bits = struct {
-            base: Payload,
-            data: u16,
-        };
-
-        pub const Function = struct {
-            pub const base_tag = Tag.function;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: Data,
-
-            // TODO look into optimizing this memory to take fewer bytes
-            pub const Data = struct {
-                param_types: []Type,
-                comptime_params: [*]bool,
-                return_type: Type,
-                /// If zero use default target function code alignment.
-                alignment: u32,
-                noalias_bits: u32,
-                cc: std.builtin.CallingConvention,
-                is_var_args: bool,
-                is_generic: bool,
-                is_noinline: bool,
-                align_is_generic: bool,
-                cc_is_generic: bool,
-                section_is_generic: bool,
-                addrspace_is_generic: bool,
-
-                pub fn paramIsComptime(self: @This(), i: usize) bool {
-                    assert(i < self.param_types.len);
-                    return self.comptime_params[i];
-                }
-            };
-        };
-
-        pub const ErrorSet = struct {
-            pub const base_tag = Tag.error_set;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: *Module.ErrorSet,
-        };
-
-        pub const ErrorSetMerged = struct {
-            pub const base_tag = Tag.error_set_merged;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: Module.ErrorSet.NameMap,
-        };
-
-        pub const ErrorSetInferred = struct {
-            pub const base_tag = Tag.error_set_inferred;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: *Module.Fn.InferredErrorSet,
-        };
-
+        /// TODO: remove this data structure since we have `InternPool.Key.PtrType`.
         pub const Pointer = struct {
-            pub const base_tag = Tag.pointer;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: Data,
-
             pub const Data = struct {
                 pointee_type: Type,
                 sentinel: ?Value = null,
@@ -6417,145 +3371,103 @@ pub const Type = extern union {
                 @"volatile": bool = false,
                 size: std.builtin.Type.Pointer.Size = .One,

-                pub const VectorIndex = enum(u32) {
-                    none = std.math.maxInt(u32),
-                    runtime = std.math.maxInt(u32) - 1,
-                    _,
-                };
+                pub const VectorIndex = InternPool.Key.PtrType.VectorIndex;

-                pub fn alignment(data: Data, target: Target) u32 {
+                pub fn alignment(data: Data, mod: *Module) u32 {
                     if (data.@"align" != 0) return data.@"align";
-                    return abiAlignment(data.pointee_type, target);
+                    return abiAlignment(data.pointee_type, mod);
+                }
+
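+                // Rebuilds the legacy `Data` view from the canonical interned key;
+                // alignment converts from InternPool units back to byte units.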
+                pub fn fromKey(p: InternPool.Key.PtrType) Data {
+                    return .{
+                        .pointee_type = p.child.toType(),
+                        .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null,
+                        .@"align" = @intCast(u32, p.flags.alignment.toByteUnits(0)),
+                        .@"addrspace" = p.flags.address_space,
+                        .bit_offset = p.packed_offset.bit_offset,
+                        .host_size = p.packed_offset.host_size,
+                        .vector_index = p.flags.vector_index,
+                        .@"allowzero" = p.flags.is_allowzero,
+                        .mutable = !p.flags.is_const,
+                        .@"volatile" = p.flags.is_volatile,
+                        .size = p.flags.size,
+                    };
                 }
             };
         };
-
-        pub const ErrorUnion = struct {
-            pub const base_tag = Tag.error_union;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: struct {
-                error_set: Type,
-                payload: Type,
-            },
-        };
-
-        pub const Decl = struct {
-            base: Payload,
-            data: *Module.Decl,
-        };
-
-        pub const Name = struct {
-            base: Payload,
-            /// memory is owned by `Module`
-            data: []const u8,
-        };
-
-        /// Mostly used for namespace like structs with zero fields.
-        /// Most commonly used for files.
-        pub const ContainerScope = struct {
-            base: Payload,
-            data: *Module.Namespace,
-        };
-
-        pub const Opaque = struct {
-            base: Payload = .{ .tag = .@"opaque" },
-            data: *Module.Opaque,
-        };
-
-        pub const Struct = struct {
-            base: Payload = .{ .tag = .@"struct" },
-            data: *Module.Struct,
-        };
-
-        pub const Tuple = struct {
-            base: Payload = .{ .tag = .tuple },
-            data: Data,
-
-            pub const Data = struct {
-                types: []Type,
-                /// unreachable_value elements are used to indicate runtime-known.
-                values: []Value,
-            };
-        };
-
-        pub const AnonStruct = struct {
-            base: Payload = .{ .tag = .anon_struct },
-            data: Data,
-
-            pub const Data = struct {
-                names: []const []const u8,
-                types: []Type,
-                /// unreachable_value elements are used to indicate runtime-known.
-                values: []Value,
-            };
-        };
-
-        pub const Union = struct {
-            base: Payload,
-            data: *Module.Union,
-        };
-
-        pub const EnumFull = struct {
-            base: Payload,
-            data: *Module.EnumFull,
-        };
-
-        pub const EnumSimple = struct {
-            base: Payload = .{ .tag = .enum_simple },
-            data: *Module.EnumSimple,
-        };
-
-        pub const EnumNumbered = struct {
-            base: Payload = .{ .tag = .enum_numbered },
-            data: *Module.EnumNumbered,
-        };
     };

-    pub const @"u1" = initTag(.u1);
-    pub const @"u8" = initTag(.u8);
-    pub const @"u16" = initTag(.u16);
-    pub const @"u29" = initTag(.u29);
-    pub const @"u32" = initTag(.u32);
-    pub const @"u64" = initTag(.u64);
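+    // Well-known types are pre-interned: each shorthand below simply wraps a
+    // fixed InternPool index.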
@"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; + pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; + + pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; + pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; + pub const single_const_pointer_to_comptime_int: Type = .{ + .ip_index = .single_const_pointer_to_comptime_int_type, + }; + pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; + pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; + + pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; pub const err_int = Type.u16; pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { - const target = mod.getTarget(); + // TODO: update callsites of this function to directly call mod.ptrType + // and then delete this function. + _ = arena; var d = data; - if (d.size == .C) { - d.@"allowzero" = true; - } - // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee // type, we change it to 0 here. If this causes an assertion trip because the // pointee type needs to be resolved more, that needs to be done before calling // this ptr() function. if (d.@"align" != 0) canonicalize: { - if (!d.pointee_type.layoutIsResolved()) break :canonicalize; - if (d.@"align" == d.pointee_type.abiAlignment(target)) { + if (!d.pointee_type.layoutIsResolved(mod)) break :canonicalize; + if (d.@"align" == d.pointee_type.abiAlignment(mod)) { d.@"align" = 0; } } @@ -6565,57 +3477,29 @@ pub const Type = extern union { // needs to be resolved before calling this ptr() function. if (d.host_size != 0) { assert(d.bit_offset < d.host_size * 8); - if (d.host_size * 8 == d.pointee_type.bitSize(target)) { + if (d.host_size * 8 == d.pointee_type.bitSize(mod)) { assert(d.bit_offset == 0); d.host_size = 0; } } - if (d.@"align" == 0 and d.@"addrspace" == .generic and - d.bit_offset == 0 and d.host_size == 0 and d.vector_index == .none and - !d.@"allowzero" and !d.@"volatile") - { - if (d.sentinel) |sent| { - if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { - switch (d.size) { - .Slice => { - if (sent.compareAllWithZero(.eq, mod)) { - return Type.initTag(.const_slice_u8_sentinel_0); - } - }, - .Many => { - if (sent.compareAllWithZero(.eq, mod)) { - return Type.initTag(.manyptr_const_u8_sentinel_0); - } - }, - else => {}, - } - } - } else if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { - switch (d.size) { - .Slice => return Type.initTag(.const_slice_u8), - .Many => return Type.initTag(.manyptr_const_u8), - else => {}, - } - } else { - const T = Type.Tag; - const type_payload = try arena.create(Type.Payload.ElemType); - type_payload.* = .{ - .base = .{ - .tag = switch (d.size) { - .One => if (d.mutable) T.single_mut_pointer else T.single_const_pointer, - .Many => if (d.mutable) T.many_mut_pointer else T.many_const_pointer, - .C => if (d.mutable) T.c_mut_pointer else T.c_const_pointer, - .Slice => if (d.mutable) T.mut_slice else T.const_slice, - }, - }, - .data = d.pointee_type, - }; - return Type.initPayload(&type_payload.base); - } - } - - return Type.Tag.pointer.create(arena, d); + return mod.ptrType(.{ + .child = d.pointee_type.ip_index, + .sentinel = if (d.sentinel) |s| s.ip_index else .none, + .flags = .{ + .alignment = InternPool.Alignment.fromByteUnits(d.@"align"), + .vector_index = d.vector_index, + .size = d.size, + .is_const = !d.mutable, + .is_volatile = d.@"volatile", + .is_allowzero = d.@"allowzero", + .address_space = 
     pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type {
-        const target = mod.getTarget();
+        // TODO: update callsites of this function to directly call mod.ptrType
+        // and then delete this function.
+        _ = arena;

         var d = data;

-        if (d.size == .C) {
-            d.@"allowzero" = true;
-        }
-
         // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
         // type, we change it to 0 here. If this causes an assertion trip because the
         // pointee type needs to be resolved more, that needs to be done before calling
         // this ptr() function.
         if (d.@"align" != 0) canonicalize: {
-            if (!d.pointee_type.layoutIsResolved()) break :canonicalize;
-            if (d.@"align" == d.pointee_type.abiAlignment(target)) {
+            if (!d.pointee_type.layoutIsResolved(mod)) break :canonicalize;
+            if (d.@"align" == d.pointee_type.abiAlignment(mod)) {
                 d.@"align" = 0;
             }
         }
@@ -6565,57 +3477,29 @@ pub const Type = extern union {
         // needs to be resolved before calling this ptr() function.
         if (d.host_size != 0) {
             assert(d.bit_offset < d.host_size * 8);
-            if (d.host_size * 8 == d.pointee_type.bitSize(target)) {
+            if (d.host_size * 8 == d.pointee_type.bitSize(mod)) {
                 assert(d.bit_offset == 0);
                 d.host_size = 0;
             }
         }

-        if (d.@"align" == 0 and d.@"addrspace" == .generic and
-            d.bit_offset == 0 and d.host_size == 0 and d.vector_index == .none and
-            !d.@"allowzero" and !d.@"volatile")
-        {
-            if (d.sentinel) |sent| {
-                if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
-                    switch (d.size) {
-                        .Slice => {
-                            if (sent.compareAllWithZero(.eq, mod)) {
-                                return Type.initTag(.const_slice_u8_sentinel_0);
-                            }
-                        },
-                        .Many => {
-                            if (sent.compareAllWithZero(.eq, mod)) {
-                                return Type.initTag(.manyptr_const_u8_sentinel_0);
-                            }
-                        },
-                        else => {},
-                    }
-                }
-            } else if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
-                switch (d.size) {
-                    .Slice => return Type.initTag(.const_slice_u8),
-                    .Many => return Type.initTag(.manyptr_const_u8),
-                    else => {},
-                }
-            } else {
-                const T = Type.Tag;
-                const type_payload = try arena.create(Type.Payload.ElemType);
-                type_payload.* = .{
-                    .base = .{
-                        .tag = switch (d.size) {
-                            .One => if (d.mutable) T.single_mut_pointer else T.single_const_pointer,
-                            .Many => if (d.mutable) T.many_mut_pointer else T.many_const_pointer,
-                            .C => if (d.mutable) T.c_mut_pointer else T.c_const_pointer,
-                            .Slice => if (d.mutable) T.mut_slice else T.const_slice,
-                        },
-                    },
-                    .data = d.pointee_type,
-                };
-                return Type.initPayload(&type_payload.base);
-            }
-        }
-
-        return Type.Tag.pointer.create(arena, d);
+        return mod.ptrType(.{
+            .child = d.pointee_type.ip_index,
+            .sentinel = if (d.sentinel) |s| s.ip_index else .none,
+            .flags = .{
+                .alignment = InternPool.Alignment.fromByteUnits(d.@"align"),
+                .vector_index = d.vector_index,
+                .size = d.size,
+                .is_const = !d.mutable,
+                .is_volatile = d.@"volatile",
+                .is_allowzero = d.@"allowzero",
+                .address_space = d.@"addrspace",
+            },
+            .packed_offset = .{
+                .host_size = d.host_size,
+                .bit_offset = d.bit_offset,
+            },
+        });
     }

     pub fn array(
@@ -6625,68 +3509,23 @@ pub const Type = extern union {
         elem_type: Type,
         mod: *Module,
     ) Allocator.Error!Type {
-        if (elem_type.eql(Type.u8, mod)) {
-            if (sent) |some| {
-                if (some.eql(Value.zero, elem_type, mod)) {
-                    return Tag.array_u8_sentinel_0.create(arena, len);
-                }
-            } else {
-                return Tag.array_u8.create(arena, len);
-            }
-        }
+        // TODO: update callsites of this function to directly call mod.arrayType
+        // and then delete this function.
+        _ = arena;

-        if (sent) |some| {
-            return Tag.array_sentinel.create(arena, .{
-                .len = len,
-                .sentinel = some,
-                .elem_type = elem_type,
-            });
-        }
-
-        return Tag.array.create(arena, .{
+        return mod.arrayType(.{
             .len = len,
-            .elem_type = elem_type,
+            .child = elem_type.ip_index,
+            .sentinel = if (sent) |s| s.ip_index else .none,
         });
     }

-    pub fn vector(arena: Allocator, len: u64, elem_type: Type) Allocator.Error!Type {
-        return Tag.vector.create(arena, .{
-            .len = len,
-            .elem_type = elem_type,
-        });
-    }
+    pub fn optional(arena: Allocator, child_type: Type, mod: *Module) Allocator.Error!Type {
+        // TODO: update callsites of this function to directly call
+        // mod.optionalType and then delete this function.
+        _ = arena;

-    pub fn optional(arena: Allocator, child_type: Type) Allocator.Error!Type {
-        switch (child_type.tag()) {
-            .single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
-                arena,
-                child_type.elemType(),
-            ),
-            .single_mut_pointer => return Type.Tag.optional_single_mut_pointer.create(
-                arena,
-                child_type.elemType(),
-            ),
-            else => return Type.Tag.optional.create(arena, child_type),
-        }
-    }
-
-    pub fn errorUnion(
-        arena: Allocator,
-        error_set: Type,
-        payload: Type,
-        mod: *Module,
-    ) Allocator.Error!Type {
-        assert(error_set.zigTypeTag() == .ErrorSet);
-        if (error_set.eql(Type.anyerror, mod) and
-            payload.eql(Type.void, mod))
-        {
-            return Type.initTag(.anyerror_void_error_union);
-        }
-
-        return Type.Tag.error_union.create(arena, .{
-            .error_set = error_set,
-            .payload = payload,
-        });
+        return mod.optionalType(child_type.ip_index);
     }

     pub fn smallestUnsignedBits(max: u64) u16 {
@@ -6696,113 +3535,7 @@ pub const Type = extern union {
         return @intCast(u16, base + @boolToInt(upper < max));
     }

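+    // For example, smallestUnsignedBits(255) == 8 and smallestUnsignedBits(256) == 9;
+    // the @boolToInt term rounds up whenever `max` exceeds the power-of-two bound.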
-    pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type {
-        const bits = smallestUnsignedBits(max);
-        return intWithBits(arena, false, bits);
-    }
-
-    pub fn intWithBits(arena: Allocator, sign: bool, bits: u16) !Type {
-        return if (sign) switch (bits) {
-            8 => initTag(.i8),
-            16 => initTag(.i16),
-            32 => initTag(.i32),
-            64 => initTag(.i64),
-            else => return Tag.int_signed.create(arena, bits),
-        } else switch (bits) {
-            1 => initTag(.u1),
-            8 => initTag(.u8),
-            16 => initTag(.u16),
-            32 => initTag(.u32),
-            64 => initTag(.u64),
-            else => return Tag.int_unsigned.create(arena, bits),
-        };
-    }
-
-    /// Given a value representing an integer, returns the number of bits necessary to represent
-    /// this value in an integer. If `sign` is true, returns the number of bits necessary in a
-    /// twos-complement integer; otherwise in an unsigned integer.
-    /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
-    pub fn intBitsForValue(target: Target, val: Value, sign: bool) u16 {
-        assert(!val.isUndef());
-        switch (val.tag()) {
-            .int_big_positive => {
-                const limbs = val.castTag(.int_big_positive).?.data;
-                const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true };
-                return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
-            },
-            .int_big_negative => {
-                const limbs = val.castTag(.int_big_negative).?.data;
-                // Zero is still a possibility, in which case unsigned is fine
-                for (limbs) |limb| {
-                    if (limb != 0) break;
-                } else return 0; // val == 0
-                assert(sign);
-                const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false };
-                return @intCast(u16, big.bitCountTwosComp());
-            },
-            .int_i64 => {
-                const x = val.castTag(.int_i64).?.data;
-                if (x >= 0) return smallestUnsignedBits(@intCast(u64, x));
-                assert(sign);
-                return smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
-            },
-            else => {
-                const x = val.toUnsignedInt(target);
-                return smallestUnsignedBits(x) + @boolToInt(sign);
-            },
-        }
-    }
-
-    /// Returns the smallest possible integer type containing both `min` and `max`. Asserts that neither
-    /// value is undef.
-    /// TODO: if #3806 is implemented, this becomes trivial
-    pub fn intFittingRange(target: Target, arena: Allocator, min: Value, max: Value) !Type {
-        assert(!min.isUndef());
-        assert(!max.isUndef());
-
-        if (std.debug.runtime_safety) {
-            assert(Value.order(min, max, target).compare(.lte));
-        }
-
-        const sign = min.orderAgainstZero() == .lt;
-
-        const min_val_bits = intBitsForValue(target, min, sign);
-        const max_val_bits = intBitsForValue(target, max, sign);
-        const bits = @max(min_val_bits, max_val_bits);
-
-        return intWithBits(arena, sign, bits);
-    }
-
     /// This is only used for comptime asserts. Bump this number when you make a change
     /// to packed struct layout to find out all the places in the codebase you need to edit!
     pub const packed_struct_layout_version = 2;
-
-    /// This function is used in the debugger pretty formatters in tools/ to fetch the
-    /// Tag to Payload mapping to facilitate fancy debug printing for this type.
-    fn dbHelper(self: *Type, tag_to_payload_map: *map: {
-        const tags = @typeInfo(Tag).Enum.fields;
-        var fields: [tags.len]std.builtin.Type.StructField = undefined;
-        for (&fields, tags) |*field, t| field.* = .{
-            .name = t.name,
-            .type = *if (t.value < Tag.no_payload_count) void else @field(Tag, t.name).Type(),
-            .default_value = null,
-            .is_comptime = false,
-            .alignment = 0,
-        };
-        break :map @Type(.{ .Struct = .{
-            .layout = .Extern,
-            .fields = &fields,
-            .decls = &.{},
-            .is_tuple = false,
-        } });
-    }) void {
-        _ = self;
-        _ = tag_to_payload_map;
-    }
-
-    comptime {
-        if (builtin.mode == .Debug) {
-            _ = &dbHelper;
-        }
-    }
 };
diff --git a/src/value.zig b/src/value.zig
index af2d7b1ca2..d3f15121b8 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -11,147 +11,27 @@ const Module = @import("Module.zig");
 const Air = @import("Air.zig");
 const TypedValue = @import("TypedValue.zig");
 const Sema = @import("Sema.zig");
+const InternPool = @import("InternPool.zig");

-/// This is the raw data, with no bookkeeping, no memory awareness,
-/// no de-duplication, and no type system awareness.
-/// It's important for this type to be small.
-/// This union takes advantage of the fact that the first page of memory
-/// is unmapped, giving us 4096 possible enum tags that have no payload.
-pub const Value = extern union {
-    /// If the tag value is less than Tag.no_payload_count, then no pointer
-    /// dereference is needed.
-    tag_if_small_enough: Tag,
-    ptr_otherwise: *Payload,
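+// Transitional representation: a Value is either interned (`ip_index != .none`)
+// or a legacy tagged payload; both states coexist during the migration.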
+pub const Value = struct {
+    /// We are migrating towards using this for every Value object. However, many
+    /// values are still represented the legacy way. This is indicated by using
+    /// InternPool.Index.none.
+    ip_index: InternPool.Index,
+
+    /// This is the raw data, with no bookkeeping, no memory awareness,
+    /// no de-duplication, and no type system awareness.
+    /// This union takes advantage of the fact that the first page of memory
+    /// is unmapped, giving us 4096 possible enum tags that have no payload.
+    legacy: extern union {
+        ptr_otherwise: *Payload,
+    },

     // Keep in sync with tools/stage2_pretty_printers_common.py
     pub const Tag = enum(usize) {
         // The first section of this enum are tags that require no payload.
-        u1_type,
-        u8_type,
-        i8_type,
-        u16_type,
-        i16_type,
-        u29_type,
-        u32_type,
-        i32_type,
-        u64_type,
-        i64_type,
-        u128_type,
-        i128_type,
-        usize_type,
-        isize_type,
-        c_char_type,
-        c_short_type,
-        c_ushort_type,
-        c_int_type,
-        c_uint_type,
-        c_long_type,
-        c_ulong_type,
-        c_longlong_type,
-        c_ulonglong_type,
-        c_longdouble_type,
-        f16_type,
-        f32_type,
-        f64_type,
-        f80_type,
-        f128_type,
-        anyopaque_type,
-        bool_type,
-        void_type,
-        type_type,
-        anyerror_type,
-        comptime_int_type,
-        comptime_float_type,
-        noreturn_type,
-        anyframe_type,
-        null_type,
-        undefined_type,
-        enum_literal_type,
-        atomic_order_type,
-        atomic_rmw_op_type,
-        calling_convention_type,
-        address_space_type,
-        float_mode_type,
-        reduce_op_type,
-        modifier_type,
-        prefetch_options_type,
-        export_options_type,
-        extern_options_type,
-        type_info_type,
-        manyptr_u8_type,
-        manyptr_const_u8_type,
-        manyptr_const_u8_sentinel_0_type,
-        fn_noreturn_no_args_type,
-        fn_void_no_args_type,
-        fn_naked_noreturn_no_args_type,
-        fn_ccc_void_no_args_type,
-        single_const_pointer_to_comptime_int_type,
-        const_slice_u8_type,
-        const_slice_u8_sentinel_0_type,
-        anyerror_void_error_union_type,
-        generic_poison_type,
-
-        undef,
-        zero,
-        one,
-        void_value,
-        unreachable_value,
-        /// The only possible value for a particular type, which is stored externally.
-        the_only_possible_value,
-        null_value,
-        bool_true,
-        bool_false,
-        generic_poison,
-
-        empty_struct_value,
-        empty_array, // See last_no_payload_tag below.
         // After this, the tag requires a payload.
-        ty,
-        int_type,
-        int_u64,
-        int_i64,
-        int_big_positive,
-        int_big_negative,
-        function,
-        extern_fn,
-        variable,
-        /// A wrapper for values which are comptime-known but should
-        /// semantically be runtime-known.
-        runtime_value,
-        /// Represents a pointer to a Decl.
-        /// When machine codegen backend sees this, it must set the Decl's `alive` field to true.
-        decl_ref,
-        /// Pointer to a Decl, but allows comptime code to mutate the Decl's Value.
-        /// This Tag will never be seen by machine codegen backends. It is changed into a
-        /// `decl_ref` when a comptime variable goes out of scope.
-        decl_ref_mut,
-        /// Behaves like `decl_ref_mut` but validates that the stored value matches the field value.
-        comptime_field_ptr,
-        /// Pointer to a specific element of an array, vector or slice.
-        elem_ptr,
-        /// Pointer to a specific field of a struct or union.
-        field_ptr,
-        /// A slice of u8 whose memory is managed externally.
-        bytes,
-        /// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`.
-        str_lit,
-        /// This value is repeated some number of times. The amount of times to repeat
-        /// is stored externally.
-        repeated,
-        /// An array with length 0 but it has a sentinel.
- empty_array_sentinel, - /// Pointer and length as sub `Value` objects. - slice, - float_16, - float_32, - float_64, - float_80, - float_128, - enum_literal, - /// A specific enum tag, indicated by the field index (declaration order). - enum_field_index, - @"error", /// When the type is error union: /// * If the tag is `.@"error"`, the error union is an error. /// * If the tag is `.eu_payload`, the error union is a payload. @@ -159,8 +39,6 @@ pub const Value = extern union { /// is non-error, but the inner error union is an error, is represented as /// a tag of `.eu_payload`, with a sub-tag of `.@"error"`. eu_payload, - /// A pointer to the payload of an error union, based on a pointer to an error union. - eu_payload_ptr, /// When the type is optional: /// * If the tag is `.null_value`, the optional is null. /// * If the tag is `.opt_payload`, the optional is a payload. @@ -168,8 +46,13 @@ pub const Value = extern union { /// is non-null, but the inner optional is null, is represented as /// a tag of `.opt_payload`, with a sub-tag of `.null_value`. opt_payload, - /// A pointer to the payload of an optional, based on a pointer to an optional. - opt_payload_ptr, + /// Pointer and length as sub `Value` objects. + slice, + /// A slice of u8 whose memory is managed externally. + bytes, + /// This value is repeated some number of times. The amount of times to repeat + /// is stored externally. + repeated, /// An instance of a struct, array, or vector. /// Each element/field stored as a `Value`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -177,152 +60,17 @@ pub const Value = extern union { aggregate, /// An instance of a union. @"union", - /// This is a special value that tracks a set of types that have been stored - /// to an inferred allocation. It does not support any of the normal value queries. - inferred_alloc, - /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc - /// instructions for comptime code. - inferred_alloc_comptime, - /// The ABI alignment of the payload type. - lazy_align, - /// The ABI size of the payload type. 
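        // Note: the lazy_align tag above and the lazy_size tag below are not
        // lost in this migration; later hunks in this diff show the same lazy
        // values re-surfacing as `int.storage.lazy_align` / `int.storage.lazy_size`
        // payloads inside the InternPool (see toBigIntAdvanced and
        // getUnsignedIntAdvanced further down).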
- lazy_size, - - pub const last_no_payload_tag = Tag.empty_array; - pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; pub fn Type(comptime t: Tag) type { return switch (t) { - .u1_type, - .u8_type, - .i8_type, - .u16_type, - .i16_type, - .u29_type, - .u32_type, - .i32_type, - .u64_type, - .i64_type, - .u128_type, - .i128_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .anyopaque_type, - .bool_type, - .void_type, - .type_type, - .anyerror_type, - .comptime_int_type, - .comptime_float_type, - .noreturn_type, - .null_type, - .undefined_type, - .fn_noreturn_no_args_type, - .fn_void_no_args_type, - .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args_type, - .single_const_pointer_to_comptime_int_type, - .anyframe_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, - .anyerror_void_error_union_type, - .generic_poison_type, - .enum_literal_type, - .undef, - .zero, - .one, - .void_value, - .unreachable_value, - .the_only_possible_value, - .empty_struct_value, - .empty_array, - .null_value, - .bool_true, - .bool_false, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, - .atomic_order_type, - .atomic_rmw_op_type, - .calling_convention_type, - .address_space_type, - .float_mode_type, - .reduce_op_type, - .modifier_type, - .prefetch_options_type, - .export_options_type, - .extern_options_type, - .type_info_type, - .generic_poison, - => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), - - .int_big_positive, - .int_big_negative, - => Payload.BigInt, - - .extern_fn => Payload.ExternFn, - - .decl_ref => Payload.Decl, - - .repeated, .eu_payload, .opt_payload, - .empty_array_sentinel, - .runtime_value, + .repeated, => Payload.SubValue, - - .eu_payload_ptr, - .opt_payload_ptr, - => Payload.PayloadPtr, - - .bytes, - .enum_literal, - => Payload.Bytes, - - .str_lit => Payload.StrLit, .slice => Payload.Slice, - - .enum_field_index => Payload.U32, - - .ty, - .lazy_align, - .lazy_size, - => Payload.Ty, - - .int_type => Payload.IntType, - .int_u64 => Payload.U64, - .int_i64 => Payload.I64, - .function => Payload.Function, - .variable => Payload.Variable, - .decl_ref_mut => Payload.DeclRefMut, - .elem_ptr => Payload.ElemPtr, - .field_ptr => Payload.FieldPtr, - .float_16 => Payload.Float_16, - .float_32 => Payload.Float_32, - .float_64 => Payload.Float_64, - .float_80 => Payload.Float_80, - .float_128 => Payload.Float_128, - .@"error" => Payload.Error, - .inferred_alloc => Payload.InferredAlloc, - .inferred_alloc_comptime => Payload.InferredAllocComptime, + .bytes => Payload.Bytes, .aggregate => Payload.Aggregate, .@"union" => Payload.Union, - .comptime_field_ptr => Payload.ComptimeFieldPtr, }; } @@ -332,7 +80,10 @@ pub const Value = extern union { .base = .{ .tag = t }, .data = data, }; - return Value{ .ptr_otherwise = &ptr.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &ptr.base }, + }; } pub fn Data(comptime t: Tag) type { @@ -340,39 +91,31 @@ pub const Value = extern union { } }; - pub fn initTag(small_tag: Tag) Value { - assert(@enumToInt(small_tag) < Tag.no_payload_count); - return .{ .tag_if_small_enough = small_tag }; - } - pub fn initPayload(payload: *Payload) Value { - assert(@enumToInt(payload.tag) >= Tag.no_payload_count); - return .{ .ptr_otherwise = payload }; + 
return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = payload }, + }; } pub fn tag(self: Value) Tag { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return self.tag_if_small_enough; - } else { - return self.ptr_otherwise.tag; - } + assert(self.ip_index == .none); + return self.legacy.ptr_otherwise.tag; } /// Prefer `castTag` to this. pub fn cast(self: Value, comptime T: type) ?*T { + if (self.ip_index != .none) { + return null; + } if (@hasField(T, "base_tag")) { return self.castTag(T.base_tag); } - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return null; - } inline for (@typeInfo(Tag).Enum.fields) |field| { - if (field.value < Tag.no_payload_count) - continue; const t = @intToEnum(Tag, field.value); - if (self.ptr_otherwise.tag == t) { + if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { - return @fieldParentPtr(T, "base", self.ptr_otherwise); + return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); } return null; } @@ -381,11 +124,10 @@ pub const Value = extern union { } pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) - return null; + if (self.ip_index != .none) return null; - if (self.ptr_otherwise.tag == t) - return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise); + if (self.legacy.ptr_otherwise.tag == t) + return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); return null; } @@ -393,165 +135,10 @@ pub const Value = extern union { /// It's intentional that this function is not passed a corresponding Type, so that /// a Value can be copied from a Sema to a Decl prior to resolving struct/union field types. pub fn copy(self: Value, arena: Allocator) error{OutOfMemory}!Value { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return Value{ .tag_if_small_enough = self.tag_if_small_enough }; - } else switch (self.ptr_otherwise.tag) { - .u1_type, - .u8_type, - .i8_type, - .u16_type, - .i16_type, - .u29_type, - .u32_type, - .i32_type, - .u64_type, - .i64_type, - .u128_type, - .i128_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .anyopaque_type, - .bool_type, - .void_type, - .type_type, - .anyerror_type, - .comptime_int_type, - .comptime_float_type, - .noreturn_type, - .null_type, - .undefined_type, - .fn_noreturn_no_args_type, - .fn_void_no_args_type, - .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args_type, - .single_const_pointer_to_comptime_int_type, - .anyframe_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, - .anyerror_void_error_union_type, - .generic_poison_type, - .enum_literal_type, - .undef, - .zero, - .one, - .void_value, - .unreachable_value, - .the_only_possible_value, - .empty_array, - .null_value, - .bool_true, - .bool_false, - .empty_struct_value, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, - .atomic_order_type, - .atomic_rmw_op_type, - .calling_convention_type, - .address_space_type, - .float_mode_type, - .reduce_op_type, - .modifier_type, - .prefetch_options_type, - .export_options_type, - .extern_options_type, - .type_info_type, - .generic_poison, - => unreachable, - - .ty, .lazy_align, .lazy_size => { - const payload = self.cast(Payload.Ty).?; - const new_payload = try 
arena.create(Payload.Ty); - new_payload.* = .{ - .base = payload.base, - .data = try payload.data.copy(arena), - }; - return Value{ .ptr_otherwise = &new_payload.base }; - }, - .int_type => return self.copyPayloadShallow(arena, Payload.IntType), - .int_u64 => return self.copyPayloadShallow(arena, Payload.U64), - .int_i64 => return self.copyPayloadShallow(arena, Payload.I64), - .int_big_positive, .int_big_negative => { - const old_payload = self.cast(Payload.BigInt).?; - const new_payload = try arena.create(Payload.BigInt); - new_payload.* = .{ - .base = .{ .tag = self.ptr_otherwise.tag }, - .data = try arena.dupe(std.math.big.Limb, old_payload.data), - }; - return Value{ .ptr_otherwise = &new_payload.base }; - }, - .function => return self.copyPayloadShallow(arena, Payload.Function), - .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn), - .variable => return self.copyPayloadShallow(arena, Payload.Variable), - .decl_ref => return self.copyPayloadShallow(arena, Payload.Decl), - .decl_ref_mut => return self.copyPayloadShallow(arena, Payload.DeclRefMut), - .eu_payload_ptr, - .opt_payload_ptr, - => { - const payload = self.cast(Payload.PayloadPtr).?; - const new_payload = try arena.create(Payload.PayloadPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = try payload.data.container_ty.copy(arena), - }, - }; - return Value{ .ptr_otherwise = &new_payload.base }; - }, - .comptime_field_ptr => { - const payload = self.cast(Payload.ComptimeFieldPtr).?; - const new_payload = try arena.create(Payload.ComptimeFieldPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .field_val = try payload.data.field_val.copy(arena), - .field_ty = try payload.data.field_ty.copy(arena), - }, - }; - return Value{ .ptr_otherwise = &new_payload.base }; - }, - .elem_ptr => { - const payload = self.castTag(.elem_ptr).?; - const new_payload = try arena.create(Payload.ElemPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .array_ptr = try payload.data.array_ptr.copy(arena), - .elem_ty = try payload.data.elem_ty.copy(arena), - .index = payload.data.index, - }, - }; - return Value{ .ptr_otherwise = &new_payload.base }; - }, - .field_ptr => { - const payload = self.castTag(.field_ptr).?; - const new_payload = try arena.create(Payload.FieldPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = try payload.data.container_ty.copy(arena), - .field_index = payload.data.field_index, - }, - }; - return Value{ .ptr_otherwise = &new_payload.base }; - }, + if (self.ip_index != .none) { + return Value{ .ip_index = self.ip_index, .legacy = undefined }; + } + switch (self.legacy.ptr_otherwise.tag) { .bytes => { const bytes = self.castTag(.bytes).?.data; const new_payload = try arena.create(Payload.Bytes); @@ -559,14 +146,14 @@ pub const Value = extern union { .base = .{ .tag = .bytes }, .data = try arena.dupe(u8, bytes), }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, - .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit), - .repeated, .eu_payload, .opt_payload, - .empty_array_sentinel, - .runtime_value, + .repeated, => { const payload = self.cast(Payload.SubValue).?; const new_payload = try arena.create(Payload.SubValue); @@ -574,7 +161,10 @@ pub const Value = extern union { .base = payload.base, 
.data = try payload.data.copy(arena), }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .slice => { const payload = self.castTag(.slice).?; @@ -586,25 +176,11 @@ pub const Value = extern union { .len = try payload.data.len.copy(arena), }, }; - return Value{ .ptr_otherwise = &new_payload.base }; - }, - .float_16 => return self.copyPayloadShallow(arena, Payload.Float_16), - .float_32 => return self.copyPayloadShallow(arena, Payload.Float_32), - .float_64 => return self.copyPayloadShallow(arena, Payload.Float_64), - .float_80 => return self.copyPayloadShallow(arena, Payload.Float_80), - .float_128 => return self.copyPayloadShallow(arena, Payload.Float_128), - .enum_literal => { - const payload = self.castTag(.enum_literal).?; - const new_payload = try arena.create(Payload.Bytes); - new_payload.* = .{ - .base = payload.base, - .data = try arena.dupe(u8, payload.data), + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, }; - return Value{ .ptr_otherwise = &new_payload.base }; }, - .enum_field_index => return self.copyPayloadShallow(arena, Payload.U32), - .@"error" => return self.copyPayloadShallow(arena, Payload.Error), - .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -615,9 +191,11 @@ pub const Value = extern union { for (new_payload.data, 0..) |*elem, i| { elem.* = try payload.data[i].copy(arena); } - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, - .@"union" => { const tag_and_val = self.castTag(.@"union").?.data; const new_payload = try arena.create(Payload.Union); @@ -628,11 +206,11 @@ pub const Value = extern union { .val = try tag_and_val.val.copy(arena), }, }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, } } @@ -640,7 +218,10 @@ pub const Value = extern union { const payload = self.cast(T).?; const new_payload = try arena.create(T); new_payload.* = payload.*; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; } pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { @@ -656,181 +237,36 @@ pub const Value = extern union { pub fn dump( start_val: Value, comptime fmt: []const u8, - options: std.fmt.FormatOptions, + _: std.fmt.FormatOptions, out_stream: anytype, ) !void { comptime assert(fmt.len == 0); + if (start_val.ip_index != .none) { + try out_stream.print("(interned: {})", .{start_val.toIntern()}); + return; + } var val = start_val; while (true) switch (val.tag()) { - .u1_type => return out_stream.writeAll("u1"), - .u8_type => return out_stream.writeAll("u8"), - .i8_type => return out_stream.writeAll("i8"), - .u16_type => return out_stream.writeAll("u16"), - .u29_type => return out_stream.writeAll("u29"), - .i16_type => return out_stream.writeAll("i16"), - .u32_type => return out_stream.writeAll("u32"), - .i32_type => return out_stream.writeAll("i32"), - .u64_type => return out_stream.writeAll("u64"), - .i64_type => return out_stream.writeAll("i64"), - .u128_type => return out_stream.writeAll("u128"), - .i128_type => return 
out_stream.writeAll("i128"), - .isize_type => return out_stream.writeAll("isize"), - .usize_type => return out_stream.writeAll("usize"), - .c_char_type => return out_stream.writeAll("c_char"), - .c_short_type => return out_stream.writeAll("c_short"), - .c_ushort_type => return out_stream.writeAll("c_ushort"), - .c_int_type => return out_stream.writeAll("c_int"), - .c_uint_type => return out_stream.writeAll("c_uint"), - .c_long_type => return out_stream.writeAll("c_long"), - .c_ulong_type => return out_stream.writeAll("c_ulong"), - .c_longlong_type => return out_stream.writeAll("c_longlong"), - .c_ulonglong_type => return out_stream.writeAll("c_ulonglong"), - .c_longdouble_type => return out_stream.writeAll("c_longdouble"), - .f16_type => return out_stream.writeAll("f16"), - .f32_type => return out_stream.writeAll("f32"), - .f64_type => return out_stream.writeAll("f64"), - .f80_type => return out_stream.writeAll("f80"), - .f128_type => return out_stream.writeAll("f128"), - .anyopaque_type => return out_stream.writeAll("anyopaque"), - .bool_type => return out_stream.writeAll("bool"), - .void_type => return out_stream.writeAll("void"), - .type_type => return out_stream.writeAll("type"), - .anyerror_type => return out_stream.writeAll("anyerror"), - .comptime_int_type => return out_stream.writeAll("comptime_int"), - .comptime_float_type => return out_stream.writeAll("comptime_float"), - .noreturn_type => return out_stream.writeAll("noreturn"), - .null_type => return out_stream.writeAll("@Type(.Null)"), - .undefined_type => return out_stream.writeAll("@Type(.Undefined)"), - .fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"), - .fn_void_no_args_type => return out_stream.writeAll("fn() void"), - .fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"), - .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"), - .anyframe_type => return out_stream.writeAll("anyframe"), - .const_slice_u8_type => return out_stream.writeAll("[]const u8"), - .const_slice_u8_sentinel_0_type => return out_stream.writeAll("[:0]const u8"), - .anyerror_void_error_union_type => return out_stream.writeAll("anyerror!void"), - .generic_poison_type => return out_stream.writeAll("(generic poison type)"), - .generic_poison => return out_stream.writeAll("(generic poison)"), - .enum_literal_type => return out_stream.writeAll("@Type(.EnumLiteral)"), - .manyptr_u8_type => return out_stream.writeAll("[*]u8"), - .manyptr_const_u8_type => return out_stream.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0_type => return out_stream.writeAll("[*:0]const u8"), - .atomic_order_type => return out_stream.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op_type => return out_stream.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention_type => return out_stream.writeAll("std.builtin.CallingConvention"), - .address_space_type => return out_stream.writeAll("std.builtin.AddressSpace"), - .float_mode_type => return out_stream.writeAll("std.builtin.FloatMode"), - .reduce_op_type => return out_stream.writeAll("std.builtin.ReduceOp"), - .modifier_type => return out_stream.writeAll("std.builtin.CallModifier"), - .prefetch_options_type => return out_stream.writeAll("std.builtin.PrefetchOptions"), - .export_options_type => return out_stream.writeAll("std.builtin.ExportOptions"), - .extern_options_type => return 
out_stream.writeAll("std.builtin.ExternOptions"), - .type_info_type => return out_stream.writeAll("std.builtin.Type"), - - .empty_struct_value => return out_stream.writeAll("struct {}{}"), .aggregate => { return out_stream.writeAll("(aggregate)"); }, .@"union" => { return out_stream.writeAll("(union value)"); }, - .null_value => return out_stream.writeAll("null"), - .undef => return out_stream.writeAll("undefined"), - .zero => return out_stream.writeAll("0"), - .one => return out_stream.writeAll("1"), - .void_value => return out_stream.writeAll("{}"), - .unreachable_value => return out_stream.writeAll("unreachable"), - .the_only_possible_value => return out_stream.writeAll("(the only possible value)"), - .bool_true => return out_stream.writeAll("true"), - .bool_false => return out_stream.writeAll("false"), - .ty => return val.castTag(.ty).?.data.dump("", options, out_stream), - .lazy_align => { - try out_stream.writeAll("@alignOf("); - try val.castTag(.lazy_align).?.data.dump("", options, out_stream); - return try out_stream.writeAll(")"); - }, - .lazy_size => { - try out_stream.writeAll("@sizeOf("); - try val.castTag(.lazy_size).?.data.dump("", options, out_stream); - return try out_stream.writeAll(")"); - }, - .int_type => { - const int_type = val.castTag(.int_type).?.data; - return out_stream.print("{s}{d}", .{ - if (int_type.signed) "s" else "u", - int_type.bits, - }); - }, - .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream), - .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream), - .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), - .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .runtime_value => return out_stream.writeAll("[runtime value]"), - .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}), - .extern_fn => return out_stream.writeAll("(extern function)"), - .variable => return out_stream.writeAll("(variable)"), - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - return out_stream.print("(decl_ref_mut {d})", .{decl_index}); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - return out_stream.print("(decl_ref {d})", .{decl_index}); - }, - .comptime_field_ptr => { - return out_stream.writeAll("(comptime_field_ptr)"); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try out_stream.print("&[{}] ", .{elem_ptr.index}); - val = elem_ptr.array_ptr; - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try out_stream.print("fieldptr({d}) ", .{field_ptr.field_index}); - val = field_ptr.container_ptr; - }, - .empty_array => return out_stream.writeAll(".{}"), - .enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => return out_stream.print("(enum field {d})", .{val.castTag(.enum_field_index).?.data}), .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - return out_stream.print("(.str_lit index={d} len={d})", .{ - str_lit.index, str_lit.len, - }); - }, .repeated => { try out_stream.writeAll("(repeated) "); val = val.castTag(.repeated).?.data; }, - .empty_array_sentinel => return out_stream.writeAll("(empty array with sentinel)"), - .slice => return 
out_stream.writeAll("(slice)"), - .float_16 => return out_stream.print("{}", .{val.castTag(.float_16).?.data}), - .float_32 => return out_stream.print("{}", .{val.castTag(.float_32).?.data}), - .float_64 => return out_stream.print("{}", .{val.castTag(.float_64).?.data}), - .float_80 => return out_stream.print("{}", .{val.castTag(.float_80).?.data}), - .float_128 => return out_stream.print("{}", .{val.castTag(.float_128).?.data}), - .@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}), .eu_payload => { try out_stream.writeAll("(eu_payload) "); - val = val.castTag(.eu_payload).?.data; + val = val.castTag(.repeated).?.data; }, .opt_payload => { try out_stream.writeAll("(opt_payload) "); - val = val.castTag(.opt_payload).?.data; - }, - .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), - .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), - .eu_payload_ptr => { - try out_stream.writeAll("(eu_payload_ptr)"); - val = val.castTag(.eu_payload_ptr).?.data.container_ptr; - }, - .opt_payload_ptr => { - try out_stream.writeAll("(opt_payload_ptr)"); - val = val.castTag(.opt_payload_ptr).?.data.container_ptr; + val = val.castTag(.repeated).?.data; }, + .slice => return out_stream.writeAll("(slice)"), }; } @@ -845,421 +281,404 @@ pub const Value = extern union { } }; } + /// Asserts that the value is representable as an array of bytes. + /// Returns the value as a null-terminated string stored in the InternPool. + pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| enum_literal, + .ptr => |ptr| switch (ptr.len) { + .none => unreachable, + else => try arrayToIpString(val, ptr.len.toValue().toUnsignedInt(mod), mod), + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes), + .elems => try arrayToIpString(val, ty.arrayLen(mod), mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const len = @intCast(usize, ty.arrayLen(mod)); + try ip.string_bytes.appendNTimes(mod.gpa, byte, len); + return ip.getOrPutTrailingString(mod.gpa, len); + }, + }, + else => unreachable, + }; + } + /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { - const target = mod.getTarget(); - switch (val.tag()) { - .bytes => { - const bytes = val.castTag(.bytes).?.data; - const adjusted_len = bytes.len - @boolToInt(ty.sentinel() != null); - const adjusted_bytes = bytes[0..adjusted_len]; - return allocator.dupe(u8, adjusted_bytes); + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), + .ptr => |ptr| switch (ptr.len) { + .none => unreachable, + else => try arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return allocator.dupe(u8, bytes); + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try allocator.dupe(u8, bytes), + .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + @memset(result, byte); + return result; + }, }, - .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), - .repeated => { - const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(target)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen())); - @memset(result, byte); - return result; - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - const decl_val = try decl.value(); - return decl_val.toAllocatedBytes(decl.ty, allocator, mod); - }, - .the_only_possible_value => return &[_]u8{}, - .slice => { - const slice = val.castTag(.slice).?.data; - return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, mod); - }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod), - } + else => unreachable, + }; } fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { const result = try allocator.alloc(u8, @intCast(usize, len)); - var elem_value_buf: ElemValueBuffer = undefined; for (result, 0..) |*elem, i| { - const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf); - elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget())); + const elem_val = try val.elemValue(mod, i); + elem.* = @intCast(u8, elem_val.toUnsignedInt(mod)); } return result; } - pub const ToTypeBuffer = Type.Payload.Bits; + fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const len = @intCast(usize, len_u64); + try ip.string_bytes.ensureUnusedCapacity(gpa, len); + for (0..len) |i| { + // I don't think elemValue has the possibility to affect ip.string_bytes. Let's + // assert just to be sure. 
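+            // (If it ever could, the capacity reserved by ensureUnusedCapacity
+            // above might be outgrown mid-loop, and the appendAssumeCapacity
+            // below would then exceed capacity.)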
+ const prev = ip.string_bytes.items.len; + const elem_val = try val.elemValue(mod, i); + assert(ip.string_bytes.items.len == prev); + const byte = @intCast(u8, elem_val.toUnsignedInt(mod)); + ip.string_bytes.appendAssumeCapacity(byte); + } + return ip.getOrPutTrailingString(gpa, len); + } + + pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { + if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern(); + switch (val.tag()) { + .eu_payload => { + const pl = val.castTag(.eu_payload).?.data; + return mod.intern(.{ .error_union = .{ + .ty = ty.toIntern(), + .val = .{ .payload = try pl.intern(ty.errorUnionPayload(mod), mod) }, + } }); + }, + .opt_payload => { + const pl = val.castTag(.opt_payload).?.data; + return mod.intern(.{ .opt = .{ + .ty = ty.toIntern(), + .val = try pl.intern(ty.optionalChild(mod), mod), + } }); + }, + .slice => { + const pl = val.castTag(.slice).?.data; + const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod); + var ptr_key = mod.intern_pool.indexToKey(ptr).ptr; + assert(ptr_key.len == .none); + ptr_key.ty = ty.toIntern(); + ptr_key.len = try pl.len.intern(Type.usize, mod); + return mod.intern(.{ .ptr = ptr_key }); + }, + .bytes => { + const pl = val.castTag(.bytes).?.data; + return mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = pl }, + } }); + }, + .repeated => { + const pl = val.castTag(.repeated).?.data; + return mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = try pl.intern(ty.childType(mod), mod) }, + } }); + }, + .aggregate => { + const len = @intCast(usize, ty.arrayLen(mod)); + const old_elems = val.castTag(.aggregate).?.data[0..len]; + const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); + defer mod.gpa.free(new_elems); + const ty_key = mod.intern_pool.indexToKey(ty.toIntern()); + for (new_elems, old_elems, 0..) 
|*new_elem, old_elem, field_i| + new_elem.* = try old_elem.intern(switch (ty_key) { + .struct_type => ty.structFieldType(field_i, mod), + .anon_struct_type => |info| info.types[field_i].toType(), + inline .array_type, .vector_type => |info| info.child.toType(), + else => unreachable, + }, mod); + return mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = new_elems }, + } }); + }, + .@"union" => { + const pl = val.castTag(.@"union").?.data; + return mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = try pl.tag.intern(ty.unionTagTypeHypothetical(mod), mod), + .val = try pl.val.intern(ty.unionFieldType(pl.tag, mod), mod), + } }); + }, + } + } + + pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value { + return if (val.ip_index == .none) val else switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + => val, + + .error_union => |error_union| switch (error_union.val) { + .err_name => val, + .payload => |payload| Tag.eu_payload.create(arena, payload.toValue()), + }, + + .ptr => |ptr| switch (ptr.len) { + .none => val, + else => |len| Tag.slice.create(arena, .{ + .ptr = val.slicePtr(mod), + .len = len.toValue(), + }), + }, + + .opt => |opt| switch (opt.val) { + .none => val, + else => |payload| Tag.opt_payload.create(arena, payload.toValue()), + }, + + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| Tag.bytes.create(arena, try arena.dupe(u8, bytes)), + .elems => |old_elems| { + const new_elems = try arena.alloc(Value, old_elems.len); + for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); + return Tag.aggregate.create(arena, new_elems); + }, + .repeated_elem => |elem| Tag.repeated.create(arena, elem.toValue()), + }, + + .un => |un| Tag.@"union".create(arena, .{ + .tag = un.tag.toValue(), + .val = un.val.toValue(), + }), + + .memoized_call => unreachable, + }; + } + + pub fn toIntern(val: Value) InternPool.Index { + assert(val.ip_index != .none); + return val.ip_index; + } /// Asserts that the value is representable as a type. 
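    // Sketch (illustrative only, not part of the patch): `intern` and `unintern`
    // above are the two directions of the migration. Assuming a legacy
    // `val: Value`, its `ty: Type`, a `mod: *Module`, and an arena allocator:
    //
    //     const index = try val.intern(ty, mod); // legacy payload -> InternPool.Index
    //     const interned = index.toValue(); // Value with ip_index != .none
    //     const legacy = try interned.unintern(arena, mod); // back to a payload-based Value
    //
    // For keys that have no legacy payload form (types, ints, floats, errors,
    // and so on), `unintern` simply returns the interned value unchanged.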
- pub fn toType(self: Value, buffer: *ToTypeBuffer) Type { - return switch (self.tag()) { - .ty => self.castTag(.ty).?.data, - .u1_type => Type.initTag(.u1), - .u8_type => Type.initTag(.u8), - .i8_type => Type.initTag(.i8), - .u16_type => Type.initTag(.u16), - .i16_type => Type.initTag(.i16), - .u29_type => Type.initTag(.u29), - .u32_type => Type.initTag(.u32), - .i32_type => Type.initTag(.i32), - .u64_type => Type.initTag(.u64), - .i64_type => Type.initTag(.i64), - .u128_type => Type.initTag(.u128), - .i128_type => Type.initTag(.i128), - .usize_type => Type.initTag(.usize), - .isize_type => Type.initTag(.isize), - .c_char_type => Type.initTag(.c_char), - .c_short_type => Type.initTag(.c_short), - .c_ushort_type => Type.initTag(.c_ushort), - .c_int_type => Type.initTag(.c_int), - .c_uint_type => Type.initTag(.c_uint), - .c_long_type => Type.initTag(.c_long), - .c_ulong_type => Type.initTag(.c_ulong), - .c_longlong_type => Type.initTag(.c_longlong), - .c_ulonglong_type => Type.initTag(.c_ulonglong), - .c_longdouble_type => Type.initTag(.c_longdouble), - .f16_type => Type.initTag(.f16), - .f32_type => Type.initTag(.f32), - .f64_type => Type.initTag(.f64), - .f80_type => Type.initTag(.f80), - .f128_type => Type.initTag(.f128), - .anyopaque_type => Type.initTag(.anyopaque), - .bool_type => Type.initTag(.bool), - .void_type => Type.initTag(.void), - .type_type => Type.initTag(.type), - .anyerror_type => Type.initTag(.anyerror), - .comptime_int_type => Type.initTag(.comptime_int), - .comptime_float_type => Type.initTag(.comptime_float), - .noreturn_type => Type.initTag(.noreturn), - .null_type => Type.initTag(.null), - .undefined_type => Type.initTag(.undefined), - .fn_noreturn_no_args_type => Type.initTag(.fn_noreturn_no_args), - .fn_void_no_args_type => Type.initTag(.fn_void_no_args), - .fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args), - .fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args), - .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), - .anyframe_type => Type.initTag(.@"anyframe"), - .const_slice_u8_type => Type.initTag(.const_slice_u8), - .const_slice_u8_sentinel_0_type => Type.initTag(.const_slice_u8_sentinel_0), - .anyerror_void_error_union_type => Type.initTag(.anyerror_void_error_union), - .generic_poison_type => Type.initTag(.generic_poison), - .enum_literal_type => Type.initTag(.enum_literal), - .manyptr_u8_type => Type.initTag(.manyptr_u8), - .manyptr_const_u8_type => Type.initTag(.manyptr_const_u8), - .manyptr_const_u8_sentinel_0_type => Type.initTag(.manyptr_const_u8_sentinel_0), - .atomic_order_type => Type.initTag(.atomic_order), - .atomic_rmw_op_type => Type.initTag(.atomic_rmw_op), - .calling_convention_type => Type.initTag(.calling_convention), - .address_space_type => Type.initTag(.address_space), - .float_mode_type => Type.initTag(.float_mode), - .reduce_op_type => Type.initTag(.reduce_op), - .modifier_type => Type.initTag(.modifier), - .prefetch_options_type => Type.initTag(.prefetch_options), - .export_options_type => Type.initTag(.export_options), - .extern_options_type => Type.initTag(.extern_options), - .type_info_type => Type.initTag(.type_info), - - .int_type => { - const payload = self.castTag(.int_type).?.data; - buffer.* = .{ - .base = .{ - .tag = if (payload.signed) .int_signed else .int_unsigned, - }, - .data = payload.bits, - }; - return Type.initPayload(&buffer.base); - }, - - else => unreachable, - }; + pub fn toType(self: Value) Type { + return 
self.toIntern().toType(); } - /// Asserts the type is an enum type. - pub fn toEnum(val: Value, comptime E: type) E { - switch (val.tag()) { - .enum_field_index => { - const field_index = val.castTag(.enum_field_index).?.data; - return @intToEnum(E, field_index); - }, - .the_only_possible_value => { - const fields = std.meta.fields(E); - assert(fields.len == 1); - return @intToEnum(E, fields[0].value); - }, - else => unreachable, - } - } - - pub fn enumToInt(val: Value, ty: Type, buffer: *Payload.U64) Value { - const field_index = switch (val.tag()) { - .enum_field_index => val.castTag(.enum_field_index).?.data, - .the_only_possible_value => blk: { - assert(ty.enumFieldCount() == 1); - break :blk 0; - }, - .enum_literal => i: { - const name = val.castTag(.enum_literal).?.data; - break :i ty.enumFieldIndex(name).?; - }, + pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) { // Assume it is already an integer and return it directly. - else => return val, - }; - - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - return enum_full.values.keys()[field_index]; - } else { - // Field index and integer values are the same. - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = field_index, - }; - return Value.initPayload(&buffer.base); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - if (enum_obj.values.count() != 0) { - return enum_obj.values.keys()[field_index]; - } else { - // Field index and integer values are the same. - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = field_index, - }; - return Value.initPayload(&buffer.base); - } - }, - .enum_simple => { - // Field index and integer values are the same. - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = field_index, - }; - return Value.initPayload(&buffer.base); - }, - else => unreachable, - } - } - - pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { - if (ty.zigTypeTag() == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod); - - const field_index = switch (val.tag()) { - .enum_field_index => val.castTag(.enum_field_index).?.data, - .the_only_possible_value => blk: { - assert(ty.enumFieldCount() == 1); - break :blk 0; - }, - .enum_literal => return val.castTag(.enum_literal).?.data, - else => field_index: { - const values = switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.values, - .enum_numbered => ty.castTag(.enum_numbered).?.data.values, - .enum_simple => Module.EnumFull.ValueMap{}, + .simple_type, .int_type => val, + .enum_literal => |enum_literal| { + const field_index = ty.enumFieldIndex(enum_literal, mod).?; + return switch (ip.indexToKey(ty.toIntern())) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + enum_type.values[field_index].toValue() + else // Field index and integer values are the same. 
+ mod.intValue(enum_type.tag_ty.toType(), field_index), else => unreachable, }; - if (values.entries.len == 0) { - // auto-numbered enum - break :field_index @intCast(u32, val.toUnsignedInt(mod.getTarget())); - } - var buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?); }, - }; - - const fields = switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.fields, - .enum_numbered => ty.castTag(.enum_numbered).?.data.fields, - .enum_simple => ty.castTag(.enum_simple).?.data.fields, + .enum_type => |enum_type| try mod.getCoerced(val, enum_type.tag_ty.toType()), else => unreachable, }; - return fields.keys()[field_index]; } /// Asserts the value is an integer. - pub fn toBigInt(val: Value, space: *BigIntSpace, target: Target) BigIntConst { - return val.toBigIntAdvanced(space, target, null) catch unreachable; + pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { + return val.toBigIntAdvanced(space, mod, null) catch unreachable; } /// Asserts the value is an integer. pub fn toBigIntAdvanced( val: Value, space: *BigIntSpace, - target: Target, + mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { - switch (val.tag()) { - .null_value, - .zero, - .bool_false, - .the_only_possible_value, // i0, u0 - => return BigIntMutable.init(&space.limbs, 0).toConst(), - - .one, - .bool_true, - => return BigIntMutable.init(&space.limbs, 1).toConst(), - - .enum_field_index => { - const index = val.castTag(.enum_field_index).?.data; - return BigIntMutable.init(&space.limbs, index).toConst(); + return switch (val.toIntern()) { + .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), + .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), + .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => int.storage.toBigInt(space), + .lazy_align, .lazy_size => |ty| { + if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType()); + const x = switch (int.storage) { + else => unreachable, + .lazy_align => ty.toType().abiAlignment(mod), + .lazy_size => ty.toType().abiSize(mod), + }; + return BigIntMutable.init(&space.limbs, x).toConst(); + }, + }, + .enum_tag => |enum_tag| enum_tag.int.toValue().toBigIntAdvanced(space, mod, opt_sema), + .opt, .ptr => BigIntMutable.init( + &space.limbs, + (try val.getUnsignedIntAdvanced(mod, opt_sema)).?, + ).toConst(), + else => unreachable, }, - .runtime_value => { - const sub_val = val.castTag(.runtime_value).?.data; - return sub_val.toBigIntAdvanced(space, target, opt_sema); - }, - .int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(), - .int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(), - .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(), - .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(), + }; + } - .undef => unreachable, + pub fn getFunction(val: Value, mod: *Module) ?*Module.Fn { + return mod.funcPtrUnwrap(val.getFunctionIndex(mod)); + } - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiAlignment(target); - return 
BigIntMutable.init(&space.limbs, x).toConst(); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiSize(target); - return BigIntMutable.init(&space.limbs, x).toConst(); - }, + pub fn getFunctionIndex(val: Value, mod: *Module) Module.Fn.OptionalIndex { + return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.toIntern()) else .none; + } - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(target, opt_sema)).?; - const elem_size = elem_ptr.elem_ty.abiSize(target); - const new_addr = array_addr + elem_size * elem_ptr.index; - return BigIntMutable.init(&space.limbs, new_addr).toConst(); - }, + pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { + .extern_func => |extern_func| extern_func, + else => null, + } else null; + } - else => unreachable, - } + pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| variable, + else => null, + } else null; } /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. - pub fn getUnsignedInt(val: Value, target: Target) ?u64 { - return getUnsignedIntAdvanced(val, target, null) catch unreachable; + pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { + return getUnsignedIntAdvanced(val, mod, null) catch unreachable; } /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. - pub fn getUnsignedIntAdvanced(val: Value, target: Target, opt_sema: ?*Sema) !?u64 { - switch (val.tag()) { - .zero, - .bool_false, - .the_only_possible_value, // i0, u0 - => return 0, - - .one, - .bool_true, - => return 1, - - .int_u64 => return val.castTag(.int_u64).?.data, - .int_i64 => return @intCast(u64, val.castTag(.int_i64).?.data), - .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null, - .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null, - + pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { + return switch (val.toIntern()) { .undef => unreachable, - - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar; - } else { - return ty.abiAlignment(target); - } + .bool_false => 0, + .bool_true => 1, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => unreachable, + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.to(u64) catch null, + .u64 => |x| x, + .i64 => |x| std.math.cast(u64, x), + .lazy_align => |ty| if (opt_sema) |sema| + (try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar + else + ty.toType().abiAlignment(mod), + .lazy_size => |ty| if (opt_sema) |sema| + (try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar + else + ty.toType().abiSize(mod), + }, + .ptr => |ptr| switch (ptr.addr) { + .int => |int| int.toValue().getUnsignedIntAdvanced(mod, opt_sema), + .elem => |elem| { + const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const elem_ty = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); + return base_addr + elem.index * elem_ty.abiSize(mod); + }, 
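+                    // A field pointer's address is the base address plus the
+                    // field's byte offset; that offset is only meaningful once
+                    // the container's layout has been resolved, hence the
+                    // resolveTypeLayout call when a Sema is available.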
+ .field => |field| { + const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const struct_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); + if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); + return base_addr + struct_ty.structFieldOffset(@intCast(usize, field.index), mod); + }, + else => null, + }, + .opt => |opt| switch (opt.val) { + .none => 0, + else => |payload| payload.toValue().getUnsignedIntAdvanced(mod, opt_sema), + }, + else => null, }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return (try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar; - } else { - return ty.abiSize(target); - } - }, - - else => return null, - } + }; } /// Asserts the value is an integer and it fits in a u64 - pub fn toUnsignedInt(val: Value, target: Target) u64 { - return getUnsignedInt(val, target).?; + pub fn toUnsignedInt(val: Value, mod: *Module) u64 { + return getUnsignedInt(val, mod).?; } /// Asserts the value is an integer and it fits in a i64 - pub fn toSignedInt(val: Value, target: Target) i64 { - switch (val.tag()) { - .zero, - .bool_false, - .the_only_possible_value, // i0, u0 - => return 0, - - .one, - .bool_true, - => return 1, - - .int_u64 => return @intCast(i64, val.castTag(.int_u64).?.data), - .int_i64 => return val.castTag(.int_i64).?.data, - .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable, - .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable, - - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return @intCast(i64, ty.abiAlignment(target)); + pub fn toSignedInt(val: Value, mod: *Module) i64 { + return switch (val.toIntern()) { + .bool_false => 0, + .bool_true => 1, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.to(i64) catch unreachable, + .i64 => |x| x, + .u64 => |x| @intCast(i64, x), + .lazy_align => |ty| @intCast(i64, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @intCast(i64, ty.toType().abiSize(mod)), + }, + else => unreachable, }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return @intCast(i64, ty.abiSize(target)); - }, - - .undef => unreachable, - else => unreachable, - } + }; } - pub fn toBool(self: Value) bool { - return switch (self.tag()) { - .bool_true, .one => true, - .bool_false, .zero => false, - .int_u64 => switch (self.castTag(.int_u64).?.data) { - 0 => false, - 1 => true, - else => unreachable, - }, - .int_i64 => switch (self.castTag(.int_i64).?.data) { - 0 => false, - 1 => true, - else => unreachable, - }, + pub fn toBool(val: Value) bool { + return switch (val.toIntern()) { + .bool_true => true, + .bool_false => false, else => unreachable, }; } - fn isDeclRef(val: Value) bool { + fn isDeclRef(val: Value, mod: *Module) bool { var check = val; - while (true) switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return true, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, + while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => return true, + .eu_payload, .opt_payload => |base| check = base.toValue(), + .elem, 
.field => |base_index| check = base_index.base.toValue(), + else => return false, + }, else => return false, }; } @@ -1272,62 +691,45 @@ pub const Value = extern union { ReinterpretDeclRef, IllDefinedMemoryLayout, Unimplemented, + OutOfMemory, }!void { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - if (val.isUndef()) { - const size = @intCast(usize, ty.abiSize(target)); + if (val.isUndef(mod)) { + const size = @intCast(usize, ty.abiSize(mod)); @memset(buffer[0..size], 0xaa); return; } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void => {}, .Bool => { buffer[0] = @boolToInt(val.toBool()); }, .Int, .Enum => { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); const bits = int_info.bits; const byte_count = (bits + 7) / 8; - var enum_buffer: Payload.U64 = undefined; - const int_val = val.enumToInt(ty, &enum_buffer); - - if (byte_count <= @sizeOf(u64)) { - const int: u64 = switch (int_val.tag()) { - .zero => 0, - .one => 1, - .int_u64 => int_val.castTag(.int_u64).?.data, - .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data), - else => unreachable, - }; - for (buffer[0..byte_count], 0..) |_, i| switch (endian) { - .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), - .Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), - }; - } else { - var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, target); - bigint.writeTwosComplement(buffer[0..byte_count], endian); - } + var bigint_buffer: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buffer, mod); + bigint.writeTwosComplement(buffer[0..byte_count], endian); }, .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16)), endian), - 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32)), endian), - 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64)), endian), - 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80)), endian), - 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128)), endian), + 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian), + 32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32, mod)), endian), + 64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64, mod)), endian), + 80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80, mod)), endian), + 128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128, mod)), endian), else => unreachable, }, .Array => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); - const elem_size = @intCast(usize, elem_ty.abiSize(target)); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); + const elem_size = @intCast(usize, elem_ty.abiSize(mod)); var elem_i: usize = 0; - var elem_value_buf: ElemValueBuffer = undefined; var buf_off: usize = 0; while (elem_i < len) : (elem_i += 1) { - const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf); + const elem_val = try val.elemValue(mod, elem_i); try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]); buf_off += elem_size; } @@ -1335,52 +737,63 @@ pub const Value = extern union { .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes // follow the data bytes, on both big- and little-endian systems. 
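            // For example, a @Vector(3, u8) has a bit size of 24, so byte_count
            // is 3 even when the ABI size rounds up to 4; the padding byte then
            // trails the data bytes on either endianness.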
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, - .Extern => { - const fields = ty.structFields().values(); - const field_vals = val.castTag(.aggregate).?.data; - for (fields, 0..) |field, i| { - const off = @intCast(usize, ty.structFieldOffset(i, target)); - try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]); - } + .Extern => for (ty.structFields(mod).values(), 0..) |field, i| { + const off = @intCast(usize, ty.structFieldOffset(i, mod)); + const field_val = switch (val.ip_index) { + .none => val.castTag(.aggregate).?.data[i], + else => switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) { + .bytes => |bytes| { + buffer[off] = bytes[i]; + continue; + }, + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }.toValue(), + }; + try writeToMemory(field_val, field.ty, mod, buffer[off..]); }, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; - const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?; + const name = switch (mod.intern_pool.indexToKey(val.toIntern())) { + .err => |err| err.name, + .error_union => |error_union| error_union.val.err_name, + else => unreachable, + }; + const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, .Extern => return error.Unimplemented, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, .Pointer => { - if (ty.isSlice()) return error.IllDefinedMemoryLayout; - if (val.isDeclRef()) return error.ReinterpretDeclRef; + if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; return val.writeToMemory(Type.usize, mod, buffer); }, .Optional => { - if (!ty.isPtrLikeOptional()) return error.IllDefinedMemoryLayout; - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); - const opt_val = val.optionalValue(); + if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout; + const child = ty.optionalChild(mod); + const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToMemory(child, mod, buffer); } else { - return writeToMemory(Value.zero, Type.usize, mod, buffer); + return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer); } }, else => return error.Unimplemented, @@ -1391,15 +804,21 @@ pub const Value = extern union { /// /// Both the start and the end of the provided buffer must be tight, since /// big-endian packed memory layouts start at the end of the buffer. 
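    // Worked example (illustrative only): "tight" means the buffer must span
    // exactly the containing type, because on big-endian targets packed bit
    // offsets are measured from the end of the buffer. Writing a u4 at
    // bit_offset 0 into a 2-byte buffer therefore lands in buffer[0] on a
    // little-endian target but in buffer[1] on a big-endian one.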
@@ -1391,15 +804,21 @@ pub const Value = extern union {
     ///
     /// Both the start and the end of the provided buffer must be tight, since
     /// big-endian packed memory layouts start at the end of the buffer.
-    pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) error{ReinterpretDeclRef}!void {
+    pub fn writeToPackedMemory(
+        val: Value,
+        ty: Type,
+        mod: *Module,
+        buffer: []u8,
+        bit_offset: usize,
+    ) error{ ReinterpretDeclRef, OutOfMemory }!void {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        if (val.isUndef()) {
-            const bit_size = @intCast(usize, ty.bitSize(target));
+        if (val.isUndef(mod)) {
+            const bit_size = @intCast(usize, ty.bitSize(mod));
             std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
             return;
         }
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Void => {},
             .Bool => {
                 const byte_index = switch (endian) {
@@ -1413,91 +832,82 @@ pub const Value = extern union {
                 }
             },
             .Int, .Enum => {
-                const bits = ty.intInfo(target).bits;
-                const abi_size = @intCast(usize, ty.abiSize(target));
+                if (buffer.len == 0) return;
+                const bits = ty.intInfo(mod).bits;
+                if (bits == 0) return;
 
-                var enum_buffer: Payload.U64 = undefined;
-                const int_val = val.enumToInt(ty, &enum_buffer);
-
-                if (abi_size == 0) return;
-                if (abi_size <= @sizeOf(u64)) {
-                    const int: u64 = switch (int_val.tag()) {
-                        .zero => 0,
-                        .one => 1,
-                        .int_u64 => int_val.castTag(.int_u64).?.data,
-                        .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
-                        else => unreachable,
-                    };
-                    std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian);
-                } else {
-                    var bigint_buffer: BigIntSpace = undefined;
-                    const bigint = int_val.toBigInt(&bigint_buffer, target);
-                    bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian);
+                switch (mod.intern_pool.indexToKey((try val.enumToInt(ty, mod)).toIntern()).int.storage) {
+                    inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian),
+                    .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian),
+                    else => unreachable,
                 }
             },
             .Float => switch (ty.floatBits(target)) {
-                16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16)), endian),
-                32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32)), endian),
-                64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64)), endian),
-                80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80)), endian),
-                128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128)), endian),
+                16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16, mod)), endian),
+                32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32, mod)), endian),
+                64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64, mod)), endian),
+                80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80, mod)), endian),
+                128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128, mod)), endian),
                 else => unreachable,
             },
             .Vector => {
-                const elem_ty = ty.childType();
-                const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
-                const len = @intCast(usize, ty.arrayLen());
+                const elem_ty = ty.childType(mod);
+                const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
+                const len = @intCast(usize, ty.arrayLen(mod));
 
                 var bits: u16 = 0;
                 var elem_i: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
                 while (elem_i < len) : (elem_i += 1) {
                     // On big-endian systems, LLVM reverses the element order of vectors by default
                     const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i;
-                    const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf);
+                    const elem_val = try val.elemValue(mod, tgt_elem_i);
                     try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
                     bits += elem_bit_size;
                 }
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled in non-packed writeToMemory
                 .Packed => {
                     var bits: u16 = 0;
-                    const fields = ty.structFields().values();
-                    const field_vals = val.castTag(.aggregate).?.data;
+                    const fields = ty.structFields(mod).values();
+                    const storage = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage;
                     for (fields, 0..) |field, i| {
-                        const field_bits = @intCast(u16, field.ty.bitSize(target));
-                        try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
+                        const field_bits = @intCast(u16, field.ty.bitSize(mod));
+                        const field_val = switch (storage) {
+                            .bytes => unreachable,
+                            .elems => |elems| elems[i],
+                            .repeated_elem => |elem| elem,
+                        };
+                        try field_val.toValue().writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
                         bits += field_bits;
                     }
                 },
             },
-            .Union => switch (ty.containerLayout()) {
+            .Union => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled in non-packed writeToMemory
                 .Packed => {
-                    const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
-                    const field_type = ty.unionFields().values()[field_index.?].ty;
-                    const field_val = val.fieldValue(field_type, field_index.?);
+                    const field_index = ty.unionTagFieldIndex(val.unionTag(mod), mod);
+                    const field_type = ty.unionFields(mod).values()[field_index.?].ty;
+                    const field_val = try val.fieldValue(mod, field_index.?);
                     return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
                 },
             },
             .Pointer => {
-                assert(!ty.isSlice()); // No well defined layout.
-                if (val.isDeclRef()) return error.ReinterpretDeclRef;
+                assert(!ty.isSlice(mod)); // No well defined layout.
+                if (val.isDeclRef(mod)) return error.ReinterpretDeclRef;
                 return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
             },
             .Optional => {
-                assert(ty.isPtrLikeOptional());
-                var buf: Type.Payload.ElemType = undefined;
-                const child = ty.optionalChild(&buf);
-                const opt_val = val.optionalValue();
+                assert(ty.isPtrLikeOptional(mod));
+                const child = ty.optionalChild(mod);
+                const opt_val = val.optionalValue(mod);
                 if (opt_val) |some| {
                     return some.writeToPackedMemory(child, mod, buffer, bit_offset);
                 } else {
-                    return writeToPackedMemory(Value.zero, Type.usize, mod, buffer, bit_offset);
+                    return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset);
                 }
             },
             else => @panic("TODO implement writeToPackedMemory for more types"),
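
// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: how the bit_offset plumbing above is
// meant to behave. A value stored at an arbitrary bit offset with
// std.mem.writeVarPackedInt is recovered bit-exactly by readVarPackedInt;
// packed struct and packed union layouts reduce to exactly this operation.
const std = @import("std");

test "packed int round-trip at a bit offset" {
    var buf = [_]u8{0} ** 2;
    std.mem.writeVarPackedInt(&buf, 3, 5, @as(u5, 0b10110), .Little);
    const got = std.mem.readVarPackedInt(u5, &buf, 3, 5, .Little, .unsigned);
    try std.testing.expectEqual(@as(u5, 0b10110), got);
}
// ---------------------------------------------------------------------------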
@@ -1516,7 +926,7 @@ pub const Value = extern union {
     ) Allocator.Error!Value {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Void => return Value.void,
             .Bool => {
                 if (buffer[0] == 0) {
@@ -1525,20 +935,27 @@ pub const Value = extern union {
                     return Value.true;
                 }
             },
-            .Int, .Enum => {
-                const int_info = ty.intInfo(target);
+            .Int, .Enum => |ty_tag| {
+                const int_ty = switch (ty_tag) {
+                    .Int => ty,
+                    .Enum => ty.intTagType(mod),
+                    else => unreachable,
+                };
+                const int_info = int_ty.intInfo(mod);
                 const bits = int_info.bits;
                 const byte_count = (bits + 7) / 8;
-                if (bits == 0 or buffer.len == 0) return Value.zero;
+                if (bits == 0 or buffer.len == 0) return mod.getCoerced(try mod.intValue(int_ty, 0), ty);
                 if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                     .signed => {
                         const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
-                        return Value.Tag.int_i64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+                        const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+                        return mod.getCoerced(try mod.intValue(int_ty, result), ty);
                     },
                     .unsigned => {
                         const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
-                        return Value.Tag.int_u64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+                        const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+                        return mod.getCoerced(try mod.intValue(int_ty, result), ty);
                     },
                 } else { // Slow path, we have to construct a big-int
                     const Limb = std.math.big.Limb;
@@ -1547,48 +964,57 @@ pub const Value = extern union {
 
                     var bigint = BigIntMutable.init(limbs_buffer, 0);
                     bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness);
-                    return fromBigInt(arena, bigint.toConst());
+                    return mod.getCoerced(try mod.intValue_big(int_ty, bigint.toConst()), ty);
                 }
             },
-            .Float => switch (ty.floatBits(target)) {
-                16 => return Value.Tag.float_16.create(arena, @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian))),
-                32 => return Value.Tag.float_32.create(arena, @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian))),
-                64 => return Value.Tag.float_64.create(arena, @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian))),
-                80 => return Value.Tag.float_80.create(arena, @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian))),
-                128 => return Value.Tag.float_128.create(arena, @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian))),
-                else => unreachable,
-            },
+            .Float => return (try mod.intern(.{ .float = .{
+                .ty = ty.toIntern(),
+                .storage = switch (ty.floatBits(target)) {
+                    16 => .{ .f16 = @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian)) },
+                    32 => .{ .f32 = @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian)) },
+                    64 => .{ .f64 = @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian)) },
+                    80 => .{ .f80 = @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian)) },
+                    128 => .{ .f128 = @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian)) },
                    else => unreachable,
+                },
+            } })).toValue(),
             .Array => {
-                const elem_ty = ty.childType();
-                const elem_size = elem_ty.abiSize(target);
-                const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
+                const elem_ty = ty.childType(mod);
+                const elem_size = elem_ty.abiSize(mod);
+                const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod)));
 
                 var offset: usize = 0;
                 for (elems) |*elem| {
-                    elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena);
+                    elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod);
                     offset += @intCast(usize, elem_size);
                 }
-                return Tag.aggregate.create(arena, elems);
+                return (try mod.intern(.{ .aggregate = .{
+                    .ty = ty.toIntern(),
+                    .storage = .{ .elems = elems },
+                } })).toValue();
             },
             .Vector => {
                 // We use byte_count instead of abi_size here, so that any padding bytes
                 // follow the data bytes, on both big- and little-endian systems.
-                const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+                const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                 return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => {
-                    const fields = ty.structFields().values();
-                    const field_vals = try arena.alloc(Value, fields.len);
-                    for (fields, 0..) |field, i| {
-                        const off = @intCast(usize, ty.structFieldOffset(i, target));
-                        const sz = @intCast(usize, ty.structFieldType(i).abiSize(target));
-                        field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
+                    const fields = ty.structFields(mod).values();
+                    const field_vals = try arena.alloc(InternPool.Index, fields.len);
+                    for (field_vals, fields, 0..) |*field_val, field, i| {
+                        const off = @intCast(usize, ty.structFieldOffset(i, mod));
+                        const sz = @intCast(usize, field.ty.abiSize(mod));
+                        field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod);
                     }
-                    return Tag.aggregate.create(arena, field_vals);
+                    return (try mod.intern(.{ .aggregate = .{
+                        .ty = ty.toIntern(),
+                        .storage = .{ .elems = field_vals },
+                    } })).toValue();
                 },
                 .Packed => {
-                    const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+                    const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                     return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
                 },
             },
@@ -1596,22 +1022,19 @@ pub const Value = extern union {
                 // TODO revisit this when we have the concept of the error tag type
                 const Int = u16;
                 const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian);
-
-                const payload = try arena.create(Value.Payload.Error);
-                payload.* = .{
-                    .base = .{ .tag = .@"error" },
-                    .data = .{ .name = mod.error_name_list.items[@intCast(usize, int)] },
-                };
-                return Value.initPayload(&payload.base);
+                const name = mod.global_error_set.keys()[@intCast(usize, int)];
+                return (try mod.intern(.{ .err = .{
+                    .ty = ty.toIntern(),
+                    .name = name,
+                } })).toValue();
             },
             .Pointer => {
-                assert(!ty.isSlice()); // No well defined layout.
+                assert(!ty.isSlice(mod)); // No well defined layout.
                 return readFromMemory(Type.usize, mod, buffer, arena);
             },
             .Optional => {
-                assert(ty.isPtrLikeOptional());
-                var buf: Type.Payload.ElemType = undefined;
-                const child = ty.optionalChild(&buf);
+                assert(ty.isPtrLikeOptional(mod));
+                const child = ty.optionalChild(mod);
                 return readFromMemory(child, mod, buffer, arena);
             },
             else => @panic("TODO implement readFromMemory for more types"),
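
// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: the shift pair in the signed fast
// path above. Left-shifting a bits-wide value to the top of an i64 and then
// arithmetic-shifting it back replicates the sign bit through the high bits.
const std = @import("std");

test "sign-extend a 12-bit pattern through i64 shifts" {
    const bits: u16 = 12;
    const shift = @intCast(u6, 64 - bits);
    const raw: i64 = 0xfff; // the 12-bit two's complement pattern for -1
    try std.testing.expectEqual(@as(i64, -1), (raw << shift) >> shift);
}
// ---------------------------------------------------------------------------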
@@ -1631,7 +1054,7 @@ pub const Value = extern union {
     ) Allocator.Error!Value {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Void => return Value.void,
             .Bool => {
                 const byte = switch (endian) {
@@ -1644,71 +1067,94 @@ pub const Value = extern union {
                     return Value.true;
                 }
             },
-            .Int, .Enum => {
-                if (buffer.len == 0) return Value.zero;
-                const int_info = ty.intInfo(target);
-                const abi_size = @intCast(usize, ty.abiSize(target));
-
+            .Int, .Enum => |ty_tag| {
+                if (buffer.len == 0) return mod.intValue(ty, 0);
+                const int_info = ty.intInfo(mod);
                 const bits = int_info.bits;
-                if (bits == 0) return Value.zero;
-                if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
-                    .signed => return Value.Tag.int_i64.create(arena, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
-                    .unsigned => return Value.Tag.int_u64.create(arena, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
-                } else { // Slow path, we have to construct a big-int
-                    const Limb = std.math.big.Limb;
-                    const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
-                    const limbs_buffer = try arena.alloc(Limb, limb_count);
+                if (bits == 0) return mod.intValue(ty, 0);
 
-                    var bigint = BigIntMutable.init(limbs_buffer, 0);
-                    bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
-                    return fromBigInt(arena, bigint.toConst());
+                // Fast path for integers <= u64
+                if (bits <= 64) {
+                    const int_ty = switch (ty_tag) {
+                        .Int => ty,
+                        .Enum => ty.intTagType(mod),
+                        else => unreachable,
+                    };
+                    return mod.getCoerced(switch (int_info.signedness) {
+                        .signed => return mod.intValue(
+                            int_ty,
+                            std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed),
+                        ),
+                        .unsigned => return mod.intValue(
+                            int_ty,
+                            std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned),
+                        ),
+                    }, ty);
                 }
+
+                // Slow path, we have to construct a big-int
+                const abi_size = @intCast(usize, ty.abiSize(mod));
+                const Limb = std.math.big.Limb;
+                const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
+                const limbs_buffer = try arena.alloc(Limb, limb_count);
+
+                var bigint = BigIntMutable.init(limbs_buffer, 0);
+                bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
+                return mod.intValue_big(ty, bigint.toConst());
             },
-            .Float => switch (ty.floatBits(target)) {
-                16 => return Value.Tag.float_16.create(arena, @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian))),
-                32 => return Value.Tag.float_32.create(arena, @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian))),
-                64 => return Value.Tag.float_64.create(arena, @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian))),
-                80 => return Value.Tag.float_80.create(arena, @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian))),
-                128 => return Value.Tag.float_128.create(arena, @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian))),
-                else => unreachable,
-            },
+            .Float => return (try mod.intern(.{ .float = .{
+                .ty = ty.toIntern(),
+                .storage = switch (ty.floatBits(target)) {
+                    16 => .{ .f16 = @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian)) },
+                    32 => .{ .f32 = @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian)) },
+                    64 => .{ .f64 = @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian)) },
+                    80 => .{ .f80 = @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian)) },
+                    128 => .{ .f128 = @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian)) },
+                    else => unreachable,
+                },
+            } })).toValue(),
             .Vector => {
-                const elem_ty = ty.childType();
-                const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
+                const elem_ty = ty.childType(mod);
+                const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod)));
 
                 var bits: u16 = 0;
-                const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+                const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
                 for (elems, 0..) |_, i| {
                     // On big-endian systems, LLVM reverses the element order of vectors by default
                     const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
-                    elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena);
+                    elems[tgt_elem_i] = try (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).intern(elem_ty, mod);
                     bits += elem_bit_size;
                 }
-                return Tag.aggregate.create(arena, elems);
+                return (try mod.intern(.{ .aggregate = .{
+                    .ty = ty.toIntern(),
+                    .storage = .{ .elems = elems },
+                } })).toValue();
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled by non-packed readFromMemory
                 .Packed => {
                     var bits: u16 = 0;
-                    const fields = ty.structFields().values();
-                    const field_vals = try arena.alloc(Value, fields.len);
+                    const fields = ty.structFields(mod).values();
+                    const field_vals = try arena.alloc(InternPool.Index, fields.len);
                     for (fields, 0..) |field, i| {
-                        const field_bits = @intCast(u16, field.ty.bitSize(target));
-                        field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena);
+                        const field_bits = @intCast(u16, field.ty.bitSize(mod));
+                        field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod);
                         bits += field_bits;
                     }
-                    return Tag.aggregate.create(arena, field_vals);
+                    return (try mod.intern(.{ .aggregate = .{
+                        .ty = ty.toIntern(),
+                        .storage = .{ .elems = field_vals },
+                    } })).toValue();
                 },
             },
             .Pointer => {
-                assert(!ty.isSlice()); // No well defined layout.
+                assert(!ty.isSlice(mod)); // No well defined layout.
                 return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
             },
             .Optional => {
-                assert(ty.isPtrLikeOptional());
-                var buf: Type.Payload.ElemType = undefined;
-                const child = ty.optionalChild(&buf);
+                assert(ty.isPtrLikeOptional(mod));
                const child = ty.optionalChild(mod);
                 return readFromPackedMemory(child, mod, buffer, bit_offset, arena);
             },
             else => @panic("TODO implement readFromPackedMemory for more types"),
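
// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: both float paths above rely on the
// same invariant, that a float survives a round trip through its IEEE bit
// pattern unchanged. The two-arg @bitCast (the builtin form used throughout
// this file's era) moves the bits without numeric conversion.
const std = @import("std");

test "f32 round-trips through its bit pattern" {
    var buf: [4]u8 = undefined;
    std.mem.writeInt(u32, &buf, @bitCast(u32, @as(f32, 1.5)), .Little);
    const back = @bitCast(f32, std.mem.readInt(u32, &buf, .Little));
    try std.testing.expectEqual(@as(f32, 1.5), back);
}
// ---------------------------------------------------------------------------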
@@ -1716,31 +1162,22 @@ pub const Value = extern union {
         }
     }
 
     /// Asserts that the value is a float or an integer.
-    pub fn toFloat(val: Value, comptime T: type) T {
-        return switch (val.tag()) {
-            .float_16 => @floatCast(T, val.castTag(.float_16).?.data),
-            .float_32 => @floatCast(T, val.castTag(.float_32).?.data),
-            .float_64 => @floatCast(T, val.castTag(.float_64).?.data),
-            .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
-            .float_128 => @floatCast(T, val.castTag(.float_128).?.data),
-
-            .zero => 0,
-            .one => 1,
-            .int_u64 => {
-                if (T == f80) {
-                    @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
-                }
-                return @intToFloat(T, val.castTag(.int_u64).?.data);
+    pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .int => |int| switch (int.storage) {
+                .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)),
+                inline .u64, .i64 => |x| {
+                    if (T == f80) {
+                        @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
+                    }
+                    return @intToFloat(T, x);
+                },
+                .lazy_align => |ty| @intToFloat(T, ty.toType().abiAlignment(mod)),
+                .lazy_size => |ty| @intToFloat(T, ty.toType().abiSize(mod)),
             },
-            .int_i64 => {
-                if (T == f80) {
-                    @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
-                }
-                return @intToFloat(T, val.castTag(.int_i64).?.data);
+            .float => |float| switch (float.storage) {
+                inline else => |x| @floatCast(T, x),
             },
-
-            .int_big_positive => @floatCast(T, bigIntToFloat(val.castTag(.int_big_positive).?.data, true)),
-            .int_big_negative => @floatCast(T, bigIntToFloat(val.castTag(.int_big_negative).?.data, false)),
             else => unreachable,
         };
     }
@@ -1764,103 +1201,29 @@ pub const Value = extern union {
         }
     }
 
-    pub fn clz(val: Value, ty: Type, target: Target) u64 {
-        const ty_bits = ty.intInfo(target).bits;
-        switch (val.tag()) {
-            .zero, .bool_false => return ty_bits,
-            .one, .bool_true => return ty_bits - 1,
-
-            .int_u64 => {
-                const big = @clz(val.castTag(.int_u64).?.data);
-                return big + ty_bits - 64;
-            },
-            .int_i64 => {
-                @panic("TODO implement i64 Value clz");
-            },
-            .int_big_positive => {
-                const bigint = val.castTag(.int_big_positive).?.asBigInt();
-                return bigint.clz(ty_bits);
-            },
-            .int_big_negative => {
-                @panic("TODO implement int_big_negative Value clz");
-            },
-
-            .the_only_possible_value => {
-                assert(ty_bits == 0);
-                return ty_bits;
-            },
-
-            .lazy_align, .lazy_size => {
-                var bigint_buf: BigIntSpace = undefined;
-                const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable;
-                return bigint.clz(ty_bits);
-            },
-
-            else => unreachable,
-        }
+    pub fn clz(val: Value, ty: Type, mod: *Module) u64 {
+        var bigint_buf: BigIntSpace = undefined;
+        const bigint = val.toBigInt(&bigint_buf, mod);
+        return bigint.clz(ty.intInfo(mod).bits);
     }
 
-    pub fn ctz(val: Value, ty: Type, target: Target) u64 {
-        const ty_bits = ty.intInfo(target).bits;
-        switch (val.tag()) {
-            .zero, .bool_false => return ty_bits,
-            .one, .bool_true => return 0,
-
-            .int_u64 => {
-                const big = @ctz(val.castTag(.int_u64).?.data);
-                return if (big == 64) ty_bits else big;
-            },
-            .int_i64 => {
-                @panic("TODO implement i64 Value ctz");
-            },
-            .int_big_positive => {
-                const bigint = val.castTag(.int_big_positive).?.asBigInt();
-                return bigint.ctz();
-            },
-            .int_big_negative => {
-                @panic("TODO implement int_big_negative Value ctz");
-            },
-
-            .the_only_possible_value => {
-                assert(ty_bits == 0);
-                return ty_bits;
-            },
-
-            .lazy_align, .lazy_size => {
-                var bigint_buf: BigIntSpace = undefined;
-                const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable;
-                return bigint.ctz();
-            },
-
-            else => unreachable,
-        }
+    pub fn ctz(val: Value, ty: Type, mod: *Module) u64 {
+        var bigint_buf: BigIntSpace = undefined;
+        const bigint = val.toBigInt(&bigint_buf, mod);
+        return bigint.ctz(ty.intInfo(mod).bits);
     }
 
-    pub fn popCount(val: Value, ty: Type, target: Target) u64 {
-        assert(!val.isUndef());
-        switch (val.tag()) {
-            .zero, .bool_false => return 0,
-            .one, .bool_true => return 1,
-
-            .int_u64 => return @popCount(val.castTag(.int_u64).?.data),
-
-            else => {
-                const info = ty.intInfo(target);
-
-                var buffer: Value.BigIntSpace = undefined;
-                const int = val.toBigInt(&buffer, target);
-                return @intCast(u64, int.popCount(info.bits));
-            },
-        }
+    pub fn popCount(val: Value, ty: Type, mod: *Module) u64 {
+        var bigint_buf: BigIntSpace = undefined;
+        const bigint = val.toBigInt(&bigint_buf, mod);
+        return @intCast(u64, bigint.popCount(ty.intInfo(mod).bits));
     }
 
-    pub fn bitReverse(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
-        assert(!val.isUndef());
-
-        const info = ty.intInfo(target);
+    pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
+        const info = ty.intInfo(mod);
 
         var buffer: Value.BigIntSpace = undefined;
-        const operand_bigint = val.toBigInt(&buffer, target);
+        const operand_bigint = val.toBigInt(&buffer, mod);
 
         const limbs = try arena.alloc(
             std.math.big.Limb,
@@ -1869,19 +1232,17 @@ pub const Value = extern union {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitReverse(operand_bigint, info.signedness, info.bits);
 
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }
 
-    pub fn byteSwap(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
-        assert(!val.isUndef());
-
-        const info = ty.intInfo(target);
+    pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
+        const info = ty.intInfo(mod);
 
         // Bit count must be evenly divisible by 8
         assert(info.bits % 8 == 0);
 
         var buffer: Value.BigIntSpace = undefined;
-        const operand_bigint = val.toBigInt(&buffer, target);
+        const operand_bigint = val.toBigInt(&buffer, mod);
 
         const limbs = try arena.alloc(
             std.math.big.Limb,
@@ -1890,176 +1251,98 @@ pub const Value = extern union {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8);
 
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }
     /// Asserts the value is an integer and not undefined.
     /// Returns the number of bits the value requires to represent stored in twos complement form.
-    pub fn intBitCountTwosComp(self: Value, target: Target) usize {
-        switch (self.tag()) {
-            .zero,
-            .bool_false,
-            .the_only_possible_value,
-            => return 0,
-
-            .one,
-            .bool_true,
-            => return 1,
-
-            .int_u64 => {
-                const x = self.castTag(.int_u64).?.data;
-                if (x == 0) return 0;
-                return @intCast(usize, std.math.log2(x) + 1);
-            },
-            .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(),
-            .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
-
-            .decl_ref_mut,
-            .comptime_field_ptr,
-            .extern_fn,
-            .decl_ref,
-            .function,
-            .variable,
-            .eu_payload_ptr,
-            .opt_payload_ptr,
-            => return target.ptrBitWidth(),
-
-            else => {
-                var buffer: BigIntSpace = undefined;
-                return self.toBigInt(&buffer, target).bitCountTwosComp();
-            },
-        }
+    pub fn intBitCountTwosComp(self: Value, mod: *Module) usize {
+        var buffer: BigIntSpace = undefined;
+        const big_int = self.toBigInt(&buffer, mod);
+        return big_int.bitCountTwosComp();
     }
 
     /// Converts an integer or a float to a float. May result in a loss of information.
     /// Caller can find out by equality checking the result against the operand.
-    pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
-        switch (dest_ty.floatBits(target)) {
-            16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
-            32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
-            64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
-            80 => return Value.Tag.float_80.create(arena, self.toFloat(f80)),
-            128 => return Value.Tag.float_128.create(arena, self.toFloat(f128)),
-            else => unreachable,
-        }
+    pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value {
+        const target = mod.getTarget();
+        return (try mod.intern(.{ .float = .{
+            .ty = dest_ty.toIntern(),
+            .storage = switch (dest_ty.floatBits(target)) {
+                16 => .{ .f16 = self.toFloat(f16, mod) },
+                32 => .{ .f32 = self.toFloat(f32, mod) },
+                64 => .{ .f64 = self.toFloat(f64, mod) },
+                80 => .{ .f80 = self.toFloat(f80, mod) },
+                128 => .{ .f128 = self.toFloat(f128, mod) },
+                else => unreachable,
+            },
+        } })).toValue();
     }
 
     /// Asserts the value is a float
-    pub fn floatHasFraction(self: Value) bool {
-        return switch (self.tag()) {
-            .zero,
-            .one,
-            => false,
-
-            .float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0,
-            .float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0,
-            .float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0,
-            //.float_80 => @rem(self.castTag(.float_80).?.data, 1) != 0,
-            .float_80 => @panic("TODO implement __remx in compiler-rt"),
-            .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0,
-
+    pub fn floatHasFraction(self: Value, mod: *const Module) bool {
+        return switch (mod.intern_pool.indexToKey(self.toIntern())) {
+            .float => |float| switch (float.storage) {
+                inline else => |x| @rem(x, 1) != 0,
+            },
             else => unreachable,
         };
     }
 
-    pub fn orderAgainstZero(lhs: Value) std.math.Order {
-        return orderAgainstZeroAdvanced(lhs, null) catch unreachable;
+    pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order {
+        return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable;
     }
 
     pub fn orderAgainstZeroAdvanced(
         lhs: Value,
+        mod: *Module,
         opt_sema: ?*Sema,
     ) Module.CompileError!std.math.Order {
-        return switch (lhs.tag()) {
-            .zero,
-            .bool_false,
-            .the_only_possible_value,
-            => .eq,
-
-            .one,
-            .bool_true,
-            .decl_ref,
-            .decl_ref_mut,
-            .comptime_field_ptr,
-            .extern_fn,
-            .function,
-            .variable,
-            => .gt,
-
-            .enum_field_index => return std.math.order(lhs.castTag(.enum_field_index).?.data, 0),
-            .runtime_value => {
-                // This is needed to correctly handle hashing the value.
-                // Checks in Sema should prevent direct comparisons from reaching here.
-                const val = lhs.castTag(.runtime_value).?.data;
-                return val.orderAgainstZeroAdvanced(opt_sema);
-            },
-            .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
-            .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0),
-            .int_big_positive => lhs.castTag(.int_big_positive).?.asBigInt().orderAgainstScalar(0),
-            .int_big_negative => lhs.castTag(.int_big_negative).?.asBigInt().orderAgainstScalar(0),
-
-            .lazy_align => {
-                const ty = lhs.castTag(.lazy_align).?.data;
-                const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
-                if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
-                    error.NeedLazy => unreachable,
-                    else => |e| return e,
-                }) {
-                    return .gt;
-                } else {
-                    return .eq;
-                }
-            },
-            .lazy_size => {
-                const ty = lhs.castTag(.lazy_size).?.data;
-                const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
-                if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
-                    error.NeedLazy => unreachable,
-                    else => |e| return e,
-                }) {
-                    return .gt;
-                } else {
-                    return .eq;
-                }
-            },
-
-            .float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0),
-            .float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0),
-            .float_64 => std.math.order(lhs.castTag(.float_64).?.data, 0),
-            .float_80 => std.math.order(lhs.castTag(.float_80).?.data, 0),
-            .float_128 => std.math.order(lhs.castTag(.float_128).?.data, 0),
-
-            .elem_ptr => {
-                const elem_ptr = lhs.castTag(.elem_ptr).?.data;
-                switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(opt_sema)) {
-                    .lt => unreachable,
-                    .gt => return .gt,
-                    .eq => {
-                        if (elem_ptr.index == 0) {
-                            return .eq;
-                        } else {
-                            return .gt;
-                        }
-                    },
-                }
-            },
-
-            else => unreachable,
+        return switch (lhs.toIntern()) {
+            .bool_false => .eq,
+            .bool_true => .gt,
+            else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) {
+                .ptr => |ptr| switch (ptr.addr) {
+                    .decl, .mut_decl, .comptime_field => .gt,
+                    .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema),
+                    .elem => |elem| switch (try elem.base.toValue().orderAgainstZeroAdvanced(mod, opt_sema)) {
+                        .lt => unreachable,
+                        .gt => .gt,
+                        .eq => if (elem.index == 0) .eq else .gt,
+                    },
+                    else => unreachable,
+                },
+                .int => |int| switch (int.storage) {
+                    .big_int => |big_int| big_int.orderAgainstScalar(0),
+                    inline .u64, .i64 => |x| std.math.order(x, 0),
+                    .lazy_align, .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
+                        mod,
+                        false,
+                        if (opt_sema) |sema| .{ .sema = sema } else .eager,
+                    ) catch |err| switch (err) {
+                        error.NeedLazy => unreachable,
+                        else => |e| return e,
+                    }) .gt else .eq,
+                },
+                .enum_tag => |enum_tag| enum_tag.int.toValue().orderAgainstZeroAdvanced(mod, opt_sema),
+                .float => |float| switch (float.storage) {
+                    inline else => |x| std.math.order(x, 0),
+                },
+                else => unreachable,
+            },
         };
     }
 
     /// Asserts the value is comparable.
-    pub fn order(lhs: Value, rhs: Value, target: Target) std.math.Order {
-        return orderAdvanced(lhs, rhs, target, null) catch unreachable;
+    pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order {
+        return orderAdvanced(lhs, rhs, mod, null) catch unreachable;
     }
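
// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: the Order-based plumbing above
// reduces every comparison operator to std.math.order plus Order.compare,
// for example:
const std = @import("std");

test "a comparison reduces to order().compare(op)" {
    try std.testing.expect(std.math.order(3, 0).compare(.gt));
    try std.testing.expect(std.math.order(0, 0).compare(.gte));
    try std.testing.expect(!std.math.order(-1, 0).compare(.eq));
}
// ---------------------------------------------------------------------------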
     /// Asserts the value is comparable.
     /// If opt_sema is null then this function asserts things are resolved and cannot fail.
-    pub fn orderAdvanced(lhs: Value, rhs: Value, target: Target, opt_sema: ?*Sema) !std.math.Order {
-        const lhs_tag = lhs.tag();
-        const rhs_tag = rhs.tag();
-        const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(opt_sema);
-        const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(opt_sema);
+    pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order {
+        const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema);
+        const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema);
         switch (lhs_against_zero) {
             .lt => if (rhs_against_zero != .lt) return .lt,
             .eq => return rhs_against_zero.invert(),
@@ -2071,48 +1354,34 @@ pub const Value = extern union {
             .gt => {},
         }
 
-        const lhs_float = lhs.isFloat();
-        const rhs_float = rhs.isFloat();
-        if (lhs_float and rhs_float) {
-            if (lhs_tag == rhs_tag) {
-                return switch (lhs.tag()) {
-                    .float_16 => return std.math.order(lhs.castTag(.float_16).?.data, rhs.castTag(.float_16).?.data),
-                    .float_32 => return std.math.order(lhs.castTag(.float_32).?.data, rhs.castTag(.float_32).?.data),
-                    .float_64 => return std.math.order(lhs.castTag(.float_64).?.data, rhs.castTag(.float_64).?.data),
-                    .float_80 => return std.math.order(lhs.castTag(.float_80).?.data, rhs.castTag(.float_80).?.data),
-                    .float_128 => return std.math.order(lhs.castTag(.float_128).?.data, rhs.castTag(.float_128).?.data),
-                    else => unreachable,
-                };
-            }
-        }
-        if (lhs_float or rhs_float) {
-            const lhs_f128 = lhs.toFloat(f128);
-            const rhs_f128 = rhs.toFloat(f128);
+        if (lhs.isFloat(mod) or rhs.isFloat(mod)) {
+            const lhs_f128 = lhs.toFloat(f128, mod);
+            const rhs_f128 = rhs.toFloat(f128, mod);
             return std.math.order(lhs_f128, rhs_f128);
         }
 
         var lhs_bigint_space: BigIntSpace = undefined;
         var rhs_bigint_space: BigIntSpace = undefined;
-        const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, target, opt_sema);
-        const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, target, opt_sema);
+        const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema);
+        const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema);
         return lhs_bigint.order(rhs_bigint);
     }
 
     /// Asserts the value is comparable. Does not take a type parameter because it supports
     /// comparisons between heterogeneous types.
-    pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, target: Target) bool {
-        return compareHeteroAdvanced(lhs, op, rhs, target, null) catch unreachable;
+    pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool {
+        return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable;
     }
 
     pub fn compareHeteroAdvanced(
         lhs: Value,
         op: std.math.CompareOperator,
         rhs: Value,
-        target: Target,
+        mod: *Module,
         opt_sema: ?*Sema,
     ) !bool {
-        if (lhs.pointerDecl()) |lhs_decl| {
-            if (rhs.pointerDecl()) |rhs_decl| {
+        if (lhs.pointerDecl(mod)) |lhs_decl| {
+            if (rhs.pointerDecl(mod)) |rhs_decl| {
                 switch (op) {
                     .eq => return lhs_decl == rhs_decl,
                     .neq => return lhs_decl != rhs_decl,
@@ -2125,27 +1394,25 @@ pub const Value = extern union {
                     else => {},
                 }
             }
-        } else if (rhs.pointerDecl()) |_| {
+        } else if (rhs.pointerDecl(mod)) |_| {
             switch (op) {
                 .eq => return false,
                 .neq => return true,
                 else => {},
             }
         }
-        return (try orderAdvanced(lhs, rhs, target, opt_sema)).compare(op);
+        return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op);
     }
     /// Asserts the values are comparable. Both operands have type `ty`.
     /// For vectors, returns true if comparison is true for ALL elements.
-    pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
-        if (ty.zigTypeTag() == .Vector) {
-            var i: usize = 0;
-            while (i < ty.vectorLen()) : (i += 1) {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(), mod)) {
+    pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) !bool {
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const scalar_ty = ty.scalarType(mod);
+            for (0..ty.vectorLen(mod)) |i| {
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, mod)) {
                     return false;
                 }
             }
@@ -2165,7 +1432,7 @@ pub const Value = extern union {
         return switch (op) {
             .eq => lhs.eql(rhs, ty, mod),
             .neq => !lhs.eql(rhs, ty, mod),
-            else => compareHetero(lhs, op, rhs, mod.getTarget()),
+            else => compareHetero(lhs, op, rhs, mod),
         };
     }
 
@@ -2191,47 +1458,31 @@ pub const Value = extern union {
         mod: *Module,
         opt_sema: ?*Sema,
     ) Module.CompileError!bool {
-        if (lhs.isInf()) {
+        if (lhs.isInf(mod)) {
             switch (op) {
                 .neq => return true,
                 .eq => return false,
-                .gt, .gte => return !lhs.isNegativeInf(),
-                .lt, .lte => return lhs.isNegativeInf(),
+                .gt, .gte => return !lhs.isNegativeInf(mod),
+                .lt, .lte => return lhs.isNegativeInf(mod),
             }
         }
 
-        switch (lhs.tag()) {
-            .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
-            .aggregate => {
-                for (lhs.castTag(.aggregate).?.data) |elem_val| {
-                    if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false;
-                }
-                return true;
+        switch (mod.intern_pool.indexToKey(lhs.toIntern())) {
+            .float => |float| switch (float.storage) {
+                inline else => |x| if (std.math.isNan(x)) return op == .neq,
             },
-            .empty_array => return true,
-            .str_lit => {
-                const str_lit = lhs.castTag(.str_lit).?.data;
-                const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
-                for (bytes) |byte| {
-                    if (!std.math.compare(byte, op, 0)) return false;
-                }
-                return true;
+            .aggregate => |aggregate| return switch (aggregate.storage) {
+                .bytes => |bytes| for (bytes) |byte| {
+                    if (!std.math.order(byte, 0).compare(op)) break false;
+                } else true,
+                .elems => |elems| for (elems) |elem| {
+                    if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false;
+                } else true,
+                .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
             },
-            .bytes => {
-                const bytes = lhs.castTag(.bytes).?.data;
-                for (bytes) |byte| {
-                    if (!std.math.compare(byte, op, 0)) return false;
-                }
-                return true;
-            },
-            .float_16 => if (std.math.isNan(lhs.castTag(.float_16).?.data)) return op == .neq,
-            .float_32 => if (std.math.isNan(lhs.castTag(.float_32).?.data)) return op == .neq,
-            .float_64 => if (std.math.isNan(lhs.castTag(.float_64).?.data)) return op == .neq,
-            .float_80 => if (std.math.isNan(lhs.castTag(.float_80).?.data)) return op == .neq,
-            .float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op == .neq,
             else => {},
         }
 
-        return (try orderAgainstZeroAdvanced(lhs, opt_sema)).compare(op);
+        return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op);
     }
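
// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: compareAll's vector semantics in
// miniature. The comparison must hold for every element, so a single failing
// lane decides the result.
const std = @import("std");

test "ALL-elements comparison semantics" {
    const lhs = [_]i32{ 1, 2, 3 };
    const rhs = [_]i32{ 0, 1, 2 };
    var all_gt = true;
    for (lhs, 0..) |l, i| {
        if (!std.math.order(l, rhs[i]).compare(.gt)) all_gt = false;
    }
    try std.testing.expect(all_gt);
}
// ---------------------------------------------------------------------------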
     pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
@@ -2255,109 +1506,42 @@ pub const Value = extern union {
         mod: *Module,
         opt_sema: ?*Sema,
     ) Module.CompileError!bool {
+        if (a.ip_index != .none or b.ip_index != .none) return a.ip_index == b.ip_index;
+
         const target = mod.getTarget();
         const a_tag = a.tag();
         const b_tag = b.tag();
         if (a_tag == b_tag) switch (a_tag) {
-            .undef => return true,
-            .void_value, .null_value, .the_only_possible_value, .empty_struct_value => return true,
-            .enum_literal => {
-                const a_name = a.castTag(.enum_literal).?.data;
-                const b_name = b.castTag(.enum_literal).?.data;
-                return std.mem.eql(u8, a_name, b_name);
-            },
-            .enum_field_index => {
-                const a_field_index = a.castTag(.enum_field_index).?.data;
-                const b_field_index = b.castTag(.enum_field_index).?.data;
-                return a_field_index == b_field_index;
-            },
-            .opt_payload => {
-                const a_payload = a.castTag(.opt_payload).?.data;
-                const b_payload = b.castTag(.opt_payload).?.data;
-                var buffer: Type.Payload.ElemType = undefined;
-                const payload_ty = ty.optionalChild(&buffer);
-                return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema);
-            },
-            .slice => {
-                const a_payload = a.castTag(.slice).?.data;
-                const b_payload = b.castTag(.slice).?.data;
-                if (!(try eqlAdvanced(a_payload.len, Type.usize, b_payload.len, Type.usize, mod, opt_sema))) {
-                    return false;
-                }
-
-                var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
-
-                return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema);
-            },
-            .elem_ptr => {
-                const a_payload = a.castTag(.elem_ptr).?.data;
-                const b_payload = b.castTag(.elem_ptr).?.data;
-                if (a_payload.index != b_payload.index) return false;
-
-                return eqlAdvanced(a_payload.array_ptr, ty, b_payload.array_ptr, ty, mod, opt_sema);
-            },
-            .field_ptr => {
-                const a_payload = a.castTag(.field_ptr).?.data;
-                const b_payload = b.castTag(.field_ptr).?.data;
-                if (a_payload.field_index != b_payload.field_index) return false;
-
-                return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema);
-            },
-            .@"error" => {
-                const a_name = a.castTag(.@"error").?.data.name;
-                const b_name = b.castTag(.@"error").?.data.name;
-                return std.mem.eql(u8, a_name, b_name);
-            },
-            .eu_payload => {
-                const a_payload = a.castTag(.eu_payload).?.data;
-                const b_payload = b.castTag(.eu_payload).?.data;
-                const payload_ty = ty.errorUnionPayload();
-                return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema);
-            },
-            .eu_payload_ptr => {
-                const a_payload = a.castTag(.eu_payload_ptr).?.data;
-                const b_payload = b.castTag(.eu_payload_ptr).?.data;
-                return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema);
-            },
-            .opt_payload_ptr => {
-                const a_payload = a.castTag(.opt_payload_ptr).?.data;
-                const b_payload = b.castTag(.opt_payload_ptr).?.data;
-                return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema);
-            },
-            .function => {
-                const a_payload = a.castTag(.function).?.data;
-                const b_payload = b.castTag(.function).?.data;
-                return a_payload == b_payload;
-            },
             .aggregate => {
                 const a_field_vals = a.castTag(.aggregate).?.data;
                 const b_field_vals = b.castTag(.aggregate).?.data;
                 assert(a_field_vals.len == b_field_vals.len);
 
-                if (ty.isSimpleTupleOrAnonStruct()) {
-                    const types = ty.tupleFields().types;
-                    assert(types.len == a_field_vals.len);
-                    for (types, 0..) |field_ty, i| {
-                        if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, opt_sema))) {
-                            return false;
+                switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+                    .anon_struct_type => |anon_struct| {
+                        assert(anon_struct.types.len == a_field_vals.len);
+                        for (anon_struct.types, 0..) |field_ty, i| {
+                            if (!(try eqlAdvanced(a_field_vals[i], field_ty.toType(), b_field_vals[i], field_ty.toType(), mod, opt_sema))) {
+                                return false;
+                            }
                         }
-                    }
-                    return true;
+                        return true;
+                    },
+                    .struct_type => |struct_type| {
+                        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                        const fields = struct_obj.fields.values();
+                        assert(fields.len == a_field_vals.len);
+                        for (fields, 0..) |field, i| {
+                            if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
+                                return false;
+                            }
+                        }
+                        return true;
+                    },
+                    else => {},
                 }
 
-                if (ty.zigTypeTag() == .Struct) {
-                    const fields = ty.structFields().values();
-                    assert(fields.len == a_field_vals.len);
-                    for (fields, 0..) |field, i| {
-                        if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
-                            return false;
-                        }
-                    }
-                    return true;
-                }
-
-                const elem_ty = ty.childType();
+                const elem_ty = ty.childType(mod);
                 for (a_field_vals, 0..) |a_elem, i| {
                     const b_elem = b_field_vals[i];
 
@@ -2370,9 +1554,9 @@ pub const Value = extern union {
             .@"union" => {
                 const a_union = a.castTag(.@"union").?.data;
                 const b_union = b.castTag(.@"union").?.data;
-                switch (ty.containerLayout()) {
+                switch (ty.containerLayout(mod)) {
                     .Packed, .Extern => {
-                        const tag_ty = ty.unionTagTypeHypothetical();
+                        const tag_ty = ty.unionTagTypeHypothetical(mod);
                         if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) {
                             // In this case, we must disregard mismatching tags and compare
                            // based on the in-memory bytes of the payloads.
@@ -2380,7 +1564,7 @@ pub const Value = extern union {
                         }
                     },
                     .Auto => {
-                        const tag_ty = ty.unionTagTypeHypothetical();
+                        const tag_ty = ty.unionTagTypeHypothetical(mod);
                         if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) {
                             return false;
                         }
@@ -2390,122 +1574,91 @@ pub const Value = extern union {
                 return eqlAdvanced(a_union.val, active_field_ty, b_union.val, active_field_ty, mod, opt_sema);
             },
             else => {},
-        } else if (b_tag == .null_value or b_tag == .@"error") {
-            return false;
-        } else if (a_tag == .undef or b_tag == .undef) {
-            return false;
-        }
+        };
 
-        if (a.pointerDecl()) |a_decl| {
-            if (b.pointerDecl()) |b_decl| {
+        if (a.pointerDecl(mod)) |a_decl| {
+            if (b.pointerDecl(mod)) |b_decl| {
                 return a_decl == b_decl;
             } else {
                 return false;
             }
-        } else if (b.pointerDecl()) |_| {
+        } else if (b.pointerDecl(mod)) |_| {
             return false;
         }
 
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Type => {
-                var buf_a: ToTypeBuffer = undefined;
-                var buf_b: ToTypeBuffer = undefined;
-                const a_type = a.toType(&buf_a);
-                const b_type = b.toType(&buf_b);
+                const a_type = a.toType();
+                const b_type = b.toType();
                 return a_type.eql(b_type, mod);
             },
             .Enum => {
-                var buf_a: Payload.U64 = undefined;
-                var buf_b: Payload.U64 = undefined;
-                const a_val = a.enumToInt(ty, &buf_a);
-                const b_val = b.enumToInt(ty, &buf_b);
-                var buf_ty: Type.Payload.Bits = undefined;
-                const int_ty = ty.intTagType(&buf_ty);
+                const a_val = try a.enumToInt(ty, mod);
+                const b_val = try b.enumToInt(ty, mod);
+                const int_ty = ty.intTagType(mod);
                 return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema);
             },
             .Array, .Vector => {
-                const len = ty.arrayLen();
-                const elem_ty = ty.childType();
+                const len = ty.arrayLen(mod);
+                const elem_ty = ty.childType(mod);
                 var i: usize = 0;
-                var a_buf: ElemValueBuffer = undefined;
-                var b_buf: ElemValueBuffer = undefined;
                 while (i < len) : (i += 1) {
-                    const a_elem = elemValueBuffer(a, mod, i, &a_buf);
-                    const b_elem = elemValueBuffer(b, mod, i, &b_buf);
+                    const a_elem = try elemValue(a, mod, i);
+                    const b_elem = try elemValue(b, mod, i);
                     if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, opt_sema))) {
                         return false;
                     }
                 }
                 return true;
             },
-            .Pointer => switch (ty.ptrSize()) {
+            .Pointer => switch (ty.ptrSize(mod)) {
                 .Slice => {
-                    const a_len = switch (a_ty.ptrSize()) {
+                    const a_len = switch (a_ty.ptrSize(mod)) {
                         .Slice => a.sliceLen(mod),
-                        .One => a_ty.childType().arrayLen(),
+                        .One => a_ty.childType(mod).arrayLen(mod),
                         else => unreachable,
                     };
                     if (a_len != b.sliceLen(mod)) {
                         return false;
                     }
 
-                    var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                    const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
-                    const a_ptr = switch (a_ty.ptrSize()) {
-                        .Slice => a.slicePtr(),
+                    const ptr_ty = ty.slicePtrFieldType(mod);
+                    const a_ptr = switch (a_ty.ptrSize(mod)) {
+                        .Slice => a.slicePtr(mod),
                         .One => a,
                         else => unreachable,
                     };
-                    return try eqlAdvanced(a_ptr, ptr_ty, b.slicePtr(), ptr_ty, mod, opt_sema);
+                    return try eqlAdvanced(a_ptr, ptr_ty, b.slicePtr(mod), ptr_ty, mod, opt_sema);
                 },
                 .Many, .C, .One => {},
             },
             .Struct => {
                 // A struct can be represented with one of:
-                //   .empty_struct_value,
                 //   .the_one_possible_value,
                 //   .aggregate,
                 // Note that we already checked above for matching tags, e.g. both .aggregate.
-                return ty.onePossibleValue() != null;
+                return (try ty.onePossibleValue(mod)) != null;
             },
             .Union => {
                 // Here we have to check for value equality, as-if `a` has been coerced to `ty`.
-                if (ty.onePossibleValue() != null) {
+                if ((try ty.onePossibleValue(mod)) != null) {
                     return true;
                 }
-                if (a_ty.castTag(.anon_struct)) |payload| {
-                    const tuple = payload.data;
-                    if (tuple.values.len != 1) {
-                        return false;
-                    }
-                    const field_name = tuple.names[0];
-                    const union_obj = ty.cast(Type.Payload.Union).?.data;
-                    const field_index = union_obj.fields.getIndex(field_name) orelse return false;
-                    const tag_and_val = b.castTag(.@"union").?.data;
-                    var field_tag_buf: Value.Payload.U32 = .{
-                        .base = .{ .tag = .enum_field_index },
-                        .data = @intCast(u32, field_index),
-                    };
-                    const field_tag = Value.initPayload(&field_tag_buf.base);
-                    const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
-                    if (!tag_matches) return false;
-                    return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, opt_sema);
-                }
                 return false;
             },
             .Float => {
                 switch (ty.floatBits(target)) {
-                    16 => return @bitCast(u16, a.toFloat(f16)) == @bitCast(u16, b.toFloat(f16)),
-                    32 => return @bitCast(u32, a.toFloat(f32)) == @bitCast(u32, b.toFloat(f32)),
-                    64 => return @bitCast(u64, a.toFloat(f64)) == @bitCast(u64, b.toFloat(f64)),
-                    80 => return @bitCast(u80, a.toFloat(f80)) == @bitCast(u80, b.toFloat(f80)),
-                    128 => return @bitCast(u128, a.toFloat(f128)) == @bitCast(u128, b.toFloat(f128)),
+                    16 => return @bitCast(u16, a.toFloat(f16, mod)) == @bitCast(u16, b.toFloat(f16, mod)),
+                    32 => return @bitCast(u32, a.toFloat(f32, mod)) == @bitCast(u32, b.toFloat(f32, mod)),
+                    64 => return @bitCast(u64, a.toFloat(f64, mod)) == @bitCast(u64, b.toFloat(f64, mod)),
+                    80 => return @bitCast(u80, a.toFloat(f80, mod)) == @bitCast(u80, b.toFloat(f80, mod)),
+                    128 => return @bitCast(u128, a.toFloat(f128, mod)) == @bitCast(u128, b.toFloat(f128, mod)),
                     else => unreachable,
                 }
             },
             .ComptimeFloat => {
-                const a_float = a.toFloat(f128);
-                const b_float = b.toFloat(f128);
+                const a_float = a.toFloat(f128, mod);
+                const b_float = b.toFloat(f128, mod);
 
                 const a_nan = std.math.isNan(a_float);
                 const b_nan = std.math.isNan(b_float);
@@ -2514,570 +1667,215 @@ pub const Value = extern union {
                 if (a_nan) return true;
                 return a_float == b_float;
             },
-            .Optional => if (a_tag != .null_value and b_tag == .opt_payload) {
-                var sub_pl: Payload.SubValue = .{
-                    .base = .{ .tag = b.tag() },
-                    .data = a,
-                };
-                const sub_val = Value.initPayload(&sub_pl.base);
-                return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema);
-            },
-            .ErrorUnion => if (a_tag != .@"error" and b_tag == .eu_payload) {
-                var sub_pl: Payload.SubValue = .{
-                    .base = .{ .tag = b.tag() },
-                    .data = a,
-                };
-                const sub_val = Value.initPayload(&sub_pl.base);
-                return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema);
-            },
+            .Optional,
+            .ErrorUnion,
+            => unreachable, // handled by InternPool
             else => {},
         }
-        if (a_tag == .null_value or a_tag == .@"error") return false;
-        return (try orderAdvanced(a, b, target, opt_sema)).compare(.eq);
+        return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq);
     }
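
// ---------------------------------------------------------------------------
// Editor's aside, not part of the patch: the ComptimeFloat branch above
// deliberately deviates from IEEE comparison so that comptime values usable
// as map keys stay reflexive: any NaN equals any other NaN, everything else
// falls back to numeric equality.
const std = @import("std");

test "NaN-tolerant equality used for comptime value identity" {
    const a: f128 = std.math.nan(f128);
    const b: f128 = std.math.nan(f128);
    const eql = if (std.math.isNan(a) or std.math.isNan(b))
        std.math.isNan(a) and std.math.isNan(b)
    else
        a == b;
    try std.testing.expect(eql);
}
// ---------------------------------------------------------------------------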
-    /// This function is used by hash maps and so treats floating-point NaNs as equal
-    /// to each other, and not equal to other floating-point values.
-    pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
-        const zig_ty_tag = ty.zigTypeTag();
-        std.hash.autoHash(hasher, zig_ty_tag);
-        if (val.isUndef()) return;
-        // The value is runtime-known and shouldn't affect the hash.
-        if (val.tag() == .runtime_value) return;
-
-        switch (zig_ty_tag) {
-            .Opaque => unreachable, // Cannot hash opaque types
-
-            .Void,
-            .NoReturn,
-            .Undefined,
-            .Null,
-            => {},
-
-            .Type => {
-                var buf: ToTypeBuffer = undefined;
-                return val.toType(&buf).hashWithHasher(hasher, mod);
+    pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .ptr => |ptr| switch (ptr.addr) {
+                .mut_decl, .comptime_field => true,
+                .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod),
+                .elem, .field => |base_index| base_index.base.toValue().isComptimeMutablePtr(mod),
+                else => false,
             },
-            .Float => {
-                // For hash/eql purposes, we treat floats as their IEEE integer representation.
-                switch (ty.floatBits(mod.getTarget())) {
-                    16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16))),
-                    32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32))),
-                    64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64))),
-                    80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80))),
-                    128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
-                    else => unreachable,
-                }
-            },
-            .ComptimeFloat => {
-                const float = val.toFloat(f128);
-                const is_nan = std.math.isNan(float);
-                std.hash.autoHash(hasher, is_nan);
-                if (!is_nan) {
-                    std.hash.autoHash(hasher, @bitCast(u128, float));
-                } else {
-                    std.hash.autoHash(hasher, std.math.signbit(float));
-                }
-            },
-            .Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) {
-                .slice => {
-                    const slice = val.castTag(.slice).?.data;
-                    var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                    const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
-                    hash(slice.ptr, ptr_ty, hasher, mod);
-                    hash(slice.len, Type.usize, hasher, mod);
-                },
-
-                else => return hashPtr(val, hasher, mod.getTarget()),
-            },
-            .Array, .Vector => {
-                const len = ty.arrayLen();
-                const elem_ty = ty.childType();
-                var index: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
-                while (index < len) : (index += 1) {
-                    const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
-                    elem_val.hash(elem_ty, hasher, mod);
-                }
-            },
-            .Struct => {
-                switch (val.tag()) {
-                    .empty_struct_value => {},
-                    .aggregate => {
-                        const field_values = val.castTag(.aggregate).?.data;
-                        for (field_values, 0..) |field_val, i| {
-                            const field_ty = ty.structFieldType(i);
-                            field_val.hash(field_ty, hasher, mod);
-                        }
-                    },
-                    else => unreachable,
-                }
-            },
-            .Optional => {
-                if (val.castTag(.opt_payload)) |payload| {
-                    std.hash.autoHash(hasher, true); // non-null
-                    const sub_val = payload.data;
-                    var buffer: Type.Payload.ElemType = undefined;
-                    const sub_ty = ty.optionalChild(&buffer);
-                    sub_val.hash(sub_ty, hasher, mod);
-                } else {
-                    std.hash.autoHash(hasher, false); // null
-                }
-            },
-            .ErrorUnion => {
-                if (val.tag() == .@"error") {
-                    std.hash.autoHash(hasher, false); // error
-                    const sub_ty = ty.errorUnionSet();
-                    val.hash(sub_ty, hasher, mod);
-                    return;
-                }
-
-                if (val.castTag(.eu_payload)) |payload| {
-                    std.hash.autoHash(hasher, true); // payload
-                    const sub_ty = ty.errorUnionPayload();
-                    payload.data.hash(sub_ty, hasher, mod);
-                    return;
-                } else unreachable;
-            },
-            .ErrorSet => {
-                // just hash the literal error value. this is the most stable
-                // thing between compiler invocations. we can't use the error
-                // int cause (1) its not stable and (2) we don't have access to mod.
- hasher.update(val.getError().?); - }, - .Enum => { - var enum_space: Payload.U64 = undefined; - const int_val = val.enumToInt(ty, &enum_space); - hashInt(int_val, hasher, mod.getTarget()); - }, - .Union => { - const union_obj = val.cast(Payload.Union).?.data; - if (ty.unionTagType()) |tag_ty| { - union_obj.tag.hash(tag_ty, hasher, mod); - } - const active_field_ty = ty.unionFieldType(union_obj.tag, mod); - union_obj.val.hash(active_field_ty, hasher, mod); - }, - .Fn => { - // Note that this hashes the *Fn/*ExternFn rather than the *Decl. - // This is to differentiate function bodies from function pointers. - // This is currently redundant since we already hash the zig type tag - // at the top of this function. - if (val.castTag(.function)) |func| { - std.hash.autoHash(hasher, func.data); - } else if (val.castTag(.extern_fn)) |func| { - std.hash.autoHash(hasher, func.data); - } else unreachable; - }, - .Frame => { - @panic("TODO implement hashing frame values"); - }, - .AnyFrame => { - @panic("TODO implement hashing anyframe values"); - }, - .EnumLiteral => { - const bytes = val.castTag(.enum_literal).?.data; - hasher.update(bytes); - }, - } - } - - /// This is a more conservative hash function that produces equal hashes for values - /// that can coerce into each other. - /// This function is used by hash maps and so treats floating-point NaNs as equal - /// to each other, and not equal to other floating-point values. - pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - if (val.isUndef()) return; - // The value is runtime-known and shouldn't affect the hash. - if (val.tag() == .runtime_value) return; - - switch (ty.zigTypeTag()) { - .Opaque => unreachable, // Cannot hash opaque types - .Void, - .NoReturn, - .Undefined, - .Null, - .Struct, // It sure would be nice to do something clever with structs. 
-            => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag),
-            .Type => {
-                var buf: ToTypeBuffer = undefined;
-                val.toType(&buf).hashWithHasher(hasher, mod);
-            },
-            .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
-            .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
-                .slice => {
-                    const slice = val.castTag(.slice).?.data;
-                    var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                    const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
-                    slice.ptr.hashUncoerced(ptr_ty, hasher, mod);
-                },
-                else => val.hashPtr(hasher, mod.getTarget()),
-            },
-            .Array, .Vector => {
-                const len = ty.arrayLen();
-                const elem_ty = ty.childType();
-                var index: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
-                while (index < len) : (index += 1) {
-                    const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
-                    elem_val.hashUncoerced(elem_ty, hasher, mod);
-                }
-            },
-            .Optional => if (val.castTag(.opt_payload)) |payload| {
-                var buf: Type.Payload.ElemType = undefined;
-                const child_ty = ty.optionalChild(&buf);
-                payload.data.hashUncoerced(child_ty, hasher, mod);
-            } else std.hash.autoHash(hasher, std.builtin.TypeId.Null),
-            .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else {
-                const pl_ty = ty.errorUnionPayload();
-                val.castTag(.eu_payload).?.data.hashUncoerced(pl_ty, hasher, mod);
-            },
-            .Enum, .EnumLiteral, .Union => {
-                hasher.update(val.tagName(ty, mod));
-                if (val.cast(Payload.Union)) |union_obj| {
-                    const active_field_ty = ty.unionFieldType(union_obj.data.tag, mod);
-                    union_obj.data.val.hashUncoerced(active_field_ty, hasher, mod);
-                } else std.hash.autoHash(hasher, std.builtin.TypeId.Void);
-            },
-            .Frame => @panic("TODO implement hashing frame values"),
-            .AnyFrame => @panic("TODO implement hashing anyframe values"),
-        }
-    }
-
-    pub const ArrayHashContext = struct {
-        ty: Type,
-        mod: *Module,
-
-        pub fn hash(self: @This(), val: Value) u32 {
-            const other_context: HashContext = .{ .ty = self.ty, .mod = self.mod };
-            return @truncate(u32, other_context.hash(val));
-        }
-        pub fn eql(self: @This(), a: Value, b: Value, b_index: usize) bool {
-            _ = b_index;
-            return a.eql(b, self.ty, self.mod);
-        }
-    };
-
-    pub const HashContext = struct {
-        ty: Type,
-        mod: *Module,
-
-        pub fn hash(self: @This(), val: Value) u64 {
-            var hasher = std.hash.Wyhash.init(0);
-            val.hash(self.ty, &hasher, self.mod);
-            return hasher.final();
-        }
-
-        pub fn eql(self: @This(), a: Value, b: Value) bool {
-            return a.eql(b, self.ty, self.mod);
-        }
-    };
-
-    pub fn isComptimeMutablePtr(val: Value) bool {
-        return switch (val.tag()) {
-            .decl_ref_mut, .comptime_field_ptr => true,
-            .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr),
-            .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr),
-            .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr),
-            .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr),
-            .slice => isComptimeMutablePtr(val.castTag(.slice).?.data.ptr),
-            else => false,
         };
     }

-    pub fn canMutateComptimeVarState(val: Value) bool {
-        if (val.isComptimeMutablePtr()) return true;
-        switch (val.tag()) {
-            .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(),
-            .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(),
-            .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(),
-            .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(),
-            .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(),
-            .aggregate => {
-                const fields = val.castTag(.aggregate).?.data;
-                for (fields) |field| {
-                    if (field.canMutateComptimeVarState()) return true;
-                }
-                return false;
+    pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool {
+        return val.isComptimeMutablePtr(mod) or switch (val.toIntern()) {
+            else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .error_union => |error_union| switch (error_union.val) {
+                    .err_name => false,
+                    .payload => |payload| payload.toValue().canMutateComptimeVarState(mod),
+                },
+                .ptr => |ptr| switch (ptr.addr) {
+                    .eu_payload, .opt_payload => |base| base.toValue().canMutateComptimeVarState(mod),
+                    else => false,
+                },
+                .opt => |opt| switch (opt.val) {
+                    .none => false,
+                    else => |payload| payload.toValue().canMutateComptimeVarState(mod),
+                },
+                .aggregate => |aggregate| for (aggregate.storage.values()) |elem| {
+                    if (elem.toValue().canMutateComptimeVarState(mod)) break true;
+                } else false,
+                .un => |un| un.val.toValue().canMutateComptimeVarState(mod),
+                else => false,
            },
-            .@"union" => return val.cast(Payload.Union).?.data.val.canMutateComptimeVarState(),
-            .slice => return val.castTag(.slice).?.data.ptr.canMutateComptimeVarState(),
-            else => return false,
-        }
+        };
     }

     /// Gets the decl referenced by this pointer. If the pointer does not point
     /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr),
     /// this function returns null.
-    pub fn pointerDecl(val: Value) ?Module.Decl.Index {
-        return switch (val.tag()) {
-            .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index,
-            .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
-            .function => val.castTag(.function).?.data.owner_decl,
-            .variable => val.castTag(.variable).?.data.owner_decl,
-            .decl_ref => val.cast(Payload.Decl).?.data,
+    pub fn pointerDecl(val: Value, mod: *Module) ?Module.Decl.Index {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .variable => |variable| variable.decl,
+            .extern_func => |extern_func| extern_func.decl,
+            .func => |func| mod.funcPtr(func.index).owner_decl,
+            .ptr => |ptr| switch (ptr.addr) {
+                .decl => |decl| decl,
+                .mut_decl => |mut_decl| mut_decl.decl,
+                else => null,
+            },
             else => null,
         };
     }

-    fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
+    fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void {
         var buffer: BigIntSpace = undefined;
-        const big = int_val.toBigInt(&buffer, target);
+        const big = int_val.toBigInt(&buffer, mod);
         std.hash.autoHash(hasher, big.positive);
         for (big.limbs) |limb| {
             std.hash.autoHash(hasher, limb);
         }
     }

-    fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
-        switch (ptr_val.tag()) {
-            .decl_ref,
-            .decl_ref_mut,
-            .extern_fn,
-            .function,
-            .variable,
-            => {
-                const decl: Module.Decl.Index = ptr_val.pointerDecl().?;
-                std.hash.autoHash(hasher, decl);
-            },
-            .comptime_field_ptr => {
-                std.hash.autoHash(hasher, Value.Tag.comptime_field_ptr);
-            },
+    pub const slice_ptr_index = 0;
+    pub const slice_len_index = 1;

-            .elem_ptr => {
-                const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
-                hashPtr(elem_ptr.array_ptr, hasher, target);
-                std.hash.autoHash(hasher, Value.Tag.elem_ptr);
-                std.hash.autoHash(hasher, elem_ptr.index);
-            },
-            .field_ptr => {
-                const field_ptr = ptr_val.castTag(.field_ptr).?.data;
-                std.hash.autoHash(hasher, Value.Tag.field_ptr);
-                hashPtr(field_ptr.container_ptr, hasher, target);
-                std.hash.autoHash(hasher, field_ptr.field_index);
-            },
-            .eu_payload_ptr => {
-                const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
-                std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr);
-                hashPtr(err_union_ptr.container_ptr, hasher, target);
-            },
-            .opt_payload_ptr => {
-                const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
-                std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr);
-                hashPtr(opt_ptr.container_ptr, hasher, target);
-            },
-
-            .zero,
-            .one,
-            .null_value,
-            .int_u64,
-            .int_i64,
-            .int_big_positive,
-            .int_big_negative,
-            .bool_false,
-            .bool_true,
-            .the_only_possible_value,
-            .lazy_align,
-            .lazy_size,
-            => return hashInt(ptr_val, hasher, target),
-
-            else => unreachable,
-        }
-    }
-
-    pub fn slicePtr(val: Value) Value {
-        return switch (val.tag()) {
-            .slice => val.castTag(.slice).?.data.ptr,
-            // TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc.
-            .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr, .comptime_field_ptr => val,
-            else => unreachable,
-        };
+    pub fn slicePtr(val: Value, mod: *Module) Value {
+        return mod.intern_pool.slicePtr(val.toIntern()).toValue();
     }

     pub fn sliceLen(val: Value, mod: *Module) u64 {
-        return switch (val.tag()) {
-            .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod.getTarget()),
-            .decl_ref => {
-                const decl_index = val.castTag(.decl_ref).?.data;
-                const decl = mod.declPtr(decl_index);
-                if (decl.ty.zigTypeTag() == .Array) {
-                    return decl.ty.arrayLen();
-                } else {
-                    return 1;
-                }
+        const ptr = mod.intern_pool.indexToKey(val.toIntern()).ptr;
+        return switch (ptr.len) {
+            .none => switch (mod.intern_pool.indexToKey(switch (ptr.addr) {
+                .decl => |decl| mod.declPtr(decl).ty.toIntern(),
+                .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).ty.toIntern(),
+                .comptime_field => |comptime_field| mod.intern_pool.typeOf(comptime_field),
+                else => unreachable,
+            })) {
+                .array_type => |array_type| array_type.len,
+                else => 1,
             },
-            .decl_ref_mut => {
-                const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
-                const decl = mod.declPtr(decl_index);
-                if (decl.ty.zigTypeTag() == .Array) {
-                    return decl.ty.arrayLen();
-                } else {
-                    return 1;
-                }
-            },
-            .comptime_field_ptr => {
-                const payload = val.castTag(.comptime_field_ptr).?.data;
-                if (payload.field_ty.zigTypeTag() == .Array) {
-                    return payload.field_ty.arrayLen();
-                } else {
-                    return 1;
-                }
-            },
-            else => unreachable,
+            else => ptr.len.toValue().toUnsignedInt(mod),
         };
     }

     /// Asserts the value is a single-item pointer to an array, or an array,
     /// or an unknown-length pointer, and returns the element value at the index.
-    pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value {
-        return elemValueAdvanced(val, mod, index, arena, undefined);
+    pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
+        return switch (val.ip_index) {
+            .none => switch (val.tag()) {
+                .bytes => try mod.intValue(Type.u8, val.castTag(.bytes).?.data[index]),
+                .repeated => val.castTag(.repeated).?.data,
+                .aggregate => val.castTag(.aggregate).?.data[index],
+                .slice => val.castTag(.slice).?.data.ptr.elemValue(mod, index),
+                else => unreachable,
+            },
+            else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .undef => |ty| (try mod.intern(.{
+                    .undef = ty.toType().elemType2(mod).toIntern(),
+                })).toValue(),
+                .ptr => |ptr| switch (ptr.addr) {
+                    .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index),
+                    .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod))
+                        .toValue().elemValue(mod, index),
+                    .int, .eu_payload => unreachable,
+                    .opt_payload => |base| base.toValue().elemValue(mod, index),
+                    .comptime_field => |field_val| field_val.toValue().elemValue(mod, index),
+                    .elem => |elem| elem.base.toValue().elemValue(mod, index + @intCast(usize, elem.index)),
+                    .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| {
+                        const base_decl = mod.declPtr(decl_index);
+                        const field_val = try base_decl.val.fieldValue(mod, @intCast(usize, field.index));
+                        return field_val.elemValue(mod, index);
+                    } else unreachable,
+                },
+                .opt => |opt| opt.val.toValue().elemValue(mod, index),
+                .aggregate => |aggregate| {
+                    const len = mod.intern_pool.aggregateTypeLen(aggregate.ty);
+                    if (index < len) return switch (aggregate.storage) {
+                        .bytes => |bytes| try mod.intern(.{ .int = .{
+                            .ty = .u8_type,
+                            .storage = .{ .u64 = bytes[index] },
+                        } }),
+                        .elems => |elems| elems[index],
+                        .repeated_elem => |elem| elem,
+                    }.toValue();
+                    assert(index == len);
+                    return mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel.toValue();
+                },
+                else => unreachable,
+            },
+        };
     }

-    pub const ElemValueBuffer = Payload.U64;
-
-    pub fn elemValueBuffer(val: Value, mod: *Module, index: usize, buffer: *ElemValueBuffer) Value {
-        return elemValueAdvanced(val, mod, index, null, buffer) catch unreachable;
+    pub fn isLazyAlign(val: Value, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .int => |int| int.storage == .lazy_align,
+            else => false,
+        };
     }

-    pub fn elemValueAdvanced(
-        val: Value,
-        mod: *Module,
-        index: usize,
-        arena: ?Allocator,
-        buffer: *ElemValueBuffer,
-    ) error{OutOfMemory}!Value {
-        switch (val.tag()) {
-            // This is the case of accessing an element of an undef array.
-            .undef => return Value.undef,
-            .empty_array => unreachable, // out of bounds array index
-            .empty_struct_value => unreachable, // out of bounds array index
+    pub fn isLazySize(val: Value, mod: *Module) bool {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .int => |int| int.storage == .lazy_size,
+            else => false,
+        };
+    }

-            .empty_array_sentinel => {
-                assert(index == 0); // The only valid index for an empty array with sentinel.
-                return val.castTag(.empty_array_sentinel).?.data;
-            },
-
-            .bytes => {
-                const byte = val.castTag(.bytes).?.data[index];
-                if (arena) |a| {
-                    return Tag.int_u64.create(a, byte);
-                } else {
-                    buffer.* = .{
-                        .base = .{ .tag = .int_u64 },
-                        .data = byte,
-                    };
-                    return initPayload(&buffer.base);
-                }
-            },
-            .str_lit => {
-                const str_lit = val.castTag(.str_lit).?.data;
-                const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
-                const byte = bytes[index];
-                if (arena) |a| {
-                    return Tag.int_u64.create(a, byte);
-                } else {
-                    buffer.* = .{
-                        .base = .{ .tag = .int_u64 },
-                        .data = byte,
-                    };
-                    return initPayload(&buffer.base);
-                }
-            },
-
-            // No matter the index; all the elements are the same!
-            .repeated => return val.castTag(.repeated).?.data,
-
-            .aggregate => return val.castTag(.aggregate).?.data[index],
-            .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer),
-
-            .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer),
-            .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer),
-            .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer),
-            .elem_ptr => {
-                const data = val.castTag(.elem_ptr).?.data;
-                return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer);
-            },
-            .field_ptr => {
-                const data = val.castTag(.field_ptr).?.data;
-                if (data.container_ptr.pointerDecl()) |decl_index| {
-                    const container_decl = mod.declPtr(decl_index);
-                    const field_type = data.container_ty.structFieldType(data.field_index);
-                    const field_val = container_decl.val.fieldValue(field_type, data.field_index);
-                    return field_val.elemValueAdvanced(mod, index, arena, buffer);
-                } else unreachable;
-            },
-
-            // The child type of arrays which have only one possible value need
-            // to have only one possible value itself.
-            .the_only_possible_value => return val,
-
-            .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
-            .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
-
-            .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
-            .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
-
-            // These values will implicitly be treated as `repeated`.
-            .zero,
-            .one,
-            .bool_false,
-            .bool_true,
-            .int_i64,
-            .int_u64,
-            => return val,
-
-            else => unreachable,
-        }
+    pub fn isRuntimeValue(val: Value, mod: *Module) bool {
+        return mod.intern_pool.indexToKey(val.toIntern()) == .runtime_value;
     }

     /// Returns true if a Value is backed by a variable
-    pub fn isVariable(
-        val: Value,
-        mod: *Module,
-    ) bool {
-        return switch (val.tag()) {
-            .slice => val.castTag(.slice).?.data.ptr.isVariable(mod),
-            .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod),
-            .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod),
-            .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod),
-            .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod),
-            .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod),
-            .decl_ref => {
-                const decl = mod.declPtr(val.castTag(.decl_ref).?.data);
-                assert(decl.has_tv);
-                return decl.val.isVariable(mod);
-            },
-            .decl_ref_mut => {
-                const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index);
-                assert(decl.has_tv);
-                return decl.val.isVariable(mod);
-            },
-
+    pub fn isVariable(val: Value, mod: *Module) bool {
+        return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .variable => true,
+            .ptr => |ptr| switch (ptr.addr) {
+                .decl => |decl_index| {
+                    const decl = mod.declPtr(decl_index);
+                    assert(decl.has_tv);
+                    return decl.val.isVariable(mod);
+                },
+                .mut_decl => |mut_decl| {
+                    const decl = mod.declPtr(mut_decl.decl);
+                    assert(decl.has_tv);
+                    return decl.val.isVariable(mod);
+                },
+                .int => false,
+                .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isVariable(mod),
+                .comptime_field => |comptime_field| comptime_field.toValue().isVariable(mod),
+                .elem, .field => |base_index| base_index.base.toValue().isVariable(mod),
+            },
             else => false,
         };
     }

     pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool {
-        return switch (val.tag()) {
+        return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .variable => false,
             else => val.isPtrToThreadLocalInner(mod),
         };
     }

-    fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool {
-        return switch (val.tag()) {
-            .slice => val.castTag(.slice).?.data.ptr.isPtrToThreadLocalInner(mod),
-            .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isPtrToThreadLocalInner(mod),
-            .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isPtrToThreadLocalInner(mod),
-            .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod),
-            .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod),
-            .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod),
-            .decl_ref => mod.declPtr(val.castTag(.decl_ref).?.data).val.isPtrToThreadLocalInner(mod),
-            .decl_ref_mut => mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.isPtrToThreadLocalInner(mod),
-
-            .variable => val.castTag(.variable).?.data.is_threadlocal,
+    pub fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool {
+        return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .variable => |variable| variable.is_threadlocal,
+            .ptr => |ptr| switch (ptr.addr) {
+                .decl => |decl_index| {
+                    const decl = mod.declPtr(decl_index);
+                    assert(decl.has_tv);
+                    return decl.val.isPtrToThreadLocalInner(mod);
+                },
+                .mut_decl => |mut_decl| {
+                    const decl = mod.declPtr(mut_decl.decl);
+                    assert(decl.has_tv);
+                    return decl.val.isPtrToThreadLocalInner(mod);
+                },
+                .int => false,
+                .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocalInner(mod),
+                .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocalInner(mod),
+                .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocalInner(mod),
+            },
             else => false,
         };
     }

@@ -3090,238 +1888,239 @@ pub const Value = extern union {
         start: usize,
         end: usize,
     ) error{OutOfMemory}!Value {
-        return switch (val.tag()) {
-            .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array),
-            .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]),
-            .str_lit => {
-                const str_lit = val.castTag(.str_lit).?.data;
-                return Tag.str_lit.create(arena, .{
-                    .index = @intCast(u32, str_lit.index + start),
-                    .len = @intCast(u32, end - start),
-                });
+        // TODO: write something like getCoercedInts to avoid needing to dupe
+        return switch (val.ip_index) {
+            .none => switch (val.tag()) {
+                .slice => val.castTag(.slice).?.data.ptr.sliceArray(mod, arena, start, end),
+                .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]),
+                .repeated => val,
+                .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]),
+                else => unreachable,
             },
-            .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]),
-            .slice => sliceArray(val.castTag(.slice).?.data.ptr, mod, arena, start, end),
-
-            .decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end),
-            .decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end),
-            .comptime_field_ptr => sliceArray(val.castTag(.comptime_field_ptr).?.data.field_val, mod, arena, start, end),
-            .elem_ptr => blk: {
-                const elem_ptr = val.castTag(.elem_ptr).?.data;
-                break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index);
+            else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .ptr => |ptr| switch (ptr.addr) {
+                    .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end),
+                    .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)).toValue()
+                        .sliceArray(mod, arena, start, end),
+                    .comptime_field => |comptime_field| comptime_field.toValue()
+                        .sliceArray(mod, arena, start, end),
+                    .elem => |elem| elem.base.toValue()
+                        .sliceArray(mod, arena, start + @intCast(usize, elem.index), end + @intCast(usize, elem.index)),
+                    else => unreachable,
+                },
+                .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{
+                    .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
+                        .array_type => |array_type| try mod.arrayType(.{
+                            .len = @intCast(u32, end - start),
+                            .child = array_type.child,
+                            .sentinel = if (end == array_type.len) array_type.sentinel else .none,
+                        }),
+                        .vector_type => |vector_type| try mod.vectorType(.{
+                            .len = @intCast(u32, end - start),
+                            .child = vector_type.child,
+                        }),
+                        else => unreachable,
+                    }.toIntern(),
+                    .storage = switch (aggregate.storage) {
+                        .bytes => .{ .bytes = try arena.dupe(u8, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.bytes[start..end]) },
+                        .elems => .{ .elems = try arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) },
+                        .repeated_elem => |elem| .{ .repeated_elem = elem },
+                    },
+                } })).toValue(),
+                else => unreachable,
             },
-
-            .repeated,
-            .the_only_possible_value,
-            => val,
-
-            else => unreachable,
         };
     }

-    pub fn fieldValue(val: Value, ty: Type, index: usize) Value {
-        switch (val.tag()) {
-            .aggregate => {
-                const field_values = val.castTag(.aggregate).?.data;
-                return field_values[index];
+    pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value {
+        return switch (val.ip_index) {
+            .none => switch (val.tag()) {
+                .aggregate => {
+                    const field_values = val.castTag(.aggregate).?.data;
+                    return field_values[index];
+                },
+                .@"union" => {
+                    const payload = val.castTag(.@"union").?.data;
+                    // TODO assert the tag is correct
+                    return payload.val;
+                },
+                else => unreachable,
             },
-            .@"union" => {
-                const payload = val.castTag(.@"union").?.data;
+            else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .undef => |ty| (try mod.intern(.{
+                    .undef = ty.toType().structFieldType(index, mod).toIntern(),
+                })).toValue(),
+                .aggregate => |aggregate| switch (aggregate.storage) {
+                    .bytes => |bytes| try mod.intern(.{ .int = .{
+                        .ty = .u8_type,
+                        .storage = .{ .u64 = bytes[index] },
+                    } }),
+                    .elems => |elems| elems[index],
+                    .repeated_elem => |elem| elem,
+                }.toValue(),
                 // TODO assert the tag is correct
-                return payload.val;
+                .un => |un| un.val.toValue(),
+                else => unreachable,
             },
-
-            .the_only_possible_value => return ty.onePossibleValue().?,
-
-            .empty_struct_value => {
-                if (ty.isSimpleTupleOrAnonStruct()) {
-                    const tuple = ty.tupleFields();
-                    return tuple.values[index];
-                }
-                if (ty.structFieldValueComptime(index)) |some| {
-                    return some;
-                }
-                unreachable;
-            },
-            .undef => return Value.undef,
-
-            else => unreachable,
-        }
+        };
     }

-    pub fn unionTag(val: Value) Value {
-        switch (val.tag()) {
-            .undef, .enum_field_index => return val,
-            .@"union" => return val.castTag(.@"union").?.data.tag,
+    pub fn unionTag(val: Value, mod: *Module) Value {
+        if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag;
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .undef, .enum_tag => val,
+            .un => |un| un.tag.toValue(),
             else => unreachable,
-        }
+        };
     }

     /// Returns a pointer to the element value at the index.
     pub fn elemPtr(
         val: Value,
-        ty: Type,
-        arena: Allocator,
+        elem_ptr_ty: Type,
         index: usize,
         mod: *Module,
     ) Allocator.Error!Value {
-        const elem_ty = ty.elemType2();
-        const ptr_val = switch (val.tag()) {
-            .slice => val.castTag(.slice).?.data.ptr,
+        const elem_ty = elem_ptr_ty.childType(mod);
+        const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .ptr => |ptr| ptr: {
+                switch (ptr.addr) {
+                    .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod))
+                        return (try mod.intern(.{ .ptr = .{
+                            .ty = elem_ptr_ty.toIntern(),
+                            .addr = .{ .elem = .{
+                                .base = elem.base,
+                                .index = elem.index + index,
+                            } },
+                        } })).toValue(),
+                    else => {},
+                }
+                break :ptr switch (ptr.len) {
+                    .none => val,
+                    else => val.slicePtr(mod),
+                };
+            },
             else => val,
         };
-
-        if (ptr_val.tag() == .elem_ptr) {
-            const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
-            if (elem_ptr.elem_ty.eql(elem_ty, mod)) {
-                return Tag.elem_ptr.create(arena, .{
-                    .array_ptr = elem_ptr.array_ptr,
-                    .elem_ty = elem_ptr.elem_ty,
-                    .index = elem_ptr.index + index,
-                });
-            }
-        }
-        return Tag.elem_ptr.create(arena, .{
-            .array_ptr = ptr_val,
-            .elem_ty = elem_ty,
-            .index = index,
-        });
+        var ptr_ty_key = mod.intern_pool.indexToKey(elem_ptr_ty.toIntern()).ptr_type;
+        assert(ptr_ty_key.flags.size != .Slice);
+        ptr_ty_key.flags.size = .Many;
+        return (try mod.intern(.{ .ptr = .{
+            .ty = elem_ptr_ty.toIntern(),
+            .addr = .{ .elem = .{
+                .base = (try mod.getCoerced(ptr_val, try mod.ptrType(ptr_ty_key))).toIntern(),
+                .index = index,
+            } },
+        } })).toValue();
     }

-    pub fn isUndef(self: Value) bool {
-        return self.tag() == .undef;
-    }
-
-    /// TODO: check for cases such as array that is not marked undef but all the element
-    /// values are marked undef, or struct that is not marked undef but all fields are marked
-    /// undef, etc.
-    pub fn isUndefDeep(self: Value) bool {
-        return self.isUndef();
-    }
-
-    /// Returns true if any value contained in `self` is undefined.
-    /// TODO: check for cases such as array that is not marked undef but all the element
-    /// values are marked undef, or struct that is not marked undef but all fields are marked
-    /// undef, etc.
-    pub fn anyUndef(self: Value, mod: *Module) bool {
-        switch (self.tag()) {
-            .slice => {
-                const payload = self.castTag(.slice).?;
-                const len = payload.data.len.toUnsignedInt(mod.getTarget());
-
-                var elem_value_buf: ElemValueBuffer = undefined;
-                var i: usize = 0;
-                while (i < len) : (i += 1) {
-                    const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf);
-                    if (elem_val.anyUndef(mod)) return true;
-                }
-            },
-
-            .aggregate => {
-                const payload = self.castTag(.aggregate).?;
-                for (payload.data) |val| {
-                    if (val.anyUndef(mod)) return true;
-                }
-            },
-
-            .undef => return true,
-            else => {},
-        }
-
-        return false;
-    }
-
-    /// Asserts the value is not undefined and not unreachable.
-    /// Integer value 0 is considered null because of C pointers.
-    pub fn isNull(self: Value) bool {
-        return switch (self.tag()) {
-            .null_value => true,
-            .opt_payload => false,
-
-            // If it's not one of those two tags then it must be a C pointer value,
-            // in which case the value 0 is null and other values are non-null.
-
-            .zero,
-            .bool_false,
-            .the_only_possible_value,
-            => true,
-
-            .one,
-            .bool_true,
-            => false,
-
-            .int_u64,
-            .int_i64,
-            .int_big_positive,
-            .int_big_negative,
-            => self.orderAgainstZero().compare(.eq),
-
-            .undef => unreachable,
-            .unreachable_value => unreachable,
-            .inferred_alloc => unreachable,
-            .inferred_alloc_comptime => unreachable,
-
+    pub fn isUndef(val: Value, mod: *Module) bool {
+        if (val.ip_index == .none) return false;
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .undef => true,
+            .simple_value => |v| v == .undefined,
             else => false,
         };
     }

-    /// Valid only for error (union) types. Asserts the value is not undefined and not
-    /// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether
-    /// something is an error or not because it works without having to figure out the
-    /// string.
-    pub fn getError(self: Value) ?[]const u8 {
-        return switch (self.tag()) {
-            .@"error" => self.castTag(.@"error").?.data.name,
-            .int_u64 => @panic("TODO"),
-            .int_i64 => @panic("TODO"),
-            .int_big_positive => @panic("TODO"),
-            .int_big_negative => @panic("TODO"),
-            .one => @panic("TODO"),
+    /// TODO: check for cases such as array that is not marked undef but all the element
+    /// values are marked undef, or struct that is not marked undef but all fields are marked
+    /// undef, etc.
+    pub fn isUndefDeep(val: Value, mod: *Module) bool {
+        return val.isUndef(mod);
+    }
+
+    /// Returns true if any value contained in `val` is undefined.
+    pub fn anyUndef(val: Value, mod: *Module) !bool {
+        if (val.ip_index == .none) return false;
+        return switch (val.toIntern()) {
+            .undef => true,
+            else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .undef => true,
+                .simple_value => |v| v == .undefined,
+                .ptr => |ptr| switch (ptr.len) {
+                    .none => false,
+                    else => for (0..@intCast(usize, ptr.len.toValue().toUnsignedInt(mod))) |index| {
+                        if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true;
+                    } else false,
+                },
+                .aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| {
+                    const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i];
+                    if (try anyUndef(elem.toValue(), mod)) break true;
+                } else false,
+                else => false,
+            },
+        };
+    }
+
+    /// Asserts the value is not undefined and not unreachable.
+    /// C pointers with an integer value of 0 are also considered null.
+    pub fn isNull(val: Value, mod: *Module) bool {
+        return switch (val.toIntern()) {
             .undef => unreachable,
             .unreachable_value => unreachable,
-            .inferred_alloc => unreachable,
-            .inferred_alloc_comptime => unreachable,
-
-            else => null,
+            .null_value => true,
+            else => return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+                .undef => unreachable,
+                .ptr => |ptr| switch (ptr.addr) {
+                    .int => {
+                        var buf: BigIntSpace = undefined;
+                        return val.toBigInt(&buf, mod).eqZero();
+                    },
+                    else => false,
+                },
+                .opt => |opt| opt.val == .none,
+                else => false,
+            },
         };
     }

+    /// Valid only for error (union) types. Asserts the value is not undefined and not unreachable.
+    pub fn getErrorName(val: Value, mod: *const Module) InternPool.OptionalNullTerminatedString {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .err => |err| err.name.toOptional(),
+            .error_union => |error_union| switch (error_union.val) {
+                .err_name => |err_name| err_name.toOptional(),
+                .payload => .none,
+            },
+            else => unreachable,
+        };
+    }
+
+    pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt {
+        return if (getErrorName(val, mod).unwrap()) |err_name|
+            @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err_name).?)
+        else
+            0;
+    }
+
     /// Assumes the type is an error union. Returns true if and only if the value is
     /// the error union payload, not an error.
-    pub fn errorUnionIsPayload(val: Value) bool {
-        return switch (val.tag()) {
-            .eu_payload => true,
-            else => false,
-
-            .undef => unreachable,
-            .inferred_alloc => unreachable,
-            .inferred_alloc_comptime => unreachable,
-        };
+    pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool {
+        return mod.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload;
     }

     /// Value of the optional, null if optional has no payload.
-    pub fn optionalValue(val: Value) ?Value {
-        if (val.isNull()) return null;
-
-        // Valid for optional representation to be the direct value
-        // and not use opt_payload.
-        return if (val.castTag(.opt_payload)) |p| p.data else val;
+    pub fn optionalValue(val: Value, mod: *const Module) ?Value {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .opt => |opt| switch (opt.val) {
+                .none => null,
+                else => |payload| payload.toValue(),
+            },
+            .ptr => val,
+            else => unreachable,
+        };
     }

     /// Valid for all types. Asserts the value is not undefined.
-    pub fn isFloat(self: Value) bool {
-        return switch (self.tag()) {
+    pub fn isFloat(self: Value, mod: *const Module) bool {
+        return switch (self.toIntern()) {
             .undef => unreachable,
-            .inferred_alloc => unreachable,
-            .inferred_alloc_comptime => unreachable,
-
-            .float_16,
-            .float_32,
-            .float_64,
-            .float_80,
-            .float_128,
-            => true,
-            else => false,
+            else => switch (mod.intern_pool.indexToKey(self.toIntern())) {
+                .undef => unreachable,
+                .float => true,
+                else => false,
+            },
         };
     }

@@ -3333,79 +2132,59 @@ pub const Value = extern union {
     }

     pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
-        const target = mod.getTarget();
-        if (int_ty.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, int_ty.vectorLen());
+        if (int_ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod));
+            const scalar_ty = float_ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try intToFloatScalar(elem_val, scalar_ty, mod, opt_sema)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return intToFloatScalar(val, arena, float_ty, target, opt_sema);
+        return intToFloatScalar(val, float_ty, mod, opt_sema);
     }

-    pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, target: Target, opt_sema: ?*Sema) !Value {
-        switch (val.tag()) {
-            .undef, .zero, .one => return val,
-            .the_only_possible_value => return Value.initTag(.zero), // for i0, u0
-            .int_u64 => {
-                return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target);
-            },
-            .int_i64 => {
-                return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target);
-            },
-            .int_big_positive => {
-                const limbs = val.castTag(.int_big_positive).?.data;
-                const float = bigIntToFloat(limbs, true);
-                return floatToValue(float, arena, float_ty, target);
-            },
-            .int_big_negative => {
-                const limbs = val.castTag(.int_big_negative).?.data;
-                const float = bigIntToFloat(limbs, false);
-                return floatToValue(float, arena, float_ty, target);
-            },
-            .lazy_align => {
-                const ty = val.castTag(.lazy_align).?.data;
-                if (opt_sema) |sema| {
-                    return intToFloatInner((try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target);
+    pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .undef => (try mod.intern(.{ .undef = float_ty.toIntern() })).toValue(),
+            .int => |int| switch (int.storage) {
+                .big_int => |big_int| {
+                    const float = bigIntToFloat(big_int.limbs, big_int.positive);
+                    return mod.floatValue(float_ty, float);
+                },
+                inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod),
+                .lazy_align => |ty| if (opt_sema) |sema| {
+                    return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
                 } else {
-                    return intToFloatInner(ty.abiAlignment(target), arena, float_ty, target);
-                }
-            },
-            .lazy_size => {
-                const ty = val.castTag(.lazy_size).?.data;
-                if (opt_sema) |sema| {
-                    return intToFloatInner((try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target);
+                    return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod);
+                },
+                .lazy_size => |ty| if (opt_sema) |sema| {
+                    return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
                 } else {
-                    return intToFloatInner(ty.abiSize(target), arena, float_ty, target);
-                }
+                    return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod);
+                },
             },
             else => unreachable,
-        }
+        };
     }

-    fn intToFloatInner(x: anytype, arena: Allocator, dest_ty: Type, target: Target) !Value {
-        switch (dest_ty.floatBits(target)) {
-            16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)),
-            32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)),
-            64 => return Value.Tag.float_64.create(arena, @intToFloat(f64, x)),
-            80 => return Value.Tag.float_80.create(arena, @intToFloat(f80, x)),
-            128 => return Value.Tag.float_128.create(arena, @intToFloat(f128, x)),
+    fn intToFloatInner(x: anytype, dest_ty: Type, mod: *Module) !Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) {
+            16 => .{ .f16 = @intToFloat(f16, x) },
+            32 => .{ .f32 = @intToFloat(f32, x) },
+            64 => .{ .f64 = @intToFloat(f64, x) },
+            80 => .{ .f80 = @intToFloat(f80, x) },
+            128 => .{ .f128 = @intToFloat(f128, x) },
             else => unreachable,
-        }
-    }
-
-    pub fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
-        switch (dest_ty.floatBits(target)) {
-            16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
-            32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
-            64 => return Value.Tag.float_64.create(arena, @floatCast(f64, float)),
-            80 => return Value.Tag.float_80.create(arena, @floatCast(f80, float)),
-            128 => return Value.Tag.float_128.create(arena, float),
-            else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = dest_ty.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }

     fn calcLimbLenFloat(scalar: anytype) usize {
@@ -3422,22 +2201,6 @@ pub const Value = extern union {
         wrapped_result: Value,
     };

-    pub fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
-        if (big_int.positive) {
-            if (big_int.to(u64)) |x| {
-                return Value.Tag.int_u64.create(arena, x);
-            } else |_| {
-                return Value.Tag.int_big_positive.create(arena, big_int.limbs);
-            }
-        } else {
-            if (big_int.to(i64)) |x| {
-                return Value.Tag.int_i64.create(arena, x);
-            } else |_| {
-                return Value.Tag.int_big_negative.create(arena, big_int.limbs);
-            }
-        }
-    }
-
     /// Supports (vectors of) integers only; asserts neither operand is undefined.
     pub fn intAddSat(
         lhs: Value,
@@ -3446,19 +2209,20 @@ pub const Value = extern union {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        const target = mod.getTarget();
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return intAddSatScalar(lhs, rhs, ty, arena, target);
+        return intAddSatScalar(lhs, rhs, ty, arena, mod);
     }

     /// Supports integers only; asserts neither operand is undefined.
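
Aside, not part of the patch: the `(try mod.intern(.{ ... })).toValue()` pattern that now appears in every converted function is the core of this migration — computed results are deduplicated into the InternPool and passed around as indices. A toy, standalone Zig sketch of that idea (deliberately simplified to `u64` keys; the real pool keys are the rich `InternPool.Key` unions seen above, and `ToyPool` is an invented name):

const std = @import("std");

// Minimal illustration of interning: each distinct value is stored once,
// and a "value" is just a stable u32 index into the pool.
const ToyPool = struct {
    map: std.AutoHashMap(u64, u32),
    items: std.ArrayList(u64),

    fn init(gpa: std.mem.Allocator) ToyPool {
        return .{
            .map = std.AutoHashMap(u64, u32).init(gpa),
            .items = std.ArrayList(u64).init(gpa),
        };
    }

    fn deinit(pool: *ToyPool) void {
        pool.map.deinit();
        pool.items.deinit();
    }

    // Returns the existing index for `value`, or appends it exactly once.
    // (Two-argument @intCast matches the Zig version this diff targets.)
    fn intern(pool: *ToyPool, value: u64) !u32 {
        const gop = try pool.map.getOrPut(value);
        if (!gop.found_existing) {
            gop.value_ptr.* = @intCast(u32, pool.items.items.len);
            try pool.items.append(value);
        }
        return gop.value_ptr.*;
    }
};

test "interning deduplicates equal values" {
    var pool = ToyPool.init(std.testing.allocator);
    defer pool.deinit();
    const a = try pool.intern(42);
    const b = try pool.intern(42);
    try std.testing.expectEqual(a, b); // equal values share one index
    try std.testing.expect(try pool.intern(7) != a);
}
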
@@ -3467,24 +2231,24 @@ pub const Value = extern union {
         rhs: Value,
         ty: Type,
         arena: Allocator,
-        target: Target,
+        mod: *Module,
     ) !Value {
-        assert(!lhs.isUndef());
-        assert(!rhs.isUndef());
+        assert(!lhs.isUndef(mod));
+        assert(!rhs.isUndef(mod));

-        const info = ty.intInfo(target);
+        const info = ty.intInfo(mod);

         var lhs_space: Value.BigIntSpace = undefined;
         var rhs_space: Value.BigIntSpace = undefined;
-        const lhs_bigint = lhs.toBigInt(&lhs_space, target);
-        const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+        const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+        const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
             std.math.big.int.calcTwosCompLimbCount(info.bits),
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// Supports (vectors of) integers only; asserts neither operand is undefined.
@@ -3495,19 +2259,20 @@ pub const Value = extern union {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        const target = mod.getTarget();
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return intSubSatScalar(lhs, rhs, ty, arena, target);
+        return intSubSatScalar(lhs, rhs, ty, arena, mod);
     }

     /// Supports integers only; asserts neither operand is undefined.
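
Aside, not part of the patch: `intAddSatScalar` and `intSubSatScalar` clamp through `BigIntMutable.addSat`/`subSat`; the language-level operators with the same clamping semantics are `+|` and `-|`. A standalone check of the behavior these Value routines model at comptime:

const std = @import("std");

test "saturating add/sub semantics modeled by intAddSat/intSubSat" {
    try std.testing.expectEqual(@as(u8, 255), @as(u8, 250) +| 10); // clamps at the max
    try std.testing.expectEqual(@as(u8, 0), @as(u8, 5) -| 10); // clamps at zero
    try std.testing.expectEqual(@as(i8, -128), @as(i8, -120) -| 10); // clamps at the min
}
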
@@ -3516,24 +2281,24 @@ pub const Value = extern union {
         rhs: Value,
         ty: Type,
         arena: Allocator,
-        target: Target,
+        mod: *Module,
     ) !Value {
-        assert(!lhs.isUndef());
-        assert(!rhs.isUndef());
+        assert(!lhs.isUndef(mod));
+        assert(!rhs.isUndef(mod));

-        const info = ty.intInfo(target);
+        const info = ty.intInfo(mod);

         var lhs_space: Value.BigIntSpace = undefined;
         var rhs_space: Value.BigIntSpace = undefined;
-        const lhs_bigint = lhs.toBigInt(&lhs_space, target);
-        const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+        const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+        const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
             std.math.big.int.calcTwosCompLimbCount(info.bits),
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     pub fn intMulWithOverflow(
@@ -3543,25 +2308,30 @@ pub const Value = extern union {
         arena: Allocator,
         mod: *Module,
     ) !OverflowArithmeticResult {
-        const target = mod.getTarget();
-        if (ty.zigTypeTag() == .Vector) {
-            const overflowed_data = try arena.alloc(Value, ty.vectorLen());
-            const result_data = try arena.alloc(Value, ty.vectorLen());
-            for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
-                overflowed_data[i] = of_math_result.overflow_bit;
-                scalar.* = of_math_result.wrapped_result;
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const vec_len = ty.vectorLen(mod);
+            const overflowed_data = try arena.alloc(InternPool.Index, vec_len);
+            const result_data = try arena.alloc(InternPool.Index, vec_len);
+            const scalar_ty = ty.scalarType(mod);
+            for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
+                of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
+                scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
             }
             return OverflowArithmeticResult{
-                .overflow_bit = try Value.Tag.aggregate.create(arena, overflowed_data),
-                .wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
+                .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                    .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                    .storage = .{ .elems = overflowed_data },
+                } })).toValue(),
+                .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                    .ty = ty.toIntern(),
+                    .storage = .{ .elems = result_data },
+                } })).toValue(),
             };
         }
-        return intMulWithOverflowScalar(lhs, rhs, ty, arena, target);
+        return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod);
     }

     pub fn intMulWithOverflowScalar(
@@ -3569,14 +2339,14 @@ pub const Value = extern union {
         rhs: Value,
         ty: Type,
         arena: Allocator,
-        target: Target,
+        mod: *Module,
     ) !OverflowArithmeticResult {
-        const info = ty.intInfo(target);
+        const info = ty.intInfo(mod);

         var lhs_space: Value.BigIntSpace = undefined;
         var rhs_space: Value.BigIntSpace = undefined;
-        const lhs_bigint = lhs.toBigInt(&lhs_space, target);
-        const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+        const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+        const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
             lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@@ -3594,8 +2364,8 @@ pub const Value = extern union {
         }

         return OverflowArithmeticResult{
-            .overflow_bit = boolToInt(overflowed),
-            .wrapped_result = try fromBigInt(arena, result_bigint.toConst()),
+            .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)),
+            .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
         };
     }

@@ -3607,16 +2377,18 @@ pub const Value = extern union {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return numberMulWrapScalar(lhs, rhs, ty, arena, mod);
     }
@@ -3629,10 +2401,10 @@ pub const Value = extern union {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef;

-        if (ty.zigTypeTag() == .ComptimeInt) {
-            return intMul(lhs, rhs, ty, arena, mod);
+        if (ty.zigTypeTag(mod) == .ComptimeInt) {
+            return intMul(lhs, rhs, ty, undefined, arena, mod);
         }

         if (ty.isAnyFloat()) {
@@ -3651,19 +2423,20 @@ pub const Value = extern union {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        const target = mod.getTarget();
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
            }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return intMulSatScalar(lhs, rhs, ty, arena, target);
+        return intMulSatScalar(lhs, rhs, ty, arena, mod);
     }

     /// Supports (vectors of) integers only; asserts neither operand is undefined.
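
Aside, not part of the patch: `intMulSatScalar` (next hunk) computes the full big-integer product and then calls `saturate`, so results clamp to the type's bounds exactly like the `*|` operator. A standalone check of those semantics:

const std = @import("std");

test "saturating multiply semantics modeled by intMulSat" {
    try std.testing.expectEqual(@as(u8, 255), @as(u8, 16) *| 16); // 256 clamps to 255
    try std.testing.expectEqual(@as(i8, 127), @as(i8, 64) *| 4); // 256 clamps to 127
    try std.testing.expectEqual(@as(i8, -128), @as(i8, 64) *| -4); // -256 clamps to -128
}
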
@@ -3672,17 +2445,17 @@ pub const Value = extern union {
         rhs: Value,
         ty: Type,
         arena: Allocator,
-        target: Target,
+        mod: *Module,
     ) !Value {
-        assert(!lhs.isUndef());
-        assert(!rhs.isUndef());
+        assert(!lhs.isUndef(mod));
+        assert(!rhs.isUndef(mod));

-        const info = ty.intInfo(target);
+        const info = ty.intInfo(mod);

         var lhs_space: Value.BigIntSpace = undefined;
         var rhs_space: Value.BigIntSpace = undefined;
-        const lhs_bigint = lhs.toBigInt(&lhs_space, target);
-        const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+        const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+        const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
             std.math.max(
@@ -3698,28 +2471,28 @@ pub const Value = extern union {
         );
         result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena);
         result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// Supports both floats and ints; handles undefined.
-    pub fn numberMax(lhs: Value, rhs: Value, target: Target) Value {
-        if (lhs.isUndef() or rhs.isUndef()) return undef;
-        if (lhs.isNan()) return rhs;
-        if (rhs.isNan()) return lhs;
+    pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value {
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef;
+        if (lhs.isNan(mod)) return rhs;
+        if (rhs.isNan(mod)) return lhs;

-        return switch (order(lhs, rhs, target)) {
+        return switch (order(lhs, rhs, mod)) {
             .lt => rhs,
             .gt, .eq => lhs,
         };
     }

     /// Supports both floats and ints; handles undefined.
-    pub fn numberMin(lhs: Value, rhs: Value, target: Target) Value {
-        if (lhs.isUndef() or rhs.isUndef()) return undef;
-        if (lhs.isNan()) return rhs;
-        if (rhs.isNan()) return lhs;
+    pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value {
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef;
+        if (lhs.isNan(mod)) return rhs;
+        if (rhs.isNan(mod)) return lhs;

-        return switch (order(lhs, rhs, target)) {
+        return switch (order(lhs, rhs, mod)) {
             .lt => lhs,
             .gt, .eq => rhs,
         };
@@ -3727,24 +2500,27 @@ pub const Value = extern union {

     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return bitwiseNotScalar(val, ty, arena, target);
+        return bitwiseNotScalar(val, ty, arena, mod);
     }

     /// operands must be integers; handles undefined.
-    pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
-        if (val.isUndef()) return Value.initTag(.undef);
+    pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+        if (val.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(!val.toBool());

-        const info = ty.intInfo(target);
+        const info = ty.intInfo(mod);

         if (info.bits == 0) {
             return val;
@@ -3753,7 +2529,7 @@ pub const Value = extern union {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var val_space: Value.BigIntSpace = undefined;
-        const val_bigint = val.toBigInt(&val_space, target);
+        const val_bigint = val.toBigInt(&val_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
             std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -3761,36 +2537,38 @@ pub const Value = extern union {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return bitwiseAndScalar(lhs, rhs, allocator, target);
+        return bitwiseAndScalar(lhs, rhs, ty, allocator, mod);
     }

     /// operands must be integers; handles undefined.
-    pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value {
-        if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+    pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool());

         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
         var rhs_space: Value.BigIntSpace = undefined;
-        const lhs_bigint = lhs.toBigInt(&lhs_space, target);
-        const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+        const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+        const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
             // + 1 for negatives
@@ -3798,102 +2576,104 @@ pub const Value = extern union {
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitAnd(lhs_bigint, rhs_bigint);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return bitwiseNandScalar(lhs, rhs, ty, arena, mod);
     }

     /// operands must be integers; handles undefined.
     pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
-        if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool()));

         const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
-
-        const all_ones = if (ty.isSignedInt())
-            try Value.Tag.int_i64.create(arena, -1)
-        else
-            try ty.maxInt(arena, mod.getTarget());
-
+        const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty);
         return bitwiseXor(anded, all_ones, ty, arena, mod);
     }

     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return bitwiseOrScalar(lhs, rhs, allocator, target);
+        return bitwiseOrScalar(lhs, rhs, ty, allocator, mod);
     }

     /// operands must be integers; handles undefined.
-    pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value {
-        if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+    pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
+        if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue();
+        if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool());

         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
         var rhs_space: Value.BigIntSpace = undefined;
-        const lhs_bigint = lhs.toBigInt(&lhs_space, target);
-        const rhs_bigint = rhs.toBigInt(&rhs_space, target);
+        const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
+        const rhs_bigint = rhs.toBigInt(&rhs_space, mod);
         const limbs = try arena.alloc(
             std.math.big.Limb,
             std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitOr(lhs_bigint, rhs_bigint);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (ty.zigTypeTag() == .Vector) {
-            const result_data = try allocator.alloc(Value, ty.vectorLen());
+        if (ty.zigTypeTag(mod) == .Vector) {
+            const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
            }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
        }
-        return bitwiseXorScalar(lhs, rhs, allocator, target);
+        return bitwiseXorScalar(lhs, rhs, ty, allocator, mod);
    }

     /// operands must be integers; handles undefined.
- pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue(); + if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool()); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives @@ -3901,32 +2681,61 @@ pub const Value = extern union { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } - pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting + /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). + pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { + var overflow: usize = undefined; + return intDivInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return intDivInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; + } + + fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return intDivScalar(lhs, rhs, allocator, target); + return intDivScalar(lhs, rhs, ty, allocator, mod); } - pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -3942,32 +2751,39 @@ pub const Value = extern union { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_q.toConst()); + if (ty.toIntern() != .comptime_int_type) { + const info = ty.intInfo(mod); + if (!result_q.toConst().fitsInTwosComp(info.signedness, info.bits)) { + return error.Overflow; + } + } + return mod.intValue_big(ty, result_q.toConst()); } pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return intDivFloorScalar(lhs, rhs, allocator, target); + return intDivFloorScalar(lhs, rhs, ty, allocator, mod); } - pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -3983,32 +2799,33 @@ pub const Value = extern union { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_q.toConst()); + return mod.intValue_big(ty, result_q.toConst()); } pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return intModScalar(lhs, rhs, allocator, target); + return intModScalar(lhs, rhs, ty, allocator, mod); } - pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
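// For reference, a worked example of the floored semantics used below: `divFloor`
// leaves a remainder in `result_r` that takes the sign of the divisor, which is
// what `@mod` requires. For lhs = -7 and rhs = 3 it yields q = -3 and r = 2, so
// intModScalar returns 2, interned into `ty` via `mod.intValue_big`.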
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -4024,161 +2841,164 @@ pub const Value = extern union { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_r.toConst()); + return mod.intValue_big(ty, result_r.toConst()); } /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. - pub fn isNan(val: Value) bool { - return switch (val.tag()) { - .float_16 => std.math.isNan(val.castTag(.float_16).?.data), - .float_32 => std.math.isNan(val.castTag(.float_32).?.data), - .float_64 => std.math.isNan(val.castTag(.float_64).?.data), - .float_80 => std.math.isNan(val.castTag(.float_80).?.data), - .float_128 => std.math.isNan(val.castTag(.float_128).?.data), + pub fn isNan(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isNan(x), + }, else => false, }; } /// Returns true if the value is a floating point type and is infinite. Returns false otherwise. - pub fn isInf(val: Value) bool { - return switch (val.tag()) { - .float_16 => std.math.isInf(val.castTag(.float_16).?.data), - .float_32 => std.math.isInf(val.castTag(.float_32).?.data), - .float_64 => std.math.isInf(val.castTag(.float_64).?.data), - .float_80 => std.math.isInf(val.castTag(.float_80).?.data), - .float_128 => std.math.isInf(val.castTag(.float_128).?.data), + pub fn isInf(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isInf(x), + }, else => false, }; } - pub fn isNegativeInf(val: Value) bool { - return switch (val.tag()) { - .float_16 => std.math.isNegativeInf(val.castTag(.float_16).?.data), - .float_32 => std.math.isNegativeInf(val.castTag(.float_32).?.data), - .float_64 => std.math.isNegativeInf(val.castTag(.float_64).?.data), - .float_80 => std.math.isNegativeInf(val.castTag(.float_80).?.data), - .float_128 => std.math.isNegativeInf(val.castTag(.float_128).?.data), + pub fn isNegativeInf(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isNegativeInf(x), + }, else => false, }; } pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return floatRemScalar(lhs, rhs, float_type, arena, target); + return floatRemScalar(lhs, rhs, float_type, mod); } - pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); - return Value.Tag.float_16.create(arena, @rem(lhs_val, rhs_val)); - }, - 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); - return Value.Tag.float_32.create(arena, @rem(lhs_val, rhs_val)); - }, - 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); - return Value.Tag.float_64.create(arena, @rem(lhs_val, rhs_val)); - }, - 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); - return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val)); - }, - 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); - return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val)); - }, + pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @rem(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @rem(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @rem(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @rem(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @rem(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } })).toValue(); } pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return floatModScalar(lhs, rhs, float_type, arena, target); + return floatModScalar(lhs, rhs, float_type, mod); } - pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); - return Value.Tag.float_16.create(arena, @mod(lhs_val, rhs_val)); - }, - 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); - return Value.Tag.float_32.create(arena, @mod(lhs_val, rhs_val)); - }, - 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); - return Value.Tag.float_64.create(arena, @mod(lhs_val, rhs_val)); - }, - 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); - return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val)); - }, - 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); - return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val)); - }, + pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @mod(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @mod(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @mod(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @mod(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @mod(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } })).toValue(); } - pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting + /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). 
+ pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { + var overflow: usize = undefined; + return intMulInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return intMulInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; + } + + fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return intMulScalar(lhs, rhs, allocator, target); + return intMulScalar(lhs, rhs, ty, allocator, mod); } - pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.toIntern() != .comptime_int_type) { + const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, mod); + if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + return res.wrapped_result; + } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
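// A possible call-site shape for the overflow-reporting entry point added above;
// the caller-side names (lhs_val, rhs_val, arena) are illustrative, not from this
// diff:
//
//     var overflow_idx: ?usize = null;
//     const result = try Value.intMul(lhs_val, rhs_val, ty, &overflow_idx, arena, mod);
//     if (overflow_idx) |i| {
//         // `result` is now a comptime_int (or vector thereof) holding the exact
//         // product, and `i` is the vector index that overflowed (0 for a scalar).
//     }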
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -4190,21 +3010,23 @@ pub const Value = extern union { ); defer allocator.free(limbs_buffer); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return intTruncScalar(val, allocator, signedness, bits, target); + return intTruncScalar(val, ty, allocator, signedness, bits, mod); } /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. @@ -4216,26 +3038,34 @@ pub const Value = extern union { bits: Value, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - var bits_buf: Value.ElemValueBuffer = undefined; - const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(target)), target); + const elem_val = try val.elemValue(mod, i); + const bits_elem = try bits.elemValue(mod, i); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(target)), target); + return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); } - pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value { - if (bits == 0) return Value.zero; + pub fn intTruncScalar( + val: Value, + ty: Type, + allocator: Allocator, + signedness: std.builtin.Signedness, + bits: u16, + mod: *Module, + ) !Value { + if (bits == 0) return mod.intValue(ty, 0); var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, target); + const val_bigint = val.toBigInt(&val_space, mod); const limbs = try allocator.alloc( std.math.big.Limb, @@ -4244,31 +3074,32 @@ pub const Value = extern union { var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.truncate(val_bigint, signedness, bits); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return shlScalar(lhs, rhs, allocator, target); + return shlScalar(lhs, rhs, ty, allocator, mod); } - pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
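// Note on the truncation added below: for fixed-width integer types it gives
// `shlScalar` wrapping semantics. For example, a u8 lhs of 0xF0 shifted left by 1
// produces the BigInt value 0x1E0, which the truncate reduces to 8 bits as 0xE0.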
var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -4279,7 +3110,12 @@ pub const Value = extern union { .len = undefined, }; result_bigint.shiftLeft(lhs_bigint, shift); - return fromBigInt(allocator, result_bigint.toConst()); + if (ty.toIntern() != .comptime_int_type) { + const int_info = ty.intInfo(mod); + result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); + } + + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shlWithOverflow( @@ -4289,25 +3125,30 @@ pub const Value = extern union { allocator: Allocator, mod: *Module, ) !OverflowArithmeticResult { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const overflowed_data = try allocator.alloc(Value, ty.vectorLen()); - const result_data = try allocator.alloc(Value, ty.vectorLen()); - for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), allocator, target); - overflowed_data[i] = of_math_result.overflow_bit; - scalar.* = of_math_result.wrapped_result; + if (ty.zigTypeTag(mod) == .Vector) { + const vec_len = ty.vectorLen(mod); + const overflowed_data = try allocator.alloc(InternPool.Index, vec_len); + const result_data = try allocator.alloc(InternPool.Index, vec_len); + const scalar_ty = ty.scalarType(mod); + for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return OverflowArithmeticResult{ - .overflow_bit = try Value.Tag.aggregate.create(allocator, overflowed_data), - .wrapped_result = try Value.Tag.aggregate.create(allocator, result_data), + .overflow_bit = (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = overflowed_data }, + } })).toValue(), + .wrapped_result = (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(), }; } - return shlWithOverflowScalar(lhs, rhs, ty, allocator, target); + return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod); } pub fn shlWithOverflowScalar( @@ -4315,12 +3156,12 @@ pub const Value = extern union { rhs: Value, ty: Type, allocator: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -4336,8 +3177,8 @@ pub const Value = extern union { result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); } return OverflowArithmeticResult{ - .overflow_bit = boolToInt(overflowed), - .wrapped_result = try fromBigInt(allocator, result_bigint.toConst()), + .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)), + .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), }; } @@ -4348,19 +3189,20 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return shlSatScalar(lhs, rhs, ty, arena, target); + return shlSatScalar(lhs, rhs, ty, arena, mod); } pub fn shlSatScalar( @@ -4368,15 +3210,15 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { // TODO is this a performance issue? 
maybe we should try the operation without // resorting to BigInt first. - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, @@ -4387,7 +3229,7 @@ pub const Value = extern union { .len = undefined, }; result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shlTrunc( @@ -4397,16 +3239,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - if (ty.zigTypeTag() == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return shlTruncScalar(lhs, rhs, ty, arena, mod); } @@ -4419,42 +3263,43 @@ pub const Value = extern union { mod: *Module, ) !Value { const shifted = try lhs.shl(rhs, ty, arena, mod); - const int_info = ty.intInfo(mod.getTarget()); + const int_info = ty.intInfo(mod); const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod); return truncated; } pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return shrScalar(lhs, rhs, allocator, target); + return shrScalar(lhs, rhs, ty, allocator, mod); } - pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); if (result_limbs == 0) { // The shift is enough to remove all the bits from the number, which means the // result is 0 or -1 depending on the sign. if (lhs_bigint.positive) { - return Value.zero; + return mod.intValue(ty, 0); } else { - return Value.negative_one; + return mod.intValue(ty, -1); } } @@ -4468,7 +3313,7 @@ pub const Value = extern union { .len = undefined, }; result_bigint.shiftRight(lhs_bigint, shift); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn floatNeg( @@ -4477,33 +3322,127 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try floatNegScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return floatNegScalar(val, float_type, arena, target); + return floatNegScalar(val, float_type, mod); } pub fn floatNegScalar( val: Value, float_type: Type, - arena: Allocator, - target: Target, + mod: *Module, ) !Value { - switch (float_type.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16)), - 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32)), - 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64)), - 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80)), - 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128)), + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = -val.toFloat(f16, mod) }, + 32 => .{ .f32 = -val.toFloat(f32, mod) }, + 64 => .{ .f64 = -val.toFloat(f64, mod) }, + 80 => .{ .f80 = -val.toFloat(f80, mod) }, + 128 => .{ .f128 = -val.toFloat(f128, mod) }, else => unreachable, + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } })).toValue(); + } + + pub fn floatAdd( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, + ) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } + return floatAddScalar(lhs, rhs, float_type, mod); + } + + pub fn floatAddScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, + ) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) + rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) + rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) + rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) + rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) + rhs.toFloat(f128, mod) }, + else => unreachable, + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } })).toValue(); + } + + pub fn floatSub( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, + ) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); + } + return floatSubScalar(lhs, rhs, float_type, mod); + } + + pub fn floatSubScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, + ) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) - rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) - rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) - rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) - rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) - rhs.toFloat(f128, mod) }, + else => unreachable, + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } })).toValue(); } pub fn floatDiv( @@ -4513,56 +3452,41 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return floatDivScalar(lhs, rhs, float_type, arena, target); + return floatDivScalar(lhs, rhs, float_type, mod); } pub fn floatDivScalar( lhs: Value, rhs: Value, float_type: Type, - arena: Allocator, - target: Target, + mod: *Module, ) !Value { - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); - return Value.Tag.float_16.create(arena, lhs_val / rhs_val); - }, - 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); - return Value.Tag.float_32.create(arena, lhs_val / rhs_val); - }, - 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); - return Value.Tag.float_64.create(arena, lhs_val / rhs_val); - }, - 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); - return Value.Tag.float_80.create(arena, lhs_val / rhs_val); - }, - 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); - return Value.Tag.float_128.create(arena, lhs_val / rhs_val); - }, + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) / rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) / 
rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) / rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) / rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) / rhs.toFloat(f128, mod) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } })).toValue(); } pub fn floatDivFloor( @@ -4572,56 +3496,41 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return floatDivFloorScalar(lhs, rhs, float_type, arena, target); + return floatDivFloorScalar(lhs, rhs, float_type, mod); } pub fn floatDivFloorScalar( lhs: Value, rhs: Value, float_type: Type, - arena: Allocator, - target: Target, + mod: *Module, ) !Value { - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); - return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val)); - }, - 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); - return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val)); - }, - 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); - return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val)); - }, - 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); - return Value.Tag.float_80.create(arena, @divFloor(lhs_val, rhs_val)); - }, - 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); - return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val)); - }, + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @divFloor(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @divFloor(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @divFloor(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @divFloor(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @divFloor(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } })).toValue(); } pub fn floatDivTrunc( @@ -4631,56 +3540,41 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { - const result_data = try 
arena.alloc(Value, float_type.vectorLen()); + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return floatDivTruncScalar(lhs, rhs, float_type, arena, target); + return floatDivTruncScalar(lhs, rhs, float_type, mod); } pub fn floatDivTruncScalar( lhs: Value, rhs: Value, float_type: Type, - arena: Allocator, - target: Target, + mod: *Module, ) !Value { - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); - return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val)); - }, - 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); - return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val)); - }, - 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); - return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val)); - }, - 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); - return Value.Tag.float_80.create(arena, @divTrunc(lhs_val, rhs_val)); - }, - 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); - return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val)); - }, + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } })).toValue(); } pub fn floatMul( @@ -4690,616 +3584,489 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return floatMulScalar(lhs, rhs, float_type, arena, target);
+        return floatMulScalar(lhs, rhs, float_type, mod);
     }
 
     pub fn floatMulScalar(
         lhs: Value,
         rhs: Value,
         float_type: Type,
-        arena: Allocator,
-        target: Target,
+        mod: *Module,
     ) !Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const lhs_val = lhs.toFloat(f16);
-                const rhs_val = rhs.toFloat(f16);
-                return Value.Tag.float_16.create(arena, lhs_val * rhs_val);
-            },
-            32 => {
-                const lhs_val = lhs.toFloat(f32);
-                const rhs_val = rhs.toFloat(f32);
-                return Value.Tag.float_32.create(arena, lhs_val * rhs_val);
-            },
-            64 => {
-                const lhs_val = lhs.toFloat(f64);
-                const rhs_val = rhs.toFloat(f64);
-                return Value.Tag.float_64.create(arena, lhs_val * rhs_val);
-            },
-            80 => {
-                const lhs_val = lhs.toFloat(f80);
-                const rhs_val = rhs.toFloat(f80);
-                return Value.Tag.float_80.create(arena, lhs_val * rhs_val);
-            },
-            128 => {
-                const lhs_val = lhs.toFloat(f128);
-                const rhs_val = rhs.toFloat(f128);
-                return Value.Tag.float_128.create(arena, lhs_val * rhs_val);
-            },
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = lhs.toFloat(f16, mod) * rhs.toFloat(f16, mod) },
+            32 => .{ .f32 = lhs.toFloat(f32, mod) * rhs.toFloat(f32, mod) },
+            64 => .{ .f64 = lhs.toFloat(f64, mod) * rhs.toFloat(f64, mod) },
+            80 => .{ .f80 = lhs.toFloat(f80, mod) * rhs.toFloat(f80, mod) },
+            128 => .{ .f128 = lhs.toFloat(f128, mod) * rhs.toFloat(f128, mod) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try sqrtScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return sqrtScalar(val, float_type, arena, target);
+        return sqrtScalar(val, float_type, mod);
     }
 
-    pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @sqrt(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @sqrt(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @sqrt(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @sqrt(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @sqrt(f));
-            },
+    pub fn sqrtScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @sqrt(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @sqrt(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @sqrt(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @sqrt(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @sqrt(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try sinScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return sinScalar(val, float_type, arena, target);
+        return sinScalar(val, float_type, mod);
     }
 
-    pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @sin(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @sin(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @sin(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @sin(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @sin(f));
-            },
+    pub fn sinScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @sin(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @sin(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @sin(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @sin(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @sin(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try cosScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return cosScalar(val, float_type, arena, target);
+        return cosScalar(val, float_type, mod);
     }
 
-    pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @cos(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @cos(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @cos(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @cos(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @cos(f));
-            },
+    pub fn cosScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @cos(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @cos(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @cos(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @cos(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @cos(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try tanScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return tanScalar(val, float_type, arena, target);
+        return tanScalar(val, float_type, mod);
     }
 
-    pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @tan(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @tan(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @tan(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @tan(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @tan(f));
-            },
+    pub fn tanScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @tan(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @tan(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @tan(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @tan(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @tan(val.toFloat(f128, mod)) },
            else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
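These helpers are the constant folders behind ordinary comptime float expressions. For context only (this snippet is not part of the patch), a user-level test of the kind of code whose elements flow through `floatMulScalar` and `sqrtScalar` above:

```zig
const std = @import("std");

test "comptime vector float ops are folded element by element" {
    comptime {
        const a = @Vector(4, f32){ 1.0, 2.0, 3.0, 4.0 };
        const b = @Vector(4, f32){ 2.0, 2.0, 2.0, 2.0 };
        const prod = a * b; // folded via floatMul/floatMulScalar
        const roots = @sqrt(@Vector(2, f64){ 4.0, 9.0 }); // folded via sqrt/sqrtScalar
        try std.testing.expect(prod[3] == 8.0);
        try std.testing.expect(roots[0] == 2.0 and roots[1] == 3.0);
    }
}
```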
 
     pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try expScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return expScalar(val, float_type, arena, target);
+        return expScalar(val, float_type, mod);
     }
 
-    pub fn expScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @exp(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @exp(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @exp(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @exp(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @exp(f));
-            },
+    pub fn expScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @exp(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @exp(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @exp(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @exp(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @exp(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try exp2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return exp2Scalar(val, float_type, arena, target);
+        return exp2Scalar(val, float_type, mod);
     }
 
-    pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @exp2(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @exp2(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @exp2(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @exp2(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @exp2(f));
-            },
+    pub fn exp2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @exp2(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @exp2(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @exp2(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @exp2(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @exp2(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try logScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return logScalar(val, float_type, arena, target);
+        return logScalar(val, float_type, mod);
     }
 
-    pub fn logScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @log(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @log(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @log(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @log(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @log(f));
-            },
+    pub fn logScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @log(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @log(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @log(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @log(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @log(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try log2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return log2Scalar(val, float_type, arena, target);
+        return log2Scalar(val, float_type, mod);
     }
 
-    pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @log2(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @log2(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @log2(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @log2(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @log2(f));
-            },
+    pub fn log2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @log2(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @log2(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @log2(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @log2(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @log2(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try log10Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return log10Scalar(val, float_type, arena, target);
+        return log10Scalar(val, float_type, mod);
     }
 
-    pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @log10(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @log10(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @log10(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @log10(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @log10(f));
-            },
+    pub fn log10Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @log10(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @log10(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @log10(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @log10(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @log10(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try fabsScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return fabsScalar(val, float_type, arena, target);
+        return fabsScalar(val, float_type, mod);
     }
 
-    pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @fabs(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @fabs(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @fabs(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @fabs(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @fabs(f));
-            },
+    pub fn fabsScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @fabs(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @fabs(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @fabs(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @fabs(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @fabs(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try floorScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return floorScalar(val, float_type, arena, target);
+        return floorScalar(val, float_type, mod);
    }
 
-    pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @floor(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @floor(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @floor(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @floor(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @floor(f));
-            },
+    pub fn floorScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @floor(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @floor(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @floor(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @floor(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @floor(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try ceilScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return ceilScalar(val, float_type, arena, target);
+        return ceilScalar(val, float_type, mod);
     }
 
-    pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @ceil(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @ceil(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @ceil(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @ceil(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @ceil(f));
-            },
+    pub fn ceilScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @ceil(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @ceil(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @ceil(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @ceil(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @ceil(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try roundScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return roundScalar(val, float_type, arena, target);
+        return roundScalar(val, float_type, mod);
     }
 
-    pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @round(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @round(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @round(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @round(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @round(f));
-            },
+    pub fn roundScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @round(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @round(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @round(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @round(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @round(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
 
     pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try (try truncScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return truncScalar(val, float_type, arena, target);
+        return truncScalar(val, float_type, mod);
     }
 
-    pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const f = val.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @trunc(f));
-            },
-            32 => {
-                const f = val.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @trunc(f));
-            },
-            64 => {
-                const f = val.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @trunc(f));
-            },
-            80 => {
-                const f = val.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @trunc(f));
-            },
-            128 => {
-                const f = val.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @trunc(f));
-            },
+    pub fn truncScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @trunc(val.toFloat(f16, mod)) },
+            32 => .{ .f32 = @trunc(val.toFloat(f32, mod)) },
+            64 => .{ .f64 = @trunc(val.toFloat(f64, mod)) },
+            80 => .{ .f80 = @trunc(val.toFloat(f80, mod)) },
+            128 => .{ .f128 = @trunc(val.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
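The rounding family refactored above (`@floor`, `@ceil`, `@round`, `@trunc`) differs only in the builtin applied per element. A small runnable check of their semantics on negative inputs, for illustration only (not part of the patch):

```zig
const std = @import("std");

test "rounding builtins on negative inputs" {
    comptime {
        const x: f32 = -1.5;
        try std.testing.expect(@trunc(x) == -1.0); // toward zero
        try std.testing.expect(@floor(x) == -2.0); // toward negative infinity
        try std.testing.expect(@ceil(x) == -1.0); // toward positive infinity
        try std.testing.expect(@round(x) == -2.0); // to nearest, ties away from zero
    }
}
```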
 
     pub fn mulAdd(
@@ -5310,28 +4077,21 @@ pub const Value = extern union {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        const target = mod.getTarget();
-        if (float_type.zigTypeTag() == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+        if (float_type.zigTypeTag(mod) == .Vector) {
+            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
+            const scalar_ty = float_type.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var mulend1_buf: Value.ElemValueBuffer = undefined;
-                const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);
-                var mulend2_buf: Value.ElemValueBuffer = undefined;
-                const mulend2_elem = mulend2.elemValueBuffer(mod, i, &mulend2_buf);
-                var addend_buf: Value.ElemValueBuffer = undefined;
-                const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf);
-                scalar.* = try mulAddScalar(
-                    float_type.scalarType(),
-                    mulend1_elem,
-                    mulend2_elem,
-                    addend_elem,
-                    arena,
-                    target,
-                );
+                const mulend1_elem = try mulend1.elemValue(mod, i);
+                const mulend2_elem = try mulend2.elemValue(mod, i);
+                const addend_elem = try addend.elemValue(mod, i);
+                scalar.* = try (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = float_type.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
-        return mulAddScalar(float_type, mulend1, mulend2, addend, arena, target);
+        return mulAddScalar(float_type, mulend1, mulend2, addend, mod);
     }
 
     pub fn mulAddScalar(
@@ -5339,54 +4099,33 @@ pub const Value = extern union {
         mulend1: Value,
         mulend2: Value,
         addend: Value,
-        arena: Allocator,
-        target: Target,
+        mod: *Module,
     ) Allocator.Error!Value {
-        switch (float_type.floatBits(target)) {
-            16 => {
-                const m1 = mulend1.toFloat(f16);
-                const m2 = mulend2.toFloat(f16);
-                const a = addend.toFloat(f16);
-                return Value.Tag.float_16.create(arena, @mulAdd(f16, m1, m2, a));
-            },
-            32 => {
-                const m1 = mulend1.toFloat(f32);
-                const m2 = mulend2.toFloat(f32);
-                const a = addend.toFloat(f32);
-                return Value.Tag.float_32.create(arena, @mulAdd(f32, m1, m2, a));
-            },
-            64 => {
-                const m1 = mulend1.toFloat(f64);
-                const m2 = mulend2.toFloat(f64);
-                const a = addend.toFloat(f64);
-                return Value.Tag.float_64.create(arena, @mulAdd(f64, m1, m2, a));
-            },
-            80 => {
-                const m1 = mulend1.toFloat(f80);
-                const m2 = mulend2.toFloat(f80);
-                const a = addend.toFloat(f80);
-                return Value.Tag.float_80.create(arena, @mulAdd(f80, m1, m2, a));
-            },
-            128 => {
-                const m1 = mulend1.toFloat(f128);
-                const m2 = mulend2.toFloat(f128);
-                const a = addend.toFloat(f128);
-                return Value.Tag.float_128.create(arena, @mulAdd(f128, m1, m2, a));
-            },
+        const target = mod.getTarget();
+        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
+            16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, mod), mulend2.toFloat(f16, mod), addend.toFloat(f16, mod)) },
+            32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, mod), mulend2.toFloat(f32, mod), addend.toFloat(f32, mod)) },
+            64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, mod), mulend2.toFloat(f64, mod), addend.toFloat(f64, mod)) },
+            80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, mod), mulend2.toFloat(f80, mod), addend.toFloat(f80, mod)) },
+            128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, mod), mulend2.toFloat(f128, mod), addend.toFloat(f128, mod)) },
             else => unreachable,
-        }
+        };
+        return (try mod.intern(.{ .float = .{
+            .ty = float_type.toIntern(),
+            .storage = storage,
+        } })).toValue();
     }
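`@mulAdd` fuses the multiply and add with a single rounding step, which is why `mulAddScalar` cannot be expressed as a `floatMul` followed by an addition. A runnable sketch of the semantics, illustrative and not part of the patch:

```zig
const std = @import("std");

test "@mulAdd folds at comptime with a single rounding" {
    comptime {
        const r = @mulAdd(f64, 2.0, 3.0, 1.0); // 2 * 3 + 1, rounded once
        try std.testing.expect(r == 7.0);
    }
}
```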
 
     /// If the value is represented in-memory as a series of bytes that all
     /// have the same value, return that byte value, otherwise null.
-    pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value {
-        const target = mod.getTarget();
-        const abi_size = std.math.cast(usize, ty.abiSize(target)) orelse return null;
+    pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?Value {
+        const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null;
         assert(abi_size >= 1);
         const byte_buffer = try mod.gpa.alloc(u8, abi_size);
         defer mod.gpa.free(byte_buffer);
 
         writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) {
+            error.OutOfMemory => return error.OutOfMemory,
             error.ReinterpretDeclRef => return null,
             // TODO: The writeToMemory function was originally created for the purpose
             // of comptime pointer casting. However, it is now additionally being used
@@ -5400,118 +4139,22 @@ pub const Value = extern union {
         for (byte_buffer[1..]) |byte| {
             if (byte != first_byte) return null;
         }
-        value_buffer.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = first_byte,
-        };
-        return initPayload(&value_buffer.base);
+        return try mod.intValue(Type.u8, first_byte);
+    }
+
+    pub fn isGenericPoison(val: Value) bool {
+        return val.toIntern() == .generic_poison;
     }
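`hasRepeatedByteRepr` serializes the value and then scans the bytes for a uniform pattern, so a backend can lower the store to a memset. The core scan, restated as a standalone runnable sketch with a hypothetical helper name (not part of the patch):

```zig
const std = @import("std");

/// Hypothetical standalone restatement of the uniform-byte scan above.
fn repeatedByte(bytes: []const u8) ?u8 {
    std.debug.assert(bytes.len >= 1);
    const first_byte = bytes[0];
    for (bytes[1..]) |byte| {
        if (byte != first_byte) return null;
    }
    return first_byte;
}

test "repeated byte detection" {
    try std.testing.expectEqual(@as(?u8, 0xAA), repeatedByte(&[_]u8{ 0xAA, 0xAA, 0xAA }));
    try std.testing.expectEqual(@as(?u8, null), repeatedByte(&[_]u8{ 1, 2 }));
}
```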
 
     /// This type is not copyable since it may contain pointers to its inner data.
     pub const Payload = struct {
         tag: Tag,
 
-        pub const U32 = struct {
-            base: Payload,
-            data: u32,
-        };
-
-        pub const U64 = struct {
-            base: Payload,
-            data: u64,
-        };
-
-        pub const I64 = struct {
-            base: Payload,
-            data: i64,
-        };
-
-        pub const BigInt = struct {
-            base: Payload,
-            data: []const std.math.big.Limb,
-
-            pub fn asBigInt(self: BigInt) BigIntConst {
-                const positive = switch (self.base.tag) {
-                    .int_big_positive => true,
-                    .int_big_negative => false,
-                    else => unreachable,
-                };
-                return BigIntConst{ .limbs = self.data, .positive = positive };
-            }
-        };
-
-        pub const Function = struct {
-            base: Payload,
-            data: *Module.Fn,
-        };
-
-        pub const ExternFn = struct {
-            base: Payload,
-            data: *Module.ExternFn,
-        };
-
-        pub const Decl = struct {
-            base: Payload,
-            data: Module.Decl.Index,
-        };
-
-        pub const Variable = struct {
-            base: Payload,
-            data: *Module.Var,
-        };
-
-        pub const SubValue = struct {
-            base: Payload,
-            data: Value,
-        };
-
-        pub const DeclRefMut = struct {
-            pub const base_tag = Tag.decl_ref_mut;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: Data,
-
-            pub const Data = struct {
-                decl_index: Module.Decl.Index,
-                runtime_index: RuntimeIndex,
-            };
-        };
-
-        pub const PayloadPtr = struct {
+        pub const Slice = struct {
             base: Payload,
             data: struct {
-                container_ptr: Value,
-                container_ty: Type,
-            },
-        };
-
-        pub const ComptimeFieldPtr = struct {
-            base: Payload,
-            data: struct {
-                field_val: Value,
-                field_ty: Type,
-            },
-        };
-
-        pub const ElemPtr = struct {
-            pub const base_tag = Tag.elem_ptr;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: struct {
-                array_ptr: Value,
-                elem_ty: Type,
-                index: usize,
-            },
-        };
-
-        pub const FieldPtr = struct {
-            pub const base_tag = Tag.field_ptr;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: struct {
-                container_ptr: Value,
-                container_ty: Type,
-                field_index: usize,
+                ptr: Value,
+                len: Value,
             },
         };
 
@@ -5521,9 +4164,9 @@ pub const Value = extern union {
             data: []const u8,
         };
 
-        pub const StrLit = struct {
+        pub const SubValue = struct {
             base: Payload,
-            data: Module.StringLiteralContext.Key,
+            data: Value,
         };
 
         pub const Aggregate = struct {
@@ -5533,156 +4176,42 @@ pub const Value = extern union {
             data: []Value,
         };
 
-        pub const Slice = struct {
-            base: Payload,
-            data: struct {
-                ptr: Value,
-                len: Value,
-            },
-
-            pub const ptr_index = 0;
-            pub const len_index = 1;
-        };
-
-        pub const Ty = struct {
-            base: Payload,
-            data: Type,
-        };
-
-        pub const IntType = struct {
-            pub const base_tag = Tag.int_type;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: struct {
-                bits: u16,
-                signed: bool,
-            },
-        };
-
-        pub const Float_16 = struct {
-            pub const base_tag = Tag.float_16;
-
-            base: Payload = .{ .tag = base_tag },
-            data: f16,
-        };
-
-        pub const Float_32 = struct {
-            pub const base_tag = Tag.float_32;
-
-            base: Payload = .{ .tag = base_tag },
-            data: f32,
-        };
-
-        pub const Float_64 = struct {
-            pub const base_tag = Tag.float_64;
-
-            base: Payload = .{ .tag = base_tag },
-            data: f64,
-        };
-
-        pub const Float_80 = struct {
-            pub const base_tag = Tag.float_80;
-
-            base: Payload = .{ .tag = base_tag },
-            data: f80,
-        };
-
-        pub const Float_128 = struct {
-            pub const base_tag = Tag.float_128;
-
-            base: Payload = .{ .tag = base_tag },
-            data: f128,
-        };
-
-        pub const Error = struct {
-            base: Payload = .{ .tag = .@"error" },
-            data: struct {
-                /// `name` is owned by `Module` and will be valid for the entire
-                /// duration of the compilation.
-                /// TODO revisit this when we have the concept of the error tag type
-                name: []const u8,
-            },
-        };
-
-        pub const InferredAlloc = struct {
-            pub const base_tag = Tag.inferred_alloc;
-
-            base: Payload = .{ .tag = base_tag },
-            data: struct {
-                /// The value stored in the inferred allocation. This will go into
-                /// peer type resolution. This is stored in a separate list so that
-                /// the items are contiguous in memory and thus can be passed to
-                /// `Module.resolvePeerTypes`.
-                prongs: std.MultiArrayList(struct {
-                    /// The dummy instruction used as a peer to resolve the type.
-                    /// Although this has a redundant type with placeholder, this is
-                    /// needed in addition because it may be a constant value, which
-                    /// affects peer type resolution.
-                    stored_inst: Air.Inst.Ref,
-                    /// The bitcast instruction used as a placeholder when the
-                    /// new result pointer type is not yet known.
-                    placeholder: Air.Inst.Index,
-                }) = .{},
-                /// 0 means ABI-aligned.
-                alignment: u32,
-            },
-        };
-
-        pub const InferredAllocComptime = struct {
-            pub const base_tag = Tag.inferred_alloc_comptime;
-
-            base: Payload = .{ .tag = base_tag },
-            data: struct {
-                decl_index: Module.Decl.Index,
-                /// 0 means ABI-aligned.
-                alignment: u32,
-            },
-        };
-
         pub const Union = struct {
             pub const base_tag = Tag.@"union";
 
             base: Payload = .{ .tag = base_tag },
-            data: struct {
+            data: Data,
+
+            pub const Data = struct {
                 tag: Value,
                 val: Value,
-            },
+            };
         };
     };
 
-    /// Big enough to fit any non-BigInt value
-    pub const BigIntSpace = struct {
-        /// The +1 is headroom so that operations such as incrementing once or decrementing once
-        /// are possible without using an allocator.
-        limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
-    };
+    pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
 
-    pub const zero = initTag(.zero);
-    pub const one = initTag(.one);
-    pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base };
-    pub const undef = initTag(.undef);
-    pub const @"void" = initTag(.void_value);
-    pub const @"null" = initTag(.null_value);
-    pub const @"false" = initTag(.bool_false);
-    pub const @"true" = initTag(.bool_true);
+    pub const zero_usize: Value = .{ .ip_index = .zero_usize, .legacy = undefined };
+    pub const zero_u8: Value = .{ .ip_index = .zero_u8, .legacy = undefined };
+    pub const zero_comptime_int: Value = .{ .ip_index = .zero, .legacy = undefined };
+    pub const one_comptime_int: Value = .{ .ip_index = .one, .legacy = undefined };
+    pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one, .legacy = undefined };
+    pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined };
+    pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined };
+    pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined };
+    pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined };
+    pub const @"true": Value = .{ .ip_index = .bool_true, .legacy = undefined };
+    pub const @"unreachable": Value = .{ .ip_index = .unreachable_value, .legacy = undefined };
+
+    pub const generic_poison: Value = .{ .ip_index = .generic_poison, .legacy = undefined };
+    pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined };
+    pub const empty_struct: Value = .{ .ip_index = .empty_struct, .legacy = undefined };
 
     pub fn makeBool(x: bool) Value {
         return if (x) Value.true else Value.false;
     }
 
-    pub fn boolToInt(x: bool) Value {
-        return if (x) Value.one else Value.zero;
-    }
-
-    pub const RuntimeIndex = enum(u32) {
-        zero = 0,
-        comptime_field_ptr = std.math.maxInt(u32),
-        _,
-
-        pub fn increment(ri: *RuntimeIndex) void {
-            ri.* = @intToEnum(RuntimeIndex, @enumToInt(ri.*) + 1);
-        }
-    };
+    pub const RuntimeIndex = InternPool.RuntimeIndex;
 
     /// This function is used in the debugger pretty formatters in tools/ to fetch the
     /// Tag to Payload mapping to facilitate fancy debug printing for this type.
@@ -5691,7 +4220,7 @@ pub const Value = extern union {
         var fields: [tags.len]std.builtin.Type.StructField = undefined;
         for (&fields, tags) |*field, t| field.* = .{
             .name = t.name,
-            .type = *if (t.value < Tag.no_payload_count) void else @field(Tag, t.name).Type(),
+            .type = *@field(Tag, t.name).Type(),
             .default_value = null,
             .is_comptime = false,
             .alignment = 0,
@@ -5713,8 +4242,3 @@ pub const Value = extern union {
         }
     }
 };
-
-var negative_one_payload: Value.Payload.I64 = .{
-    .base = .{ .tag = .int_i64 },
-    .data = -1,
-};
diff --git a/test/behavior/bugs/1381.zig b/test/behavior/bugs/1381.zig
index 90941de341..f35c963df3 100644
--- a/test/behavior/bugs/1381.zig
+++ b/test/behavior/bugs/1381.zig
@@ -17,6 +17,7 @@ test "union that needs padding bytes inside an array" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 
     var as = [_]A{
         A{ .B = B{ .D = 1 } },
diff --git a/test/behavior/bugs/6456.zig b/test/behavior/bugs/6456.zig
index 297c9c7423..31dea02cf6 100644
--- a/test/behavior/bugs/6456.zig
+++ b/test/behavior/bugs/6456.zig
@@ -24,7 +24,7 @@ test "issue 6456" {
             .alignment = 0,
             .name = name,
             .type = usize,
-            .default_value = &@as(?usize, null),
+            .default_value = null,
             .is_comptime = false,
         }};
     }
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 594bf683e5..e9ebd4476b 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -746,8 +746,8 @@ test "peer type resolution: disjoint error sets" {
         try expect(error_set_info == .ErrorSet);
         try expect(error_set_info.ErrorSet.?.len == 3);
         try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
-        try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
-        try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
+        try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
+        try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
     }
 
     {
@@ -756,8 +756,8 @@ test "peer type resolution: disjoint error sets" {
         try expect(error_set_info == .ErrorSet);
         try expect(error_set_info.ErrorSet.?.len == 3);
         try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
-        try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
-        try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
+        try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
+        try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
     }
 }
 
@@ -778,8 +778,8 @@ test "peer type resolution: error union and error set" {
         const error_set_info = @typeInfo(info.ErrorUnion.error_set);
         try expect(error_set_info.ErrorSet.?.len == 3);
         try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
-        try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
-        try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
+        try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
+        try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
     }
 
     {
@@ -790,8 +790,8 @@ test "peer type resolution: error union and error set" {
         const error_set_info = @typeInfo(info.ErrorUnion.error_set);
         try expect(error_set_info.ErrorSet.?.len == 3);
         try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
-        try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
-        try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
+        try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
+        try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
     }
 }
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index 2fdb112a72..e8199913bc 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -214,8 +214,8 @@ test "type info: error set merged" {
     try expect(error_set_info == .ErrorSet);
     try expect(error_set_info.ErrorSet.?.len == 3);
     try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
-    try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
-    try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
+    try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
+    try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
 }
 
 test "type info: enum info" {
diff --git a/test/cases/compile_errors/access_non-existent_member_of_error_set.zig b/test/cases/compile_errors/access_non-existent_member_of_error_set.zig
index 765bbe59c3..7f0bc562ac 100644
--- a/test/cases/compile_errors/access_non-existent_member_of_error_set.zig
+++ b/test/cases/compile_errors/access_non-existent_member_of_error_set.zig
@@ -9,4 +9,3 @@ comptime {
 // target=native
 //
 // :3:18: error: no error named 'Bar' in 'error{A}'
-// :1:13: note: error set declared here
diff --git a/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig b/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig
index 8a39fdec46..64fae2aac3 100644
--- a/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig
+++ b/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig
@@ -14,4 +14,4 @@ export fn entry() void {
 // :2:5: error: found compile log statement
 //
 // Compile Log Output:
-// @as(*const [3:0]u8, "i32\x00")
+// @as(*const [3:0]u8, "i32")
diff --git a/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig b/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig
index 22fc965769..a3af883198 100644
--- a/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig
+++ b/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig
@@ -1,5 +1,5 @@
-const Set1 = error {A, B};
-const Set2 = error {A, C};
+const Set1 = error{ A, B };
+const Set2 = error{ A, C };
 comptime {
     var x = Set1.B;
     var y = @errSetCast(Set2, x);
@@ -10,5 +10,4 @@ comptime {
 // backend=stage2
 // target=native
 //
-// :5:13: error: 'error.B' not a member of error set 'error{A,C}'
-// :2:14: note: error set declared here
+// :5:13: error: 'error.B' not a member of error set 'error{C,A}'
diff --git a/test/cases/compile_errors/implicit_cast_of_error_set_not_a_subset.zig b/test/cases/compile_errors/implicit_cast_of_error_set_not_a_subset.zig
index 0a182343b9..5e5b57680c 100644
--- a/test/cases/compile_errors/implicit_cast_of_error_set_not_a_subset.zig
+++ b/test/cases/compile_errors/implicit_cast_of_error_set_not_a_subset.zig
@@ -1,5 +1,5 @@
-const Set1 = error{A, B};
-const Set2 = error{A, C};
+const Set1 = error{ A, B };
+const Set2 = error{ A, C };
 export fn entry() void {
     foo(Set1.B);
 }
@@ -12,5 +12,5 @@ fn foo(set1: Set1) void {
 // backend=stage2
 // target=native
 //
-// :7:19: error: expected type 'error{A,C}', found 'error{A,B}'
+// :7:19: error: expected type 'error{C,A}', found 'error{A,B}'
 // :7:19: note: 'error.B' not a member of destination error set
diff --git a/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig b/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig
index 43aad76f45..f837ccd532 100644
--- a/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig
+++ b/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig
@@ -16,5 +16,4 @@ comptime {
 // backend=llvm
 // target=native
 //
-// :11:13: error: 'error.B' not a member of error set 'error{A,C}'
-// :5:14: note: error set declared here
+// :11:13: error: 'error.B' not a member of error set 'error{C,A}'
diff --git a/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig b/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig
index c7fc39f769..fa58c0845a 100644
--- a/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig
+++ b/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig
@@ -24,5 +24,5 @@ export fn bar() void {
 //
 // :12:16: error: runtime coercion to union 'tmp.U' from non-exhaustive enum
 // :1:11: note: enum declared here
-// :17:16: error: union 'tmp.U' has no tag with value '15'
+// :17:16: error: union 'tmp.U' has no tag with value '@intToEnum(tmp.E, 15)'
 // :6:11: note: union declared here
diff --git a/test/cases/compile_errors/pointer_attributes_checked_when_coercing_pointer_to_anon_literal.zig b/test/cases/compile_errors/pointer_attributes_checked_when_coercing_pointer_to_anon_literal.zig
index c0a0b06af0..da7f2492d1 100644
--- a/test/cases/compile_errors/pointer_attributes_checked_when_coercing_pointer_to_anon_literal.zig
+++ b/test/cases/compile_errors/pointer_attributes_checked_when_coercing_pointer_to_anon_literal.zig
@@ -16,9 +16,9 @@ comptime {
 // backend=stage2
 // target=native
 //
-// :2:29: error: expected type '[][]const u8', found '*const tuple{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}'
+// :2:29: error: expected type '[][]const u8', found '*const struct{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}'
 // :2:29: note: cast discards const qualifier
-// :6:31: error: expected type '*[2][]const u8', found '*const tuple{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}'
+// :6:31: error: expected type '*[2][]const u8', found '*const struct{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}'
 // :6:31: note: cast discards const qualifier
 // :11:19: error: expected type '*tmp.S', found '*const struct{comptime a: comptime_int = 2}'
 // :11:19: note: cast discards const qualifier
diff --git a/test/cases/compile_errors/return_invalid_type_from_test.zig b/test/cases/compile_errors/return_invalid_type_from_test.zig
index a954bd7ee5..acc932cb0a 100644
--- a/test/cases/compile_errors/return_invalid_type_from_test.zig
+++ b/test/cases/compile_errors/return_invalid_type_from_test.zig
@@ -1,8 +1,10 @@
-test "example" { return 1; }
+test "example" {
+    return 1;
+}
 
 // error
 // backend=stage2
 // target=native
 // is_test=1
 //
-// :1:25: error: expected type '@typeInfo(@typeInfo(@TypeOf(tmp.test.example)).Fn.return_type.?).ErrorUnion.error_set!void', found 'comptime_int'
\ No newline at end of file
+// :2:12: error: expected type 'anyerror!void', found 'comptime_int'
diff --git a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig
index 1de0d1c145..3523a36054 100644
--- a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig
+++ b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig
@@ -1,5 +1,5 @@
 test "enum" {
-    const E = enum(u8) {A, B, _};
+    const E = enum(u8) { A, B, _ };
     _ = @tagName(@intToEnum(E, 5));
 }
 
@@ -8,5 +8,5 @@ test "enum" {
 // target=native
 // is_test=1
 //
-// :3:9: error: no field with value '5' in enum 'test.enum.E'
+// :3:9: error: no field with value '@intToEnum(tmp.test.enum.E, 5)' in enum 'test.enum.E'
 // :2:15: note: declared here
diff --git a/test/cases/compile_errors/tuple_init_edge_cases.zig b/test/cases/compile_errors/tuple_init_edge_cases.zig
index 32b52cdc1f..f093515a38 100644
--- a/test/cases/compile_errors/tuple_init_edge_cases.zig
+++ b/test/cases/compile_errors/tuple_init_edge_cases.zig
@@ -41,4 +41,4 @@ pub export fn entry5() void {
 // :12:14: error: missing tuple field with index 1
 // :17:14: error: missing tuple field with index 1
 // :29:14: error: expected at most 2 tuple fields; found 3
-// :34:30: error: index '2' out of bounds of tuple 'tuple{comptime comptime_int = 123, u32}'
+// :34:30: error: index '2' out of bounds of tuple 'struct{comptime comptime_int = 123, u32}'
diff --git a/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig b/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig
index 284d3c0d0d..9f360e2afe 100644
--- a/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig
+++ b/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig
@@ -7,4 +7,4 @@ export fn entry() void {
 // backend=stage2
 // target=native
 //
-// :3:11: error: expected type '@TypeOf(.{})', found 'tuple{comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3}'
+// :3:11: error: expected type '@TypeOf(.{})', found 'struct{comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3}'
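The tuple diagnostics now print as `struct{...}` because a tuple is an anonymous struct whose type info has `is_tuple` set. A quick runnable check, illustrative and not part of the patch:

```zig
const std = @import("std");

test "a tuple is an anonymous struct with is_tuple set" {
    const t = .{ 123, @as(u32, 456) };
    try std.testing.expect(@typeInfo(@TypeOf(t)).Struct.is_tuple);
}
```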
self.entry_type.fields: @@ -266,7 +266,7 @@ class std_MultiArrayList_Slice_SynthProvider: except: return -1 def get_child_at_index(self, index): try: - if index < 0 or index >= self.len: return None + if index not in range(self.len): return None data = lldb.SBData() for field in self.entry_type.fields: field_type = field.type.GetPointeeType() @@ -328,7 +328,7 @@ class std_Entry_SynthProvider: def has_children(self): return self.num_children() != 0 def num_children(self): return len(self.children) def get_child_index(self, name): return self.indices.get(name) - def get_child_at_index(self, index): return self.children[index].deref if index >= 0 and index < len(self.children) else None + def get_child_at_index(self, index): return self.children[index].deref if index in range(len(self.children)) else None # Define Zig Stage2 Compiler @@ -345,11 +345,17 @@ class TagAndPayload_SynthProvider: def get_child_index(self, name): try: return ('tag', 'payload').index(name) except: return -1 - def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index >= 0 and index < 2 else None + def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None -def Inst_Ref_SummaryProvider(value, _=None): +def Zir_Inst__Zir_Inst_Ref_SummaryProvider(value, _=None): members = value.type.enum_members - return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned - len(members)) + # ignore .var_args_param_type and .none + return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 2 - len(members)) + +def Air_Inst__Air_Inst_Ref_SummaryProvider(value, _=None): + members = value.type.enum_members + # ignore .var_args_param_type and .none + return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 2 - len(members)) class Module_Decl__Module_Decl_Index_SynthProvider: def __init__(self, value, _=None): self.value = value @@ -359,7 +365,7 @@ class Module_Decl__Module_Decl_Index_SynthProvider: mod = frame.FindVariable('mod') or frame.FindVariable('module') if mod: break else: return - self.ptr = mod.GetChildMemberWithName('allocated_decls').GetChildAtIndex(self.value.unsigned).Clone('decl') + self.ptr = mod.GetChildMemberWithName('allocated_decls').GetChildAtIndex(self.value.unsigned).address_of.Clone('decl') except: pass def has_children(self): return True def num_children(self): return 1 @@ -392,7 +398,7 @@ class TagOrPayloadPtr_SynthProvider: def get_child_index(self, name): try: return ('tag', 'payload').index(name) except: return -1 - def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index >= 0 and index < 2 else None + def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None def Module_Decl_name(decl): error = lldb.SBError() @@ -407,6 +413,89 @@ def Module_Decl_RenderFullyQualifiedName(decl): return '.'.join((Module_Namespac def OwnerDecl_RenderFullyQualifiedName(payload): return Module_Decl_RenderFullyQualifiedName(payload.GetChildMemberWithName('owner_decl').GetChildMemberWithName('decl')) +def InternPool_Find(thread): + for frame in thread: + ip = frame.FindVariable('ip') or frame.FindVariable('intern_pool') + if ip: return ip + mod = frame.FindVariable('mod') or frame.FindVariable('module') + if mod: + ip = mod.GetChildMemberWithName('intern_pool') + if ip: return ip + +class 
+class InternPool_Index_SynthProvider:
+    def __init__(self, value, _=None): self.value = value
+    def update(self):
+        try:
+            index_type = self.value.type
+            for helper in self.value.target.FindFunctions('%s.dbHelper' % index_type.name, lldb.eFunctionNameTypeFull):
+                ptr_self_type, ptr_tag_to_encoding_map_type = helper.function.type.GetFunctionArgumentTypes()
+                if ptr_self_type.GetPointeeType() == index_type: break
+            else: return
+            tag_to_encoding_map = {field.name: field.type for field in ptr_tag_to_encoding_map_type.GetPointeeType().fields}
+
+            ip = InternPool_Find(self.value.thread)
+            if not ip: return
+            self.item = ip.GetChildMemberWithName('items').GetChildAtIndex(self.value.unsigned)
+            extra = ip.GetChildMemberWithName('extra').GetChildMemberWithName('items')
+            self.tag = self.item.GetChildMemberWithName('tag').Clone('tag')
+            self.data = None
+            self.trailing = None
+            data = self.item.GetChildMemberWithName('data')
+            encoding_type = tag_to_encoding_map[self.tag.value]
+            dynamic_values = {}
+            for encoding_field in encoding_type.fields:
+                if encoding_field.name == 'data':
+                    if encoding_field.type.IsPointerType():
+                        data_type = encoding_field.type.GetPointeeType()
+                        extra_index = data.unsigned
+                        self.data = extra.GetChildAtIndex(extra_index).Cast(data_type).Clone('data')
+                        extra_index += data_type.num_fields
+                    else:
+                        self.data = data.Cast(encoding_field.type).Clone('data')
+                elif encoding_field.name == 'trailing':
+                    trailing_data = lldb.SBData()
+                    for trailing_field in encoding_field.type.fields:
+                        trailing_data.Append(extra.GetChildAtIndex(extra_index).address_of.data)
+                        trailing_len = dynamic_values['trailing.%s.len' % trailing_field.name].unsigned
+                        trailing_data.Append(lldb.SBData.CreateDataFromInt(trailing_len, trailing_data.GetAddressByteSize()))
+                        extra_index += trailing_len
+                    self.trailing = self.data.CreateValueFromData('trailing', trailing_data, encoding_field.type)
+                else:
+                    for path in encoding_field.type.GetPointeeType().name.removeprefix('%s::' % encoding_type.name).removeprefix('%s.' % encoding_type.name).partition('__')[0].split(' orelse '):
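+                        # Each alternative (split on ' orelse ') is an expression
+                        # path into 'data' locating this dynamic value, e.g. the
+                        # length of a trailing field.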
+                        if path.startswith('data.'):
+                            root = self.data
+                            path = path[len('data'):]
+                        else: return
+                        dynamic_value = root.GetValueForExpressionPath(path)
+                        if dynamic_value:
+                            dynamic_values[encoding_field.name] = dynamic_value
+                            break
+        except: pass
+    def has_children(self): return True
+    def num_children(self): return 2 + (self.trailing is not None)
+    def get_child_index(self, name):
+        try: return ('tag', 'data', 'trailing').index(name)
+        except: return -1
+    def get_child_at_index(self, index): return (self.tag, self.data, self.trailing)[index] if index in range(3) else None
+
+def InternPool_NullTerminatedString_SummaryProvider(value, _=None):
+    try:
+        ip = InternPool_Find(value.thread)
+        if not ip: return
+        items = ip.GetChildMemberWithName('string_bytes').GetChildMemberWithName('items')
+        b = bytearray()
+        i = 0
+        while True:
+            x = items.GetChildAtIndex(value.unsigned + i).GetValueAsUnsigned()
+            if x == 0: break
+            b.append(x)
+            i += 1
+        s = b.decode(encoding='utf8', errors='backslashreplace')
+        s1 = s if s.isprintable() else ''.join((c if c.isprintable() else '\\x%02x' % ord(c) for c in s))
+        return '"%s"' % s1
+    except:
+        pass
+
 def type_Type_pointer(payload):
     pointee_type = payload.GetChildMemberWithName('pointee_type')
     sentinel = payload.GetChildMemberWithName('sentinel').GetChildMemberWithName('child')
@@ -468,8 +557,8 @@ type_tag_handlers = {
     'empty_struct_literal': lambda payload: '@TypeOf(.{})',
     'anyerror_void_error_union': lambda payload: 'anyerror!void',
-    'const_slice_u8': lambda payload: '[]const u8',
-    'const_slice_u8_sentinel_0': lambda payload: '[:0]const u8',
+    'slice_const_u8': lambda payload: '[]const u8',
+    'slice_const_u8_sentinel_0': lambda payload: '[:0]const u8',
     'fn_noreturn_no_args': lambda payload: 'fn() noreturn',
     'fn_void_no_args': lambda payload: 'fn() void',
     'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.Naked) noreturn',
@@ -495,7 +584,7 @@ type_tag_handlers = {
     'many_mut_pointer': lambda payload: '[*]%s' % type_Type_SummaryProvider(payload),
     'c_const_pointer': lambda payload: '[*c]const %s' % type_Type_SummaryProvider(payload),
     'c_mut_pointer': lambda payload: '[*c]%s' % type_Type_SummaryProvider(payload),
-    'const_slice': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload),
+    'slice_const': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload),
     'mut_slice': lambda payload: '[]%s' % type_Type_SummaryProvider(payload),
     'int_signed': lambda payload: 'i%d' % payload.unsigned,
     'int_unsigned': lambda payload: 'u%d' % payload.unsigned,
@@ -611,13 +700,19 @@ def __lldb_init_module(debugger, _=None):
     add(debugger, category='zig.stage2', type='Zir.Inst', identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
     add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Zir\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
     add(debugger, category='zig.stage2', regex=True, type='^Zir\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
-    add(debugger, category='zig.stage2', type='Zir.Inst::Zir.Inst.Ref', identifier='Inst_Ref', summary=True)
+    add(debugger, category='zig.stage2', type='Zir.Inst::Zir.Inst.Ref', summary=True)
     add(debugger, category='zig.stage2', type='Air.Inst', identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
+    add(debugger, category='zig.stage2', type='Air.Inst::Air.Inst.Ref', summary=True)
     add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
     add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
     add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True)
-    add(debugger, category='zig.stage2', type='type.Type', identifier='TagOrPayloadPtr', synth=True)
-    add(debugger, category='zig.stage2', type='type.Type', summary=True)
-    add(debugger, category='zig.stage2', type='value.Value', identifier='TagOrPayloadPtr', synth=True)
-    add(debugger, category='zig.stage2', type='value.Value', summary=True)
+    add(debugger, category='zig.stage2', type='Module.LazySrcLoc', identifier='zig_TaggedUnion', synth=True)
+    add(debugger, category='zig.stage2', type='InternPool.Index', synth=True)
+    add(debugger, category='zig.stage2', type='InternPool.NullTerminatedString', summary=True)
+    add(debugger, category='zig.stage2', type='InternPool.Key', identifier='zig_TaggedUnion', synth=True)
+    add(debugger, category='zig.stage2', type='InternPool.Key.Int.Storage', identifier='zig_TaggedUnion', synth=True)
+    add(debugger, category='zig.stage2', type='InternPool.Key.ErrorUnion.Value', identifier='zig_TaggedUnion', synth=True)
+    add(debugger, category='zig.stage2', type='InternPool.Key.Float.Storage', identifier='zig_TaggedUnion', synth=True)
+    add(debugger, category='zig.stage2', type='InternPool.Key.Ptr.Addr', identifier='zig_TaggedUnion', synth=True)
+    add(debugger, category='zig.stage2', type='InternPool.Key.Aggregate.Storage', identifier='zig_TaggedUnion', synth=True)
     add(debugger, category='zig.stage2', type='arch.x86_64.CodeGen.MCValue', identifier='zig_TaggedUnion', synth=True, inline_children=True, summary=True)
diff --git a/tools/stage2_gdb_pretty_printers.py b/tools/stage2_gdb_pretty_printers.py
index bd64916536..f10e924855 100644
--- a/tools/stage2_gdb_pretty_printers.py
+++ b/tools/stage2_gdb_pretty_printers.py
@@ -18,7 +18,7 @@ class TypePrinter:
         'many_mut_pointer': 'Type.Payload.ElemType',
         'c_const_pointer': 'Type.Payload.ElemType',
         'c_mut_pointer': 'Type.Payload.ElemType',
-        'const_slice': 'Type.Payload.ElemType',
+        'slice_const': 'Type.Payload.ElemType',
         'mut_slice': 'Type.Payload.ElemType',
         'optional': 'Type.Payload.ElemType',
         'optional_single_mut_pointer': 'Type.Payload.ElemType',