diff --git a/CMakeLists.txt b/CMakeLists.txt
index 33cdb66b5d..a33df3a096 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -522,6 +522,7 @@ set(ZIG_STAGE2_SOURCES
     src/Sema.zig
     src/Sema/bitcast.zig
     src/Sema/comptime_ptr_access.zig
+    src/Type.zig
     src/Value.zig
     src/Zcu.zig
     src/arch/aarch64/CodeGen.zig
@@ -673,7 +674,6 @@ set(ZIG_STAGE2_SOURCES
     src/target.zig
     src/tracy.zig
     src/translate_c.zig
-    src/type.zig
     src/wasi_libc.zig
 )
diff --git a/build.zig b/build.zig
index 3898acc6ac..0f0d7d4d67 100644
--- a/build.zig
+++ b/build.zig
@@ -82,15 +82,6 @@ pub fn build(b: *std.Build) !void {
     docs_step.dependOn(langref_step);
     docs_step.dependOn(std_docs_step);
 
-    const check_case_exe = b.addExecutable(.{
-        .name = "check-case",
-        .root_source_file = b.path("test/src/Cases.zig"),
-        .target = b.graph.host,
-        .optimize = optimize,
-        .single_threaded = single_threaded,
-    });
-    check_case_exe.stack_size = stack_size;
-
     const skip_debug = b.option(bool, "skip-debug", "Main test suite skips debug builds") orelse false;
     const skip_release = b.option(bool, "skip-release", "Main test suite skips release builds") orelse false;
     const skip_release_small = b.option(bool, "skip-release-small", "Main test suite skips release-small builds") orelse skip_release;
@@ -222,7 +213,6 @@ pub fn build(b: *std.Build) !void {
     if (target.result.os.tag == .windows and target.result.abi == .gnu) {
         // LTO is currently broken on mingw, this can be removed when it's fixed.
         exe.want_lto = false;
-        check_case_exe.want_lto = false;
     }
 
     const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");
@@ -245,7 +235,6 @@ pub fn build(b: *std.Build) !void {
 
     if (link_libc) {
         exe.linkLibC();
-        check_case_exe.linkLibC();
     }
 
     const is_debug = optimize == .Debug;
@@ -339,21 +328,17 @@ pub fn build(b: *std.Build) !void {
         }
 
         try addCmakeCfgOptionsToExe(b, cfg, exe, use_zig_libcxx);
-        try addCmakeCfgOptionsToExe(b, cfg, check_case_exe, use_zig_libcxx);
     } else {
         // Here we are -Denable-llvm but no cmake integration.
         try addStaticLlvmOptionsToExe(exe);
-        try addStaticLlvmOptionsToExe(check_case_exe);
     }
 
     if (target.result.os.tag == .windows) {
-        inline for (.{ exe, check_case_exe }) |artifact| {
-            // LLVM depends on networking as of version 18.
-            artifact.linkSystemLibrary("ws2_32");
+        // LLVM depends on networking as of version 18.
+        exe.linkSystemLibrary("ws2_32");
-            artifact.linkSystemLibrary("version");
-            artifact.linkSystemLibrary("uuid");
-            artifact.linkSystemLibrary("ole32");
-        }
+        exe.linkSystemLibrary("version");
+        exe.linkSystemLibrary("uuid");
+        exe.linkSystemLibrary("ole32");
     }
 }
@@ -394,7 +379,6 @@ pub fn build(b: *std.Build) !void {
     const test_filters = b.option([]const []const u8, "test-filter", "Skip tests that do not match any filter") orelse &[0][]const u8{};
 
     const test_cases_options = b.addOptions();
-    check_case_exe.root_module.addOptions("build_options", test_cases_options);
 
     test_cases_options.addOption(bool, "enable_tracy", false);
     test_cases_options.addOption(bool, "enable_debug_extensions", enable_debug_extensions);
@@ -458,7 +442,7 @@ pub fn build(b: *std.Build) !void {
     test_step.dependOn(check_fmt);
 
     const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
-    try tests.addCases(b, test_cases_step, test_filters, check_case_exe, target, .{
+    try tests.addCases(b, test_cases_step, test_filters, target, .{
         .skip_translate_c = skip_translate_c,
         .skip_run_translated_c = skip_run_translated_c,
     }, .{
diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig
index a1db48b470..f5ce0c8da8 100644
--- a/lib/std/dynamic_library.zig
+++ b/lib/std/dynamic_library.zig
@@ -17,12 +17,15 @@ pub const DynLib = struct {
             DlDynLib,
         .windows => WindowsDynLib,
         .macos, .tvos, .watchos, .ios, .visionos, .freebsd, .netbsd, .openbsd, .dragonfly, .solaris, .illumos => DlDynLib,
-        else => @compileError("unsupported platform"),
+        else => struct {
+            const open = @compileError("unsupported platform");
+            const openZ = @compileError("unsupported platform");
+        },
     };
 
     inner: InnerType,
 
-    pub const Error = ElfDynLib.Error || DlDynLib.Error || WindowsDynLib.Error;
+    pub const Error = ElfDynLibError || DlDynLibError || WindowsDynLibError;
 
     /// Trusts the file. Malicious file will be able to execute arbitrary code.
     pub fn open(path: []const u8) Error!DynLib {
@@ -122,6 +125,18 @@ pub fn linkmap_iterator(phdrs: []elf.Phdr) error{InvalidExe}!LinkMap.Iterator {
     return .{ .current = link_map_ptr };
 }
 
+/// Separated to avoid referencing `ElfDynLib`, because its field types may not
+/// be valid on other targets.
+const ElfDynLibError = error{
+    FileTooBig,
+    NotElfFile,
+    NotDynamicLibrary,
+    MissingDynamicLinkingInformation,
+    ElfStringSectionNotFound,
+    ElfSymSectionNotFound,
+    ElfHashTableNotFound,
+} || posix.OpenError || posix.MMapError;
+
 pub const ElfDynLib = struct {
     strings: [*:0]u8,
     syms: [*]elf.Sym,
@@ -130,15 +145,7 @@ pub const ElfDynLib = struct {
     verdef: ?*elf.Verdef,
     memory: []align(mem.page_size) u8,
 
-    pub const Error = error{
-        FileTooBig,
-        NotElfFile,
-        NotDynamicLibrary,
-        MissingDynamicLinkingInformation,
-        ElfStringSectionNotFound,
-        ElfSymSectionNotFound,
-        ElfHashTableNotFound,
-    } || posix.OpenError || posix.MMapError;
+    pub const Error = ElfDynLibError;
 
     /// Trusts the file. Malicious file will be able to execute arbitrary code.
     pub fn open(path: []const u8) Error!ElfDynLib {
@@ -350,11 +357,15 @@ test "ElfDynLib" {
     try testing.expectError(error.FileNotFound, ElfDynLib.open("invalid_so.so"));
 }
 
+/// Separated to avoid referencing `WindowsDynLib`, because its field types may not
+/// be valid on other targets.
+const WindowsDynLibError = error{
+    FileNotFound,
+    InvalidPath,
+} || windows.LoadLibraryError;
+
 pub const WindowsDynLib = struct {
-    pub const Error = error{
-        FileNotFound,
-        InvalidPath,
-    } || windows.LoadLibraryError;
+    pub const Error = WindowsDynLibError;
 
     dll: windows.HMODULE,
 
@@ -413,8 +424,12 @@ pub const WindowsDynLib = struct {
     }
 };
 
+/// Separated to avoid referencing `DlDynLib`, because its field types may not
+/// be valid on other targets.
+const DlDynLibError = error{ FileNotFound, NameTooLong };
+
 pub const DlDynLib = struct {
-    pub const Error = error{ FileNotFound, NameTooLong };
+    pub const Error = DlDynLibError;
 
     handle: *anyopaque,
diff --git a/lib/std/http.zig b/lib/std/http.zig
index af966d89e7..621c7a5f0d 100644
--- a/lib/std/http.zig
+++ b/lib/std/http.zig
@@ -311,13 +311,13 @@ const builtin = @import("builtin");
 const std = @import("std.zig");
 
 test {
-    _ = Client;
-    _ = Method;
-    _ = Server;
-    _ = Status;
-    _ = HeadParser;
-    _ = ChunkParser;
     if (builtin.os.tag != .wasi) {
+        _ = Client;
+        _ = Method;
+        _ = Server;
+        _ = Status;
+        _ = HeadParser;
+        _ = ChunkParser;
         _ = @import("http/test.zig");
     }
 }
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 79ca71d0e2..b46cc2aece 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -1930,8 +1930,10 @@ pub const Server = struct {
 };
 
 test {
-    _ = @import("net/test.zig");
-    _ = Server;
-    _ = Stream;
-    _ = Address;
+    if (builtin.os.tag != .wasi) {
+        _ = Server;
+        _ = Stream;
+        _ = Address;
+        _ = @import("net/test.zig");
+    }
 }
diff --git a/lib/zig.h b/lib/zig.h
index 1171c7efac..f3b3897186 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -207,16 +207,16 @@ typedef char bool;
     __asm(zig_mangle_c(name) " = " zig_mangle_c(symbol))
 #endif
 
+#define zig_mangled_tentative zig_mangled
+#define zig_mangled_final zig_mangled
 #if _MSC_VER
-#define zig_mangled_tentative(mangled, unmangled)
-#define zig_mangled_final(mangled, unmangled) ; \
+#define zig_mangled(mangled, unmangled) ; \
     zig_export(#mangled, unmangled)
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_export(unmangled, #mangled) \
     zig_export(symbol, unmangled)
 #else /* _MSC_VER */
-#define zig_mangled_tentative(mangled, unmangled) __asm(zig_mangle_c(unmangled))
-#define zig_mangled_final(mangled, unmangled) zig_mangled_tentative(mangled, unmangled)
+#define zig_mangled(mangled, unmangled) __asm(zig_mangle_c(unmangled))
 #define zig_mangled_export(mangled, unmangled, symbol) \
     zig_mangled_final(mangled, unmangled) \
     zig_export(symbol, unmangled)
diff --git a/src/Air.zig b/src/Air.zig
index e70f73432f..5799c31b25 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -9,7 +9,7 @@ const assert = std.debug.assert;
 
 const Air = @This();
 const Value = @import("Value.zig");
-const Type = @import("type.zig").Type;
+const Type = @import("Type.zig");
 const InternPool = @import("InternPool.zig");
 const Zcu = @import("Zcu.zig");
 /// Deprecated.
@@ -1801,3 +1801,5 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
     };
 }
+
+pub const typesFullyResolved = @import("Air/types_resolved.zig").typesFullyResolved;
diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig
new file mode 100644
index 0000000000..073f2d68d4
--- /dev/null
+++ b/src/Air/types_resolved.zig
@@ -0,0 +1,521 @@
+const Air = @import("../Air.zig");
+const Zcu = @import("../Zcu.zig");
+const Type = @import("../Type.zig");
+const Value = @import("../Value.zig");
+const InternPool = @import("../InternPool.zig");
+
+/// Given a body of AIR instructions, returns whether all type resolution necessary for codegen is complete.
+/// If `false`, then type resolution must have failed, so codegen cannot proceed.
+pub fn typesFullyResolved(air: Air, zcu: *Zcu) bool {
+    return checkBody(air, air.getMainBody(), zcu);
+}
+
+fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
+    const tags = air.instructions.items(.tag);
+    const datas = air.instructions.items(.data);
+
+    for (body) |inst| {
+        const data = datas[@intFromEnum(inst)];
+        switch (tags[@intFromEnum(inst)]) {
+            .inferred_alloc, .inferred_alloc_comptime => unreachable,
+
+            .arg => {
+                if (!checkType(data.arg.ty.toType(), zcu)) return false;
+            },
+
+            .add,
+            .add_safe,
+            .add_optimized,
+            .add_wrap,
+            .add_sat,
+            .sub,
+            .sub_safe,
+            .sub_optimized,
+            .sub_wrap,
+            .sub_sat,
+            .mul,
+            .mul_safe,
+            .mul_optimized,
+            .mul_wrap,
+            .mul_sat,
+            .div_float,
+            .div_float_optimized,
+            .div_trunc,
+            .div_trunc_optimized,
+            .div_floor,
+            .div_floor_optimized,
+            .div_exact,
+            .div_exact_optimized,
+            .rem,
+            .rem_optimized,
+            .mod,
+            .mod_optimized,
+            .max,
+            .min,
+            .bit_and,
+            .bit_or,
+            .shr,
+            .shr_exact,
+            .shl,
+            .shl_exact,
+            .shl_sat,
+            .xor,
+            .cmp_lt,
+            .cmp_lt_optimized,
+            .cmp_lte,
+            .cmp_lte_optimized,
+            .cmp_eq,
+            .cmp_eq_optimized,
+            .cmp_gte,
+            .cmp_gte_optimized,
+            .cmp_gt,
+            .cmp_gt_optimized,
+            .cmp_neq,
+            .cmp_neq_optimized,
+            .bool_and,
+            .bool_or,
+            .store,
+            .store_safe,
+            .set_union_tag,
+            .array_elem_val,
+            .slice_elem_val,
+            .ptr_elem_val,
+            .memset,
+            .memset_safe,
+            .memcpy,
+            .atomic_store_unordered,
+            .atomic_store_monotonic,
+            .atomic_store_release,
+            .atomic_store_seq_cst,
+            => {
+                if (!checkRef(data.bin_op.lhs, zcu)) return false;
+                if (!checkRef(data.bin_op.rhs, zcu)) return false;
+            },
+
+            .not,
+            .bitcast,
+            .clz,
+            .ctz,
+            .popcount,
+            .byte_swap,
+            .bit_reverse,
+            .abs,
+            .load,
+            .fptrunc,
+            .fpext,
+            .intcast,
+            .trunc,
+            .optional_payload,
+            .optional_payload_ptr,
+            .optional_payload_ptr_set,
+            .wrap_optional,
+            .unwrap_errunion_payload,
+            .unwrap_errunion_err,
+            .unwrap_errunion_payload_ptr,
+            .unwrap_errunion_err_ptr,
+            .errunion_payload_ptr_set,
+            .wrap_errunion_payload,
+            .wrap_errunion_err,
+            .struct_field_ptr_index_0,
+            .struct_field_ptr_index_1,
+            .struct_field_ptr_index_2,
+            .struct_field_ptr_index_3,
+            .get_union_tag,
+            .slice_len,
+            .slice_ptr,
+            .ptr_slice_len_ptr,
+            .ptr_slice_ptr_ptr,
+            .array_to_slice,
+            .int_from_float,
+            .int_from_float_optimized,
+            .float_from_int,
+            .splat,
+            .error_set_has_value,
+            .addrspace_cast,
+            .c_va_arg,
+            .c_va_copy,
+            => {
+                if (!checkType(data.ty_op.ty.toType(), zcu)) return false;
+                if (!checkRef(data.ty_op.operand, zcu)) return false;
+            },
+
+            .alloc,
+            .ret_ptr,
+            .c_va_start,
+            => {
+                if (!checkType(data.ty, zcu)) return false;
+            },
+
+            .ptr_add,
+            .ptr_sub,
+            .add_with_overflow,
+            .sub_with_overflow,
+            .mul_with_overflow,
+            .shl_with_overflow,
+            .slice,
+            .slice_elem_ptr,
+            .ptr_elem_ptr,
+            => {
+                const bin = air.extraData(Air.Bin, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .block,
+            .loop,
+            => {
+                const extra = air.extraData(Air.Block, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .dbg_inline_block => {
+                const extra = air.extraData(Air.DbgInlineBlock, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .sqrt,
+            .sin,
+            .cos,
+            .tan,
+            .exp,
+            .exp2,
+            .log,
+            .log2,
+            .log10,
+            .floor,
+            .ceil,
+            .round,
+            .trunc_float,
+            .neg,
+            .neg_optimized,
+            .is_null,
+            .is_non_null,
+            .is_null_ptr,
+            .is_non_null_ptr,
+            .is_err,
+            .is_non_err,
+            .is_err_ptr,
+            .is_non_err_ptr,
+            .int_from_ptr,
+            .int_from_bool,
+            .ret,
+            .ret_safe,
+            .ret_load,
+            .is_named_enum_value,
+            .tag_name,
+            .error_name,
+            .cmp_lt_errors_len,
+            .c_va_end,
+            .set_err_return_trace,
+            => {
+                if (!checkRef(data.un_op, zcu)) return false;
+            },
+
+            .br => {
+                if (!checkRef(data.br.operand, zcu)) return false;
+            },
+
+            .cmp_vector,
+            .cmp_vector_optimized,
+            => {
+                const extra = air.extraData(Air.VectorCmp, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.lhs, zcu)) return false;
+                if (!checkRef(extra.rhs, zcu)) return false;
+            },
+
+            .reduce,
+            .reduce_optimized,
+            => {
+                if (!checkRef(data.reduce.operand, zcu)) return false;
+            },
+
+            .struct_field_ptr,
+            .struct_field_val,
+            => {
+                const extra = air.extraData(Air.StructField, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.struct_operand, zcu)) return false;
+            },
+
+            .shuffle => {
+                const extra = air.extraData(Air.Shuffle, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.a, zcu)) return false;
+                if (!checkRef(extra.b, zcu)) return false;
+                if (!checkVal(Value.fromInterned(extra.mask), zcu)) return false;
+            },
+
+            .cmpxchg_weak,
+            .cmpxchg_strong,
+            => {
+                const extra = air.extraData(Air.Cmpxchg, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.ptr, zcu)) return false;
+                if (!checkRef(extra.expected_value, zcu)) return false;
+                if (!checkRef(extra.new_value, zcu)) return false;
+            },
+
+            .aggregate_init => {
+                const ty = data.ty_pl.ty.toType();
+                const elems_len: usize = @intCast(ty.arrayLen(zcu));
+                const elems: []const Air.Inst.Ref = @ptrCast(air.extra[data.ty_pl.payload..][0..elems_len]);
+                if (!checkType(ty, zcu)) return false;
+                if (ty.zigTypeTag(zcu) == .Struct) {
+                    for (elems, 0..) |elem, elem_idx| {
+                        if (ty.structFieldIsComptime(elem_idx, zcu)) continue;
+                        if (!checkRef(elem, zcu)) return false;
+                    }
+                } else {
+                    for (elems) |elem| {
+                        if (!checkRef(elem, zcu)) return false;
+                    }
+                }
+            },
+
+            .union_init => {
+                const extra = air.extraData(Air.UnionInit, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.init, zcu)) return false;
+            },
+
+            .field_parent_ptr => {
+                const extra = air.extraData(Air.FieldParentPtr, data.ty_pl.payload).data;
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.field_ptr, zcu)) return false;
+            },
+
+            .atomic_load => {
+                if (!checkRef(data.atomic_load.ptr, zcu)) return false;
+            },
+
+            .prefetch => {
+                if (!checkRef(data.prefetch.ptr, zcu)) return false;
+            },
+
+            .vector_store_elem => {
+                const bin = air.extraData(Air.Bin, data.vector_store_elem.payload).data;
+                if (!checkRef(data.vector_store_elem.vector_ptr, zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .select,
+            .mul_add,
+            => {
+                const bin = air.extraData(Air.Bin, data.pl_op.payload).data;
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkRef(bin.lhs, zcu)) return false;
+                if (!checkRef(bin.rhs, zcu)) return false;
+            },
+
+            .atomic_rmw => {
+                const extra = air.extraData(Air.AtomicRmw, data.pl_op.payload).data;
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkRef(extra.operand, zcu)) return false;
+            },
+
+            .call,
+            .call_always_tail,
+            .call_never_tail,
+            .call_never_inline,
+            => {
+                const extra = air.extraData(Air.Call, data.pl_op.payload);
+                const args: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.args_len]);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                for (args) |arg| if (!checkRef(arg, zcu)) return false;
+            },
+
+            .dbg_var_ptr,
+            .dbg_var_val,
+            => {
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+            },
+
+            .@"try" => {
+                const extra = air.extraData(Air.Try, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .try_ptr => {
+                const extra = air.extraData(Air.TryPtr, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                if (!checkRef(extra.data.ptr, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .cond_br => {
+                const extra = air.extraData(Air.CondBr, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end..][0..extra.data.then_body_len]),
+                    zcu,
+                )) return false;
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .switch_br => {
+                const extra = air.extraData(Air.SwitchBr, data.pl_op.payload);
+                if (!checkRef(data.pl_op.operand, zcu)) return false;
+                var extra_index = extra.end;
+                for (0..extra.data.cases_len) |_| {
+                    const case = air.extraData(Air.SwitchBr.Case, extra_index);
+                    extra_index = case.end;
+                    const items: []const Air.Inst.Ref = @ptrCast(air.extra[extra_index..][0..case.data.items_len]);
+                    extra_index += case.data.items_len;
+                    for (items) |item| if (!checkRef(item, zcu)) return false;
+                    if (!checkBody(
+                        air,
+                        @ptrCast(air.extra[extra_index..][0..case.data.body_len]),
+                        zcu,
+                    )) return false;
+                    extra_index += case.data.body_len;
+                }
+                if (!checkBody(
+                    air,
+                    @ptrCast(air.extra[extra_index..][0..extra.data.else_body_len]),
+                    zcu,
+                )) return false;
+            },
+
+            .assembly => {
+                const extra = air.extraData(Air.Asm, data.ty_pl.payload);
+                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
+                // Luckily, we only care about the inputs and outputs, so we don't have to do
+                // the whole null-terminated string dance.
+                const outputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end..][0..extra.data.outputs_len]);
+                const inputs: []const Air.Inst.Ref = @ptrCast(air.extra[extra.end + extra.data.outputs_len ..][0..extra.data.inputs_len]);
+                for (outputs) |output| if (output != .none and !checkRef(output, zcu)) return false;
+                for (inputs) |input| if (input != .none and !checkRef(input, zcu)) return false;
+            },
+
+            .trap,
+            .breakpoint,
+            .ret_addr,
+            .frame_addr,
+            .unreach,
+            .wasm_memory_size,
+            .wasm_memory_grow,
+            .work_item_id,
+            .work_group_size,
+            .work_group_id,
+            .fence,
+            .dbg_stmt,
+            .err_return_trace,
+            .save_err_return_trace_index,
+            => {},
+        }
+    }
+    return true;
+}
+
+fn checkRef(ref: Air.Inst.Ref, zcu: *Zcu) bool {
+    const ip_index = ref.toInterned() orelse {
+        // This operand refers back to a previous instruction.
+        // We have already checked that instruction's type.
+        // So, there's no need to check this operand's type.
+        return true;
+    };
+    return checkVal(Value.fromInterned(ip_index), zcu);
+}
+
+fn checkVal(val: Value, zcu: *Zcu) bool {
+    if (!checkType(val.typeOf(zcu), zcu)) return false;
+    // Check for lazy values
+    switch (zcu.intern_pool.indexToKey(val.toIntern())) {
+        .int => |int| switch (int.storage) {
+            .u64, .i64, .big_int => return true,
+            .lazy_align, .lazy_size => |ty_index| {
+                return checkType(Type.fromInterned(ty_index), zcu);
+            },
+        },
+        else => return true,
+    }
+}
+
+fn checkType(ty: Type, zcu: *Zcu) bool {
+    const ip = &zcu.intern_pool;
+    return switch (ty.zigTypeTag(zcu)) {
+        .Type,
+        .Void,
+        .Bool,
+        .NoReturn,
+        .Int,
+        .Float,
+        .ErrorSet,
+        .Enum,
+        .Opaque,
+        .Vector,
+        // These types can appear due to some dummy instructions Sema introduces and expects to be omitted by Liveness.
+        // It's a little silly -- but fine, we'll return `true`.
+        .ComptimeFloat,
+        .ComptimeInt,
+        .Undefined,
+        .Null,
+        .EnumLiteral,
+        => true,
+
+        .Frame,
+        .AnyFrame,
+        => @panic("TODO Air.types_resolved.checkType async frames"),
+
+        .Optional => checkType(ty.childType(zcu), zcu),
+        .ErrorUnion => checkType(ty.errorUnionPayload(zcu), zcu),
+        .Pointer => checkType(ty.childType(zcu), zcu),
+        .Array => checkType(ty.childType(zcu), zcu),
+
+        .Fn => {
+            const info = zcu.typeToFunc(ty).?;
+            for (0..info.param_types.len) |i| {
+                const param_ty = info.param_types.get(ip)[i];
+                if (!checkType(Type.fromInterned(param_ty), zcu)) return false;
+            }
+            return checkType(Type.fromInterned(info.return_type), zcu);
+        },
+        .Struct => switch (ip.indexToKey(ty.toIntern())) {
+            .struct_type => {
+                const struct_obj = zcu.typeToStruct(ty).?;
+                return switch (struct_obj.layout) {
+                    .@"packed" => struct_obj.backingIntType(ip).* != .none,
+                    .auto, .@"extern" => struct_obj.flagsPtr(ip).fully_resolved,
+                };
+            },
+            .anon_struct_type => |tuple| {
+                for (0..tuple.types.len) |i| {
+                    const field_is_comptime = tuple.values.get(ip)[i] != .none;
+                    if (field_is_comptime) continue;
+                    const field_ty = tuple.types.get(ip)[i];
+                    if (!checkType(Type.fromInterned(field_ty), zcu)) return false;
+                }
+                return true;
+            },
+            else => unreachable,
+        },
+        .Union => return zcu.typeToUnion(ty).?.flagsPtr(ip).status == .fully_resolved,
+    };
+}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index b72a58f7fc..9d3a31e792 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -12,7 +12,7 @@ const WaitGroup = std.Thread.WaitGroup;
 const ErrorBundle = std.zig.ErrorBundle;
 
 const Value = @import("Value.zig");
-const Type = @import("type.zig").Type;
+const Type = @import("Type.zig");
 const target_util = @import("target.zig");
 const Package = @import("Package.zig");
 const link = @import("link.zig");
@@ -31,11 +31,13 @@ const clangMain = @import("main.zig").clangMain;
 const Zcu = @import("Zcu.zig");
 /// Deprecated; use `Zcu`.
 const Module = Zcu;
+const Sema = @import("Sema.zig");
 const InternPool = @import("InternPool.zig");
 const Cache = std.Build.Cache;
 const c_codegen = @import("codegen/c.zig");
 const libtsan = @import("libtsan.zig");
 const Zir = std.zig.Zir;
+const Air = @import("Air.zig");
 const Builtin = @import("Builtin.zig");
 const LlvmObject = @import("codegen/llvm.zig").Object;
 
@@ -315,18 +317,29 @@ const Job = union(enum) {
     codegen_decl: InternPool.DeclIndex,
     /// Write the machine code for a function to the output file.
     /// This will either be a non-generic `func_decl` or a `func_instance`.
-    codegen_func: InternPool.Index,
+    codegen_func: struct {
+        func: InternPool.Index,
+        /// This `Air` is owned by the `Job` and allocated with `gpa`.
+        /// It must be deinited when the job is processed.
+        air: Air,
+    },
     /// Render the .h file snippet for the Decl.
     emit_h_decl: InternPool.DeclIndex,
     /// The Decl needs to be analyzed and possibly export itself.
     /// It may have already be analyzed, or it may have been determined
     /// to be outdated; in this case perform semantic analysis again.
     analyze_decl: InternPool.DeclIndex,
+    /// Analyze the body of a runtime function.
+    /// After analysis, a `codegen_func` job will be queued.
+    /// These must be separate jobs to ensure any needed type resolution occurs *before* codegen.
+    analyze_func: InternPool.Index,
     /// The source file containing the Decl has been updated, and so the
     /// Decl may need its line number information updated in the debug info.
     update_line_number: InternPool.DeclIndex,
     /// The main source file for the module needs to be analyzed.
     analyze_mod: *Package.Module,
+    /// Fully resolve the given `struct` or `union` type.
+    resolve_type_fully: InternPool.Index,
 
     /// one of the glibc static objects
     glibc_crt_file: glibc.CRTFile,
@@ -2628,22 +2641,24 @@ fn reportMultiModuleErrors(mod: *Module) !void {
         for (notes[0..num_notes], file.references.items[0..num_notes], 0..) |*note, ref, i| {
             errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa);
             note.* = switch (ref) {
-                .import => |loc| blk: {
-                    break :blk try Module.ErrorMsg.init(
-                        mod.gpa,
-                        loc,
-                        "imported from module {s}",
-                        .{loc.file_scope.mod.fully_qualified_name},
-                    );
-                },
-                .root => |pkg| blk: {
-                    break :blk try Module.ErrorMsg.init(
-                        mod.gpa,
-                        .{ .file_scope = file, .base_node = 0, .lazy = .entire_file },
-                        "root of module {s}",
-                        .{pkg.fully_qualified_name},
-                    );
-                },
+                .import => |import| try Module.ErrorMsg.init(
+                    mod.gpa,
+                    .{
+                        .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, import.file, .main_struct_inst),
+                        .offset = .{ .token_abs = import.token },
+                    },
+                    "imported from module {s}",
+                    .{import.file.mod.fully_qualified_name},
+                ),
+                .root => |pkg| try Module.ErrorMsg.init(
+                    mod.gpa,
+                    .{
+                        .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst),
+                        .offset = .entire_file,
+                    },
+                    "root of module {s}",
+                    .{pkg.fully_qualified_name},
+                ),
             };
         }
         errdefer for (notes[0..num_notes]) |*n| n.deinit(mod.gpa);
@@ -2651,7 +2666,10 @@
         if (omitted > 0) {
             notes[num_notes] = try Module.ErrorMsg.init(
                 mod.gpa,
-                .{ .file_scope = file, .base_node = 0, .lazy = .entire_file },
+                .{
+                    .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst),
+                    .offset = .entire_file,
+                },
                 "{} more references omitted",
                 .{omitted},
             );
@@ -2660,7 +2678,10 @@
 
         const err = try Module.ErrorMsg.create(
             mod.gpa,
-            .{ .file_scope = file, .base_node = 0, .lazy = .entire_file },
+            .{
+                .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst),
+                .offset = .entire_file,
+            },
             "file exists in multiple modules",
             .{},
         );
@@ -2831,11 +2852,11 @@ pub fn totalErrorCount(comp: *Compilation) u32 {
         }
     }
 
-    if (comp.module) |module| {
-        total += module.failed_exports.count();
-        total += module.failed_embed_files.count();
+    if (comp.module) |zcu| {
+        total += zcu.failed_exports.count();
+        total += zcu.failed_embed_files.count();
 
-        for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| {
+        for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
             if (error_msg) |_| {
                 total += 1;
             } else {
@@ -2851,23 +2872,27 @@ pub fn totalErrorCount(comp: *Compilation) u32 {
         // When a parse error is introduced, we keep all the semantic analysis for
        // the previous parse success, including compile errors, but we cannot
        // emit them until the file succeeds parsing.
-        for (module.failed_decls.keys()) |key| {
-            if (module.declFileScope(key).okToReportErrors()) {
+        for (zcu.failed_analysis.keys()) |key| {
+            const decl_index = switch (key.unwrap()) {
+                .decl => |d| d,
+                .func => |ip_index| zcu.funcInfo(ip_index).owner_decl,
+            };
+            if (zcu.declFileScope(decl_index).okToReportErrors()) {
                 total += 1;
-                if (module.cimport_errors.get(key)) |errors| {
+                if (zcu.cimport_errors.get(key)) |errors| {
                     total += errors.errorMessageCount();
                 }
             }
         }
 
-        if (module.emit_h) |emit_h| {
+        if (zcu.emit_h) |emit_h| {
             for (emit_h.failed_decls.keys()) |key| {
-                if (module.declFileScope(key).okToReportErrors()) {
+                if (zcu.declFileScope(key).okToReportErrors()) {
                     total += 1;
                 }
             }
         }
 
-        if (module.global_error_set.entries.len - 1 > module.error_limit) {
+        if (zcu.global_error_set.entries.len - 1 > zcu.error_limit) {
             total += 1;
         }
     }
@@ -2882,8 +2907,8 @@
     // Compile log errors only count if there are no other errors.
     if (total == 0) {
-        if (comp.module) |module| {
-            total += @intFromBool(module.compile_log_decls.count() != 0);
+        if (comp.module) |zcu| {
+            total += @intFromBool(zcu.compile_log_sources.count() != 0);
         }
     }
 
@@ -2934,10 +2959,13 @@
             .msg = try bundle.addString("memory allocation failure"),
         });
     }
-    if (comp.module) |module| {
-        for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| {
+    if (comp.module) |zcu| {
+        var all_references = try zcu.resolveReferences();
+        defer all_references.deinit(gpa);
+
+        for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
             if (error_msg) |msg| {
-                try addModuleErrorMsg(module, &bundle, msg.*);
+                try addModuleErrorMsg(zcu, &bundle, msg.*, &all_references);
             } else {
                 // Must be ZIR errors. Note that this may include AST errors.
                 // addZirErrorMessages asserts that the tree is loaded.
@@ -2945,54 +2973,59 @@
                 try addZirErrorMessages(&bundle, file);
             }
         }
-        for (module.failed_embed_files.values()) |error_msg| {
-            try addModuleErrorMsg(module, &bundle, error_msg.*);
+        for (zcu.failed_embed_files.values()) |error_msg| {
+            try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
         }
-        for (module.failed_decls.keys(), module.failed_decls.values()) |decl_index, error_msg| {
+        for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| {
+            const decl_index = switch (anal_unit.unwrap()) {
+                .decl => |d| d,
+                .func => |ip_index| zcu.funcInfo(ip_index).owner_decl,
+            };
+
             // Skip errors for Decls within files that had a parse failure.
             // We'll try again once parsing succeeds.
-            if (module.declFileScope(decl_index).okToReportErrors()) {
-                try addModuleErrorMsg(module, &bundle, error_msg.*);
-                if (module.cimport_errors.get(decl_index)) |errors| {
-                    for (errors.getMessages()) |err_msg_index| {
-                        const err_msg = errors.getErrorMessage(err_msg_index);
-                        try bundle.addRootErrorMessage(.{
-                            .msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)),
-                            .src_loc = if (err_msg.src_loc != .none) blk: {
-                                const src_loc = errors.getSourceLocation(err_msg.src_loc);
-                                break :blk try bundle.addSourceLocation(.{
-                                    .src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)),
-                                    .span_start = src_loc.span_start,
-                                    .span_main = src_loc.span_main,
-                                    .span_end = src_loc.span_end,
-                                    .line = src_loc.line,
-                                    .column = src_loc.column,
-                                    .source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0,
-                                });
-                            } else .none,
-                        });
-                    }
+            if (!zcu.declFileScope(decl_index).okToReportErrors()) continue;
+
+            try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
+            if (zcu.cimport_errors.get(anal_unit)) |errors| {
+                for (errors.getMessages()) |err_msg_index| {
+                    const err_msg = errors.getErrorMessage(err_msg_index);
+                    try bundle.addRootErrorMessage(.{
+                        .msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)),
+                        .src_loc = if (err_msg.src_loc != .none) blk: {
+                            const src_loc = errors.getSourceLocation(err_msg.src_loc);
+                            break :blk try bundle.addSourceLocation(.{
+                                .src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)),
+                                .span_start = src_loc.span_start,
+                                .span_main = src_loc.span_main,
+                                .span_end = src_loc.span_end,
+                                .line = src_loc.line,
+                                .column = src_loc.column,
+                                .source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0,
+                            });
+                        } else .none,
+                    });
                 }
             }
         }
-        if (module.emit_h) |emit_h| {
+        if (zcu.emit_h) |emit_h| {
             for (emit_h.failed_decls.keys(), emit_h.failed_decls.values()) |decl_index, error_msg| {
                 // Skip errors for Decls within files that had a parse failure.
                 // We'll try again once parsing succeeds.
-                if (module.declFileScope(decl_index).okToReportErrors()) {
-                    try addModuleErrorMsg(module, &bundle, error_msg.*);
+                if (zcu.declFileScope(decl_index).okToReportErrors()) {
+                    try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
                 }
             }
         }
-        for (module.failed_exports.values()) |value| {
-            try addModuleErrorMsg(module, &bundle, value.*);
+        for (zcu.failed_exports.values()) |value| {
+            try addModuleErrorMsg(zcu, &bundle, value.*, &all_references);
         }
 
-        const actual_error_count = module.global_error_set.entries.len - 1;
-        if (actual_error_count > module.error_limit) {
+        const actual_error_count = zcu.global_error_set.entries.len - 1;
+        if (actual_error_count > zcu.error_limit) {
             try bundle.addRootErrorMessage(.{
-                .msg = try bundle.printString("module used more errors than possible: used {d}, max {d}", .{
-                    actual_error_count, module.error_limit,
+                .msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{
+                    actual_error_count, zcu.error_limit,
                 }),
                 .notes_len = 1,
             });
@@ -3041,25 +3074,28 @@
     }
 
     if (comp.module) |zcu| {
-        if (bundle.root_list.items.len == 0 and zcu.compile_log_decls.count() != 0) {
-            const values = zcu.compile_log_decls.values();
+        if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) {
+            var all_references = try zcu.resolveReferences();
+            defer all_references.deinit(gpa);
+
+            const values = zcu.compile_log_sources.values();
             // First one will be the error; subsequent ones will be notes.
-            const src_loc = values[0].src().upgrade(zcu);
+            const src_loc = values[0].src();
             const err_msg: Module.ErrorMsg = .{
                 .src_loc = src_loc,
                 .msg = "found compile log statement",
-                .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_decls.count() - 1),
+                .notes = try gpa.alloc(Module.ErrorMsg, zcu.compile_log_sources.count() - 1),
             };
             defer gpa.free(err_msg.notes);
 
             for (values[1..], err_msg.notes) |src_info, *note| {
                 note.* = .{
-                    .src_loc = src_info.src().upgrade(zcu),
+                    .src_loc = src_info.src(),
                     .msg = "also here",
                 };
             }
 
-            try addModuleErrorMsg(zcu, &bundle, err_msg);
+            try addModuleErrorMsg(zcu, &bundle, err_msg, &all_references);
         }
     }
 
@@ -3115,11 +3151,17 @@ pub const ErrorNoteHashContext = struct {
     }
 };
 
-pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void {
+pub fn addModuleErrorMsg(
+    mod: *Module,
+    eb: *ErrorBundle.Wip,
+    module_err_msg: Module.ErrorMsg,
+    all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference),
+) !void {
     const gpa = eb.gpa;
     const ip = &mod.intern_pool;
-    const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| {
-        const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa);
+    const err_src_loc = module_err_msg.src_loc.upgrade(mod);
+    const err_source = err_src_loc.file_scope.getSource(gpa) catch |err| {
+        const file_path = try err_src_loc.file_scope.fullPath(gpa);
         defer gpa.free(file_path);
         try eb.addRootErrorMessage(.{
             .msg = try eb.printString("unable to load '{s}': {s}", .{
@@ -3128,47 +3170,57 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
         });
         return;
     };
-    const err_span = try module_err_msg.src_loc.span(gpa);
+    const err_span = try err_src_loc.span(gpa);
     const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main);
-    const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa);
+    const file_path = try err_src_loc.file_scope.fullPath(gpa);
     defer gpa.free(file_path);
 
     var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{};
     defer ref_traces.deinit(gpa);
 
-    const remaining_references: ?u32 = remaining: {
-        if (mod.comp.reference_trace) |_| {
-            if (module_err_msg.hidden_references > 0) break :remaining module_err_msg.hidden_references;
-        } else {
-            if (module_err_msg.reference_trace.len > 0) break :remaining 0;
-        }
-        break :remaining null;
-    };
-    try ref_traces.ensureTotalCapacityPrecise(gpa, module_err_msg.reference_trace.len +
-        @intFromBool(remaining_references != null));
+    if (module_err_msg.reference_trace_root.unwrap()) |rt_root| {
+        var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .{};
+        defer seen.deinit(gpa);
 
-    for (module_err_msg.reference_trace) |module_reference| {
-        const source = try module_reference.src_loc.file_scope.getSource(gpa);
-        const span = try module_reference.src_loc.span(gpa);
-        const loc = std.zig.findLineColumn(source.bytes, span.main);
-        const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa);
-        defer gpa.free(rt_file_path);
-        ref_traces.appendAssumeCapacity(.{
-            .decl_name = try eb.addString(module_reference.decl.toSlice(ip)),
-            .src_loc = try eb.addSourceLocation(.{
-                .src_path = try eb.addString(rt_file_path),
-                .span_start = span.start,
-                .span_main = span.main,
-                .span_end = span.end,
-                .line = @intCast(loc.line),
-                .column = @intCast(loc.column),
-                .source_line = 0,
-            }),
-        });
+        const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len;
+
+        var referenced_by = rt_root;
+        while (all_references.get(referenced_by)) |ref| {
+            const gop = try seen.getOrPut(gpa, ref.referencer);
+            if (gop.found_existing) break;
+            if (ref_traces.items.len < max_references) {
+                const src = ref.src.upgrade(mod);
+                const source = try src.file_scope.getSource(gpa);
+                const span = try src.span(gpa);
+                const loc = std.zig.findLineColumn(source.bytes, span.main);
+                const rt_file_path = try src.file_scope.fullPath(gpa);
+                const name = switch (ref.referencer.unwrap()) {
+                    .decl => |d| mod.declPtr(d).name,
+                    .func => |f| mod.funcOwnerDeclPtr(f).name,
+                };
+                try ref_traces.append(gpa, .{
+                    .decl_name = try eb.addString(name.toSlice(ip)),
+                    .src_loc = try eb.addSourceLocation(.{
+                        .src_path = try eb.addString(rt_file_path),
+                        .span_start = span.start,
+                        .span_main = span.main,
+                        .span_end = span.end,
+                        .line = @intCast(loc.line),
+                        .column = @intCast(loc.column),
+                        .source_line = 0,
+                    }),
+                });
+            }
+            referenced_by = ref.referencer;
+        }
+
+        if (seen.count() > ref_traces.items.len) {
+            try ref_traces.append(gpa, .{
+                .decl_name = @intCast(seen.count() - ref_traces.items.len),
+                .src_loc = .none,
+            });
+        }
     }
-    if (remaining_references) |remaining| ref_traces.appendAssumeCapacity(
-        .{ .decl_name = remaining, .src_loc = .none },
-    );
 
     const src_loc = try eb.addSourceLocation(.{
         .src_path = try eb.addString(file_path),
@@ -3177,7 +3229,7 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
         .span_end = err_span.end,
         .line = @intCast(err_loc.line),
         .column = @intCast(err_loc.column),
-        .source_line = if (module_err_msg.src_loc.lazy == .entire_file)
+        .source_line = if (err_src_loc.lazy == .entire_file)
             0
         else
             try eb.addString(err_loc.source_line),
@@ -3194,10 +3246,11 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
     defer notes.deinit(gpa);
 
     for (module_err_msg.notes) |module_note| {
-        const source = try module_note.src_loc.file_scope.getSource(gpa);
-        const span = try module_note.src_loc.span(gpa);
+        const note_src_loc = module_note.src_loc.upgrade(mod);
+        const source = try note_src_loc.file_scope.getSource(gpa);
+        const span = try note_src_loc.span(gpa);
         const loc = std.zig.findLineColumn(source.bytes, span.main);
-        const note_file_path = try module_note.src_loc.file_scope.fullPath(gpa);
+        const note_file_path = try note_src_loc.file_scope.fullPath(gpa);
         defer gpa.free(note_file_path);
 
         const gop = try notes.getOrPutContext(gpa, .{
@@ -3348,7 +3401,7 @@ pub fn performAllTheWork(
             if (try zcu.findOutdatedToAnalyze()) |outdated| {
                 switch (outdated.unwrap()) {
                     .decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }),
-                    .func => |func| try comp.work_queue.writeItem(.{ .codegen_func = func }),
+                    .func => |func| try comp.work_queue.writeItem(.{ .analyze_func = func }),
                 }
                 continue;
             }
@@ -3398,6 +3451,14 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             const named_frame = tracy.namedFrame("codegen_func");
             defer named_frame.end();
 
+            const module = comp.module.?;
+            // This call takes ownership of `func.air`.
+            try module.linkerUpdateFunc(func.func, func.air);
+        },
+        .analyze_func => |func| {
+            const named_frame = tracy.namedFrame("analyze_func");
+            defer named_frame.end();
+
             const module = comp.module.?;
             module.ensureFuncBodyAnalyzed(func) catch |err| switch (err) {
                 error.OutOfMemory => return error.OutOfMemory,
@@ -3405,6 +3466,9 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             };
         },
         .emit_h_decl => |decl_index| {
+            if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++
+                "not decl analysis, which is too early to know about @export calls");
+
             const module = comp.module.?;
             const decl = module.declPtr(decl_index);
 
@@ -3477,6 +3541,16 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
                 try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern());
             }
         },
+        .resolve_type_fully => |ty| {
+            const named_frame = tracy.namedFrame("resolve_type_fully");
+            defer named_frame.end();
+
+            const zcu = comp.module.?;
+            Type.fromInterned(ty).resolveFully(zcu) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
+                error.AnalysisFail => return,
+            };
+        },
         .update_line_number => |decl_index| {
             const named_frame = tracy.namedFrame("update_line_number");
             defer named_frame.end();
@@ -3486,15 +3560,18 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !vo
             const decl = module.declPtr(decl_index);
             const lf = comp.bin_file.?;
             lf.updateDeclLineNumber(module, decl_index) catch |err| {
-                try module.failed_decls.ensureUnusedCapacity(gpa, 1);
-                module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
-                    gpa,
-                    decl.navSrcLoc(module).upgrade(module),
-                    "unable to update line number: {s}",
-                    .{@errorName(err)},
-                ));
+                try module.failed_analysis.ensureUnusedCapacity(gpa, 1);
+                module.failed_analysis.putAssumeCapacityNoClobber(
+                    InternPool.AnalUnit.wrap(.{ .decl = decl_index }),
+                    try Module.ErrorMsg.create(
+                        gpa,
+                        decl.navSrcLoc(module),
+                        "unable to update line number: {s}",
+                        .{@errorName(err)},
+                    ),
+                );
                 decl.analysis = .codegen_failure;
-                try module.retryable_failures.append(gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index }));
+                try module.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
             };
         },
         .analyze_mod => |pkg| {
@@ -3989,9 +4066,8 @@ fn workerAstGenFile(
             const res = mod.importFile(file, import_path) catch continue;
             if (!res.is_pkg) {
                 res.file.addReference(mod.*, .{ .import = .{
-                    .file_scope = file,
-                    .base_node = 0,
-                    .lazy = .{ .token_abs = item.data.token },
+                    .file = file,
+                    .token = item.data.token,
                 } }) catch continue;
             }
             break :blk res;
@@ -4364,20 +4440,14 @@ fn reportRetryableAstGenError(
 
     file.status = .retryable_failure;
 
-    const src_loc: Module.SrcLoc = switch (src) {
+    const src_loc: Module.LazySrcLoc = switch (src) {
         .root => .{
-            .file_scope = file,
-            .base_node = 0,
-            .lazy = .entire_file,
+            .base_node_inst = try mod.intern_pool.trackZir(gpa, file, .main_struct_inst),
+            .offset = .entire_file,
         },
-        .import => |info| blk: {
-            const importing_file = info.importing_file;
-
-            break :blk .{
-                .file_scope = importing_file,
-                .base_node = 0,
-                .lazy = .{ .token_abs = info.import_tok },
-            };
+        .import => |info| .{
+            .base_node_inst = try mod.intern_pool.trackZir(gpa, info.importing_file, .main_struct_inst),
+            .offset = .{ .token_abs = info.import_tok },
         },
     };
diff --git a/src/InternPool.zig b/src/InternPool.zig
index cf56550c25..c6b27acaf3 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -81,7 +81,7 @@ namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.In
 /// Given a `Depender`, points to an entry in `dep_entries` whose `depender`
 /// matches. The `next_dependee` field can be used to iterate all such entries
 /// and remove them from the corresponding lists.
-first_dependency: std.AutoArrayHashMapUnmanaged(AnalSubject, DepEntry.Index) = .{},
+first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index) = .{},
 
 /// Stores dependency information. The hashmaps declared above are used to look
 /// up entries in this list as required. This is not stored in `extra` so that
@@ -132,36 +132,36 @@ pub fn trackZir(ip: *InternPool, gpa: Allocator, file: *Module.File, inst: Zir.I
     return @enumFromInt(gop.index);
 }
 
-/// Analysis Subject. Represents a single entity which undergoes semantic analysis.
+/// Analysis Unit. Represents a single entity which undergoes semantic analysis.
 /// This is either a `Decl` (in future `Cau`) or a runtime function.
 /// The LSB is used as a tag bit.
 /// This is the "source" of an incremental dependency edge.
-pub const AnalSubject = packed struct(u32) {
+pub const AnalUnit = packed struct(u32) {
     kind: enum(u1) { decl, func },
     index: u31,
 
     pub const Unwrapped = union(enum) {
         decl: DeclIndex,
        func: InternPool.Index,
     };
-    pub fn unwrap(as: AnalSubject) Unwrapped {
+    pub fn unwrap(as: AnalUnit) Unwrapped {
         return switch (as.kind) {
             .decl => .{ .decl = @enumFromInt(as.index) },
             .func => .{ .func = @enumFromInt(as.index) },
         };
     }
-    pub fn wrap(raw: Unwrapped) AnalSubject {
+    pub fn wrap(raw: Unwrapped) AnalUnit {
         return switch (raw) {
             .decl => |decl| .{ .kind = .decl, .index = @intCast(@intFromEnum(decl)) },
             .func => |func| .{ .kind = .func, .index = @intCast(@intFromEnum(func)) },
         };
     }
-    pub fn toOptional(as: AnalSubject) Optional {
+    pub fn toOptional(as: AnalUnit) Optional {
         return @enumFromInt(@as(u32, @bitCast(as)));
     }
     pub const Optional = enum(u32) {
         none = std.math.maxInt(u32),
         _,
-        pub fn unwrap(opt: Optional) ?AnalSubject {
+        pub fn unwrap(opt: Optional) ?AnalUnit {
             return switch (opt) {
                 .none => null,
                 _ => @bitCast(@intFromEnum(opt)),
@@ -178,7 +178,7 @@ pub const Dependee = union(enum) {
     namespace_name: NamespaceNameKey,
 };
 
-pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: AnalSubject) void {
+pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: AnalUnit) void {
     var opt_idx = (ip.first_dependency.fetchSwapRemove(depender) orelse return).value.toOptional();
 
     while (opt_idx.unwrap()) |idx| {
@@ -207,7 +207,7 @@ pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender:
 pub const DependencyIterator = struct {
     ip: *const InternPool,
     next_entry: DepEntry.Index.Optional,
 
-    pub fn next(it: *DependencyIterator) ?AnalSubject {
+    pub fn next(it: *DependencyIterator) ?AnalUnit {
         const idx = it.next_entry.unwrap() orelse return null;
         const entry = it.ip.dep_entries.items[@intFromEnum(idx)];
         it.next_entry = entry.next;
@@ -236,7 +236,7 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI
     };
 }
 
-pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalSubject, dependee: Dependee) Allocator.Error!void {
+pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, dependee: Dependee) Allocator.Error!void {
     const first_depender_dep: DepEntry.Index.Optional = if (ip.first_dependency.get(depender)) |idx| dep: {
         // The entry already exists, so there is capacity to overwrite it later.
         break :dep idx.toOptional();
@@ -300,7 +300,7 @@ pub const DepEntry = extern struct {
     /// the first and only entry in one of `intern_pool.*_deps`, and does not
     /// appear in any list by `first_dependency`, but is not in
     /// `free_dep_entries` since `*_deps` stores a reference to it.
-    depender: AnalSubject.Optional,
+    depender: AnalUnit.Optional,
     /// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee.
     /// Used to iterate all dependers for a given dependee during an update.
     /// null if this is the end of the list.
diff --git a/src/RangeSet.zig b/src/RangeSet.zig
index 30b8c273cd..01d9157767 100644
--- a/src/RangeSet.zig
+++ b/src/RangeSet.zig
@@ -3,7 +3,7 @@ const assert = std.debug.assert;
 const Order = std.math.Order;
 
 const InternPool = @import("InternPool.zig");
-const Type = @import("type.zig").Type;
+const Type = @import("Type.zig");
 const Value = @import("Value.zig");
 const Zcu = @import("Zcu.zig");
 /// Deprecated.
diff --git a/src/Sema.zig b/src/Sema.zig
index db520f5789..0d4cf26871 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -64,14 +64,6 @@ generic_owner: InternPool.Index = .none,
 /// instantiation can point back to the instantiation site in addition to the
 /// declaration site.
 generic_call_src: LazySrcLoc = LazySrcLoc.unneeded,
-/// The key is types that must be fully resolved prior to machine code
-/// generation pass. Types are added to this set when resolving them
-/// immediately could cause a dependency loop, but they do need to be resolved
-/// before machine code generation passes process the AIR.
-/// It would work fine if this were an array list instead of an array hash map.
-/// I chose array hash map with the intention to save time by omitting
-/// duplicates.
-types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
 /// These are lazily created runtime blocks from block_inline instructions.
 /// They are created when an break_inline passes through a runtime condition, because
 /// Sema must convert comptime control flow to runtime control flow, which means
@@ -117,6 +109,15 @@ maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAll
 /// Backed by gpa.
 comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .{},
 
+/// A list of exports performed by this analysis. After this `Sema` terminates,
+/// these are flushed to `Zcu.single_exports` or `Zcu.multi_exports`.
+exports: std.ArrayListUnmanaged(Zcu.Export) = .{},
+
+/// All references registered so far by this `Sema`. This is a temporary duplicate
+/// of data stored in `Zcu.all_references`. It exists to avoid adding references to
+/// a given `AnalUnit` multiple times.
+references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
+
 const MaybeComptimeAlloc = struct {
     /// The runtime index of the `alloc` instruction.
     runtime_index: Value.RuntimeIndex,
@@ -167,7 +168,7 @@ const log = std.log.scoped(.sema);
 const Sema = @This();
 const Value = @import("Value.zig");
 const MutableValue = @import("mutable_value.zig").MutableValue;
-const Type = @import("type.zig").Type;
+const Type = @import("Type.zig");
 const Air = @import("Air.zig");
 const Zir = std.zig.Zir;
 const Zcu = @import("Zcu.zig");
@@ -186,6 +187,7 @@ const build_options = @import("build_options");
 const Compilation = @import("Compilation.zig");
 const InternPool = @import("InternPool.zig");
 const Alignment = InternPool.Alignment;
+const AnalUnit = InternPool.AnalUnit;
 const ComptimeAllocIndex = InternPool.ComptimeAllocIndex;
 
 pub const default_branch_quota = 1000;
@@ -862,7 +864,6 @@ pub fn deinit(sema: *Sema) void {
     sema.air_extra.deinit(gpa);
     sema.inst_map.deinit(gpa);
     sema.decl_val_table.deinit(gpa);
-    sema.types_to_resolve.deinit(gpa);
     {
         var it = sema.post_hoc_blocks.iterator();
         while (it.next()) |entry| {
@@ -875,6 +876,8 @@ pub fn deinit(sema: *Sema) void {
     sema.base_allocs.deinit(gpa);
     sema.maybe_comptime_allocs.deinit(gpa);
     sema.comptime_allocs.deinit(gpa);
+    sema.exports.deinit(gpa);
+    sema.references.deinit(gpa);
     sema.* = undefined;
 }
 
@@ -2067,8 +2070,8 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize)
     const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty));
 
     // var st: StackTrace = undefined;
-    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-    try sema.resolveTypeFields(stack_trace_ty);
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty));
 
     // st.instruction_addresses = &addrs;
@@ -2414,8 +2417,7 @@ pub fn errNote(
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
-    const zcu = sema.mod;
-    return zcu.errNoteNonLazy(src.upgrade(zcu), parent, format, args);
+    return sema.mod.errNote(src, parent, format, args);
 }
 
 fn addFieldErrNote(
@@ -2443,7 +2445,7 @@ pub fn errMsg(
     args: anytype,
 ) Allocator.Error!*Module.ErrorMsg {
     assert(src.offset != .unneeded);
-    return Module.ErrorMsg.create(sema.gpa, src.upgrade(sema.mod), format, args);
+    return Module.ErrorMsg.create(sema.gpa, src, format, args);
 }
 
 pub fn fail(
@@ -2466,79 +2468,38 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
     @setCold(true);
     const gpa = sema.gpa;
     const mod = sema.mod;
-
-    ref: {
-        errdefer err_msg.destroy(gpa);
-
-        if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
-            var wip_errors: std.zig.ErrorBundle.Wip = undefined;
-            wip_errors.init(gpa) catch unreachable;
-            Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable;
-            std.debug.print("compile error during Sema:\n", .{});
-            var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
-            error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
-            crash_report.compilerPanic("unexpected compile error occurred", null, null);
-        }
-
-        try mod.failed_decls.ensureUnusedCapacity(gpa, 1);
-        try mod.failed_files.ensureUnusedCapacity(gpa, 1);
-
-        if (block) |start_block| {
-            var block_it = start_block;
-            while (block_it.inlining) |inlining| {
-                try sema.errNote(
-                    inlining.call_src,
-                    err_msg,
-                    "called from here",
-                    .{},
-                );
-                block_it = inlining.call_block;
-            }
-
-            const max_references = refs: {
-                if (mod.comp.reference_trace) |num| break :refs num;
-                // Do not add multiple traces without explicit request.
-                if (mod.failed_decls.count() > 0) break :ref;
-                break :refs default_reference_trace_len;
-            };
-
-            var referenced_by = if (sema.owner_func_index != .none)
-                mod.funcOwnerDeclIndex(sema.owner_func_index)
-            else
-                sema.owner_decl_index;
-            var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa);
-            defer reference_stack.deinit();
-
-            // Avoid infinite loops.
-            var seen = std.AutoHashMap(InternPool.DeclIndex, void).init(gpa);
-            defer seen.deinit();
-
-            while (mod.reference_table.get(referenced_by)) |ref| {
-                const gop = try seen.getOrPut(ref.referencer);
-                if (gop.found_existing) break;
-                if (reference_stack.items.len < max_references) {
-                    const decl = mod.declPtr(ref.referencer);
-                    try reference_stack.append(.{
-                        .decl = decl.name,
-                        .src_loc = ref.src.upgrade(mod),
-                    });
-                }
-                referenced_by = ref.referencer;
-            }
-            err_msg.reference_trace = try reference_stack.toOwnedSlice();
-            err_msg.hidden_references = @intCast(seen.count() -| max_references);
-        }
-    }
     const ip = &mod.intern_pool;
-    if (sema.owner_func_index != .none) {
-        ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
-    } else {
-        sema.owner_decl.analysis = .sema_failure;
+
+    if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
+        var all_references = mod.resolveReferences() catch @panic("out of memory");
+        var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+        wip_errors.init(gpa) catch @panic("out of memory");
+        Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch unreachable;
+        std.debug.print("compile error during Sema:\n", .{});
+        var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
+        error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
+        crash_report.compilerPanic("unexpected compile error occurred", null, null);
     }
-    if (sema.func_index != .none) {
-        ip.funcAnalysis(sema.func_index).state = .sema_failure;
+
+    if (block) |start_block| {
+        var block_it = start_block;
+        while (block_it.inlining) |inlining| {
+            try sema.errNote(
+                inlining.call_src,
+                err_msg,
+                "called from here",
+                .{},
+            );
+            block_it = inlining.call_block;
+        }
     }
-    const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index);
+
+    const use_ref_trace = if (mod.comp.reference_trace) |n| n > 0 else mod.failed_analysis.count() == 0;
+    if (use_ref_trace) {
+        err_msg.reference_trace_root = sema.ownerUnit().toOptional();
+    }
+
+    const gop = try mod.failed_analysis.getOrPut(gpa, sema.ownerUnit());
     if (gop.found_existing) {
         // If there are multiple errors for the same Decl, prefer the first one added.
sema.err = null; @@ -2547,6 +2508,17 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error sema.err = err_msg; gop.value_ptr.* = err_msg; } + + if (sema.owner_func_index != .none) { + ip.funcAnalysis(sema.owner_func_index).state = .sema_failure; + } else { + sema.owner_decl.analysis = .sema_failure; + } + + if (sema.func_index != .none) { + ip.funcAnalysis(sema.func_index).state = .sema_failure; + } + return error.AnalysisFail; } @@ -2561,7 +2533,6 @@ fn reparentOwnedErrorMsg( args: anytype, ) !void { const mod = sema.mod; - const resolved_src = src.upgrade(mod); const msg_str = try std.fmt.allocPrint(mod.gpa, format, args); const orig_notes = msg.notes.len; @@ -2572,7 +2543,7 @@ fn reparentOwnedErrorMsg( .msg = msg.msg, }; - msg.src_loc = resolved_src; + msg.src_loc = src; msg.msg = msg_str; } @@ -2649,7 +2620,7 @@ fn analyzeAsInt( const mod = sema.mod; const coerced = try sema.coerce(block, dest_ty, air_ref, src); const val = try sema.resolveConstDefinedValue(block, src, coerced, reason); - return (try val.getUnsignedIntAdvanced(mod, sema)).?; + return (try val.getUnsignedIntAdvanced(mod, .sema)).?; } /// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`, @@ -2735,12 +2706,12 @@ fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { if (!zcu.comp.debug_incremental) return false; const decl_index = Type.fromInterned(ty).getOwnerDecl(zcu); - const decl_as_depender = InternPool.AnalSubject.wrap(.{ .decl = decl_index }); + const decl_as_depender = AnalUnit.wrap(.{ .decl = decl_index }); const was_outdated = zcu.outdated.swapRemove(decl_as_depender) or zcu.potentially_outdated.swapRemove(decl_as_depender); if (!was_outdated) return false; _ = zcu.outdated_ready.swapRemove(decl_as_depender); - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index })); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); zcu.intern_pool.remove(ty); zcu.declPtr(decl_index).analysis = .dependency_failure; try zcu.markDependeeOutdated(.{ .decl_val = decl_index }); @@ -2834,7 +2805,7 @@ fn zirStructDecl( if (sema.mod.comp.debug_incremental) { try ip.addDependency( sema.gpa, - InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), + AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try ip.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -2853,6 +2824,8 @@ fn zirStructDecl( } try mod.finalizeAnonDecl(new_decl_index); + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); } @@ -3068,7 +3041,7 @@ fn zirEnumDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( sema.gpa, - InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), + AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3334,7 +3307,7 @@ fn zirUnionDecl( if (sema.mod.comp.debug_incremental) { try mod.intern_pool.addDependency( sema.gpa, - InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }), + AnalUnit.wrap(.{ .decl = new_decl_index }), .{ .src_hash = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst) }, ); } @@ -3353,7 +3326,8 @@ fn zirUnionDecl( } try mod.finalizeAnonDecl(new_decl_index); - + try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = 
+    try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index));
 }
 
@@ -3422,7 +3396,7 @@ fn zirOpaqueDecl(
     if (sema.mod.comp.debug_incremental) {
         try ip.addDependency(
             gpa,
-            InternPool.AnalSubject.wrap(.{ .decl = new_decl_index }),
+            AnalUnit.wrap(.{ .decl = new_decl_index }),
             .{ .src_hash = try ip.trackZir(gpa, block.getFileScope(mod), inst) },
         );
     }
@@ -3478,12 +3452,12 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     defer tracy.end();
 
     if (block.is_comptime or try sema.typeRequiresComptime(sema.fn_ret_ty)) {
-        try sema.resolveTypeFields(sema.fn_ret_ty);
+        try sema.fn_ret_ty.resolveFields(sema.mod);
         return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, .none);
     }
 
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = sema.fn_ret_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
@@ -3492,7 +3466,6 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
         // We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
         // TODO when functions gain result location support, the inlining struct in
         // Block should contain the return pointer, and we would pass that through here.
-        try sema.queueFullTypeResolution(sema.fn_ret_ty);
         return block.addTy(.alloc, ptr_type);
     }
 
@@ -3688,8 +3661,8 @@ fn zirAllocExtended(
         try sema.validateVarType(block, ty_src, var_ty, false);
     }
     const target = sema.mod.getTarget();
-    try sema.resolveTypeLayout(var_ty);
-    const ptr_type = try sema.ptrType(.{
+    try var_ty.resolveLayout(sema.mod);
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = var_ty.toIntern(),
         .flags = .{
             .alignment = alignment,
@@ -3923,7 +3896,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                 const idx_val = (try sema.resolveValue(data.rhs)).?;
                 break :blk .{
                     data.lhs,
-                    .{ .elem = try idx_val.toUnsignedIntAdvanced(sema) },
+                    .{ .elem = try idx_val.toUnsignedIntSema(zcu) },
                 };
             },
             .bitcast => .{
@@ -3961,7 +3934,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     .val = payload_val.toIntern(),
                 } });
                 try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(opt_val), opt_ty);
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrOptPayload(zcu)).toIntern();
             },
             .eu_payload => ptr: {
                 // Set the error union to non-error at comptime.
@@ -3974,7 +3947,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     .val = .{ .payload = payload_val.toIntern() },
                 } });
                 try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), Value.fromInterned(eu_val), eu_ty);
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrEuPayload(zcu)).toIntern();
             },
             .field => |idx| ptr: {
                 const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
@@ -3988,9 +3961,9 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
                     const store_val = try zcu.unionValue(maybe_union_ty, tag_val, payload_val);
                     try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
                 }
-                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, sema)).toIntern();
+                break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, zcu)).toIntern();
             },
-            .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, sema)).toIntern(),
+            .elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, zcu)).toIntern(),
         };
         try ptr_mapping.put(air_ptr, new_ptr);
     }
@@ -4081,7 +4054,7 @@ fn finishResolveComptimeKnownAllocPtr(
 fn makePtrTyConst(sema: *Sema, ptr_ty: Type) CompileError!Type {
     var ptr_info = ptr_ty.ptrInfo(sema.mod);
     ptr_info.flags.is_const = true;
-    return sema.ptrType(ptr_info);
+    return sema.mod.ptrTypeSema(ptr_info);
 }
 
 fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref {
@@ -4124,11 +4097,10 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
         return sema.analyzeComptimeAlloc(block, var_ty, .none);
     }
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = var_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
-    try sema.queueFullTypeResolution(var_ty);
     const ptr = try block.addTy(.alloc, ptr_type);
     const ptr_inst = ptr.toIndex().?;
     try sema.maybe_comptime_allocs.put(sema.gpa, ptr_inst, .{ .runtime_index = block.runtime_index });
@@ -4148,11 +4120,10 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     try sema.validateVarType(block, ty_src, var_ty, false);
     const target = sema.mod.getTarget();
-    const ptr_type = try sema.ptrType(.{
+    const ptr_type = try sema.mod.ptrTypeSema(.{
         .child = var_ty.toIntern(),
         .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
     });
-    try sema.queueFullTypeResolution(var_ty);
     return block.addTy(.alloc, ptr_type);
 }
 
@@ -4229,6 +4200,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
         if (mod.intern_pool.isFuncBody(val)) {
             const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
             if (try sema.fnHasRuntimeBits(ty)) {
+                try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = val }));
                 try mod.ensureFuncBodyAnalysisQueued(val);
             }
         }
@@ -4247,7 +4219,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
         }
 
         const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_vals, .none);
-        const final_ptr_ty = try sema.ptrType(.{
+        const final_ptr_ty = try mod.ptrTypeSema(.{
             .child = final_elem_ty.toIntern(),
             .flags = .{
                 .alignment = ia1.alignment,
@@ -4267,7 +4239,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
        // Unless the block is comptime, `alloc_inferred` always produces
        // a runtime constant. The final inferred type needs to be
        // fully resolved so it can be lowered in codegen.
-        try sema.resolveTypeFully(final_elem_ty);
+        try final_elem_ty.resolveFully(mod);
 
         return;
     }
 
@@ -4279,8 +4251,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
         return sema.fail(block, src, "value with comptime-only type '{}' depends on runtime control flow", .{final_elem_ty.fmt(mod)});
     }
 
-    try sema.queueFullTypeResolution(final_elem_ty);
-
     // Change it to a normal alloc.
     sema.air_instructions.set(@intFromEnum(ptr_inst), .{
         .tag = .alloc,
@@ -4653,7 +4623,7 @@ fn validateArrayInitTy(
             return;
         },
         .Struct => if (ty.isTuple(mod)) {
-            try sema.resolveTypeFields(ty);
+            try ty.resolveFields(mod);
             const array_len = ty.arrayLen(mod);
             if (init_count > array_len) {
                 return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
@@ -4931,7 +4901,7 @@ fn validateStructInit(
     if (block.is_comptime and
         (try sema.resolveDefinedValue(block, init_src, struct_ptr)) != null)
    {
-        try sema.resolveStructLayout(struct_ty);
+        try struct_ty.resolveLayout(mod);
         // In this case the only thing we need to do is evaluate the implicit
         // store instructions for default field values, and report any missing fields.
         // Avoid the cost of the extra machinery for detecting a comptime struct init value.
@@ -4939,7 +4909,7 @@ fn validateStructInit(
             const i: u32 = @intCast(i_usize);
             if (field_ptr != .none) continue;
 
-            try sema.resolveStructFieldInits(struct_ty);
+            try struct_ty.resolveStructFieldInits(mod);
             const default_val = struct_ty.structFieldDefaultValue(i, mod);
             if (default_val.toIntern() == .unreachable_value) {
                 const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
@@ -4988,7 +4958,7 @@ fn validateStructInit(
     const air_tags = sema.air_instructions.items(.tag);
     const air_datas = sema.air_instructions.items(.data);
 
-    try sema.resolveStructFieldInits(struct_ty);
+    try struct_ty.resolveStructFieldInits(mod);
 
     // We collect the comptime field values in case the struct initialization
     // ends up being comptime-known.
@@ -5147,7 +5117,7 @@ fn validateStructInit(
         try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store);
         return;
     }
-    try sema.resolveStructLayout(struct_ty);
+    try struct_ty.resolveLayout(mod);
 
     // Our task is to insert `store` instructions for all the default field values.
 
     for (found_fields, 0..) |field_ptr, i| {
@@ -5192,7 +5162,7 @@ fn zirValidatePtrArrayInit(
             var root_msg: ?*Module.ErrorMsg = null;
             errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
 
-            try sema.resolveStructFieldInits(array_ty);
+            try array_ty.resolveStructFieldInits(mod);
             var i = instrs.len;
             while (i < array_len) : (i += 1) {
                 const default_val = array_ty.structFieldDefaultValue(i, mod).toIntern();
@@ -5261,7 +5231,7 @@ fn zirValidatePtrArrayInit(
 
             if (array_ty.isTuple(mod)) {
                 if (array_ty.structFieldIsComptime(i, mod))
-                    try sema.resolveStructFieldInits(array_ty);
+                    try array_ty.resolveStructFieldInits(mod);
                 if (try array_ty.structFieldValueComptime(mod, i)) |opv| {
                     element_vals[i] = opv.toIntern();
                     continue;
@@ -5601,7 +5571,7 @@ fn storeToInferredAllocComptime(
             .needed_comptime_reason = "value being stored to a comptime variable must be comptime-known",
         });
     };
-    const alloc_ty = try sema.ptrType(.{
+    const alloc_ty = try zcu.ptrTypeSema(.{
         .child = operand_ty.toIntern(),
         .flags = .{
             .alignment = iac.alignment,
@@ -5708,7 +5678,7 @@ fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref {
 
 fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index {
     const mod = sema.mod;
-    const ptr_ty = (try sema.ptrType(.{
+    const ptr_ty = (try mod.ptrTypeSema(.{
         .child = mod.intern_pool.typeOf(val),
         .flags = .{
             .alignment = .none,
@@ -5817,11 +5787,7 @@ fn zirCompileLog(
     }
     try writer.print("\n", .{});
 
-    const decl_index = if (sema.func_index != .none)
-        mod.funcOwnerDeclIndex(sema.func_index)
-    else
-        sema.owner_decl_index;
-    const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index);
+    const gop = try mod.compile_log_sources.getOrPut(sema.gpa, sema.ownerUnit());
     if (!gop.found_existing) gop.value_ptr.* = .{
         .base_node_inst = block.src_base_inst,
         .node_offset = src_node,
@@ -5974,7 +5940,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
             if (!comp.config.link_libc)
                 try sema.errNote(src, msg, "libc headers not available; compilation does not link against libc", .{});
 
-            const gop = try mod.cimport_errors.getOrPut(gpa, sema.owner_decl_index);
+            const gop = try mod.cimport_errors.getOrPut(gpa, sema.ownerUnit());
             if (!gop.found_existing) {
                 gop.value_ptr.* = c_import_res.errors;
                 c_import_res.errors = std.zig.ErrorBundle.empty;
@@ -6393,6 +6359,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     } else try sema.lookupIdentifier(block, operand_src, decl_name);
     const options = try sema.resolveExportOptions(block, options_src, extra.options);
     {
+        try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index }));
         try sema.ensureDeclAnalyzed(decl_index);
         const exported_decl = mod.declPtr(decl_index);
         if (exported_decl.val.getFunction(mod)) |function| {
@@ -6423,10 +6390,9 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         return sema.analyzeExport(block, src, options, decl_index);
     }
 
-    try addExport(mod, .{
+    try sema.exports.append(mod.gpa, .{
         .opts = options,
         .src = src,
-        .owner_decl = sema.owner_decl_index,
         .exported = .{ .value = operand.toIntern() },
         .status = .in_progress,
     });
@@ -6445,6 +6411,7 @@ pub fn analyzeExport(
 
     if (options.linkage == .internal)
         return;
 
+    try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = exported_decl_index }));
     try sema.ensureDeclAnalyzed(exported_decl_index);
     const exported_decl = mod.declPtr(exported_decl_index);
     const export_ty = exported_decl.typeOf(mod);
@@ -6467,48 +6434,16 @@ pub fn analyzeExport(
         return sema.fail(block, src, "export target cannot be extern", .{});
     }
 
-    try sema.maybeQueueFuncBodyAnalysis(exported_decl_index);
+    try sema.maybeQueueFuncBodyAnalysis(src, exported_decl_index);
 
-    try addExport(mod, .{
+    try sema.exports.append(gpa, .{
         .opts = options,
         .src = src,
-        .owner_decl = sema.owner_decl_index,
         .exported = .{ .decl_index = exported_decl_index },
         .status = .in_progress,
     });
 }
 
-fn addExport(mod: *Module, export_init: Module.Export) error{OutOfMemory}!void {
-    const gpa = mod.gpa;
-
-    try mod.decl_exports.ensureUnusedCapacity(gpa, 1);
-    try mod.value_exports.ensureUnusedCapacity(gpa, 1);
-    try mod.export_owners.ensureUnusedCapacity(gpa, 1);
-
-    const new_export = try gpa.create(Module.Export);
-    errdefer gpa.destroy(new_export);
-
-    new_export.* = export_init;
-
-    const eo_gop = mod.export_owners.getOrPutAssumeCapacity(export_init.owner_decl);
-    if (!eo_gop.found_existing) eo_gop.value_ptr.* = .{};
-    try eo_gop.value_ptr.append(gpa, new_export);
-    errdefer _ = eo_gop.value_ptr.pop();
-
-    switch (export_init.exported) {
-        .decl_index => |decl_index| {
-            const de_gop = mod.decl_exports.getOrPutAssumeCapacity(decl_index);
-            if (!de_gop.found_existing) de_gop.value_ptr.* = .{};
-            try de_gop.value_ptr.append(gpa, new_export);
-        },
-        .value => |value| {
-            const ve_gop = mod.value_exports.getOrPutAssumeCapacity(value);
-            if (!ve_gop.found_existing) ve_gop.value_ptr.* = .{};
-            try ve_gop.value_ptr.append(gpa, new_export);
-        },
-    }
-}
-
 fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
     const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
@@ -6700,8 +6635,6 @@ fn addDbgVar(
     // real `block` instruction.
     if (block.need_debug_scope) |ptr| ptr.* = true;
 
-    try sema.queueFullTypeResolution(operand_ty);
-
     // Add the name to the AIR.
     const name_extra_index: u32 = @intCast(sema.air_extra.items.len);
     const elements_used = name.len / 4 + 1;
@@ -6730,8 +6663,7 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         .no_embedded_nulls,
     );
     const decl_index = try sema.lookupIdentifier(block, src, decl_name);
-    try sema.addReferencedBy(src, decl_index);
-    return sema.analyzeDeclRef(decl_index);
+    return sema.analyzeDeclRef(src, decl_index);
 }
 
 fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -6888,14 +6820,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref
     if (!block.ownerModule().error_tracing) return .none;
 
-    const stack_trace_ty = sema.getBuiltinType("StackTrace") catch |err| switch (err) {
-        error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
-        else => |e| return e,
-    };
-    sema.resolveTypeFields(stack_trace_ty) catch |err| switch (err) {
-        error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable,
-        else => |e| return e,
-    };
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
     const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, LazySrcLoc.unneeded) catch |err| switch (err) {
         error.AnalysisFail => @panic("std.builtin.StackTrace is corrupt"),
@@ -6935,8 +6861,8 @@ fn popErrorReturnTrace(
         // AstGen determined this result does not go to an error-handling expr (try/catch/return etc.), or
        // the result is comptime-known to be a non-error. Either way, pop unconditionally.
-        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(stack_trace_ty);
+        const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try stack_trace_ty.resolveFields(mod);
         const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
         const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
         const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
@@ -6961,8 +6887,8 @@ fn popErrorReturnTrace(
         defer then_block.instructions.deinit(gpa);
 
         // If non-error, then pop the error return trace by restoring the index.
-        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(stack_trace_ty);
+        const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try stack_trace_ty.resolveFields(mod);
         const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
         const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty);
         const field_name = try mod.intern_pool.getOrPutString(gpa, "index", .no_embedded_nulls);
@@ -7088,8 +7014,8 @@ fn zirCall(
     // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only
     // need to clean-up our own trace if we were passed to a non-error-handling expression.
     if (input_is_error or (pop_error_return_trace and return_ty.isError(mod))) {
-        const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(stack_trace_ty);
+        const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try stack_trace_ty.resolveFields(mod);
         const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index", .no_embedded_nulls);
         const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src);
 
@@ -7320,10 +7246,6 @@ const CallArgsInfo = union(enum) {
    ) CompileError!Air.Inst.Ref {
        const mod = sema.mod;
        const param_count = func_ty_info.param_types.len;
-        if (maybe_param_ty) |param_ty| switch (param_ty.toIntern()) {
-            .generic_poison_type => {},
-            else => try sema.queueFullTypeResolution(param_ty),
-        };
        const uncoerced_arg: Air.Inst.Ref = switch (cai) {
            inline .resolved, .call_builtin => |resolved| resolved.args[arg_index],
            .zir_call => |zir_call| arg_val: {
@@ -7550,24 +7472,19 @@ fn analyzeCall(
 
     const gpa = sema.gpa;
 
-    var is_generic_call = func_ty_info.is_generic;
+    const is_generic_call = func_ty_info.is_generic;
     var is_comptime_call = block.is_comptime or modifier == .compile_time;
     var is_inline_call = is_comptime_call or modifier == .always_inline or
         func_ty_info.cc == .Inline;
     var comptime_reason: ?*const Block.ComptimeReason = null;
     if (!is_inline_call and !is_comptime_call) {
-        if (sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) |ct| {
-            is_comptime_call = ct;
-            is_inline_call = ct;
-            if (ct) {
-                comptime_reason = &.{ .comptime_ret_ty = .{
-                    .func = func,
-                    .func_src = func_src,
-                    .return_ty = Type.fromInterned(func_ty_info.return_type),
-                } };
-            }
-        } else |err| switch (err) {
-            error.GenericPoison => is_generic_call = true,
-            else => |e| return e,
+        if (try sema.typeRequiresComptime(Type.fromInterned(func_ty_info.return_type))) {
+            is_comptime_call = true;
+            is_inline_call = true;
+            comptime_reason = &.{ .comptime_ret_ty = .{
+                .func = func,
+                .func_src = func_src,
+                .return_ty = Type.fromInterned(func_ty_info.return_type),
+            } };
         }
     }
 
@@ -7927,13 +7844,13 @@ fn analyzeCall(
 
     if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
-    try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
     if (sema.owner_func_index != .none and
         Type.fromInterned(func_ty_info.return_type).isError(mod))
     {
         ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
     }
 
     if (try sema.resolveValue(func)) |func_val| {
         if (mod.intern_pool.isFuncBody(func_val.toIntern())) {
+            try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = func_val.toIntern() }));
             try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern());
         }
     }
@@ -8336,7 +8253,6 @@ fn instantiateGenericCall(
             }
         } else {
             // The parameter is runtime-known.
-            try sema.queueFullTypeResolution(arg_ty);
             child_sema.inst_map.putAssumeCapacityNoClobber(param_inst, try child_block.addInst(.{
                 .tag = .arg,
                 .data = .{ .arg = .{
@@ -8370,8 +8286,6 @@ fn instantiateGenericCall(
     const callee = mod.funcInfo(callee_index);
     callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota);
 
-    try sema.addReferencedBy(call_src, callee.owner_decl);
-
     // Make a runtime call to the new function, making sure to omit the comptime args.
     const func_ty = Type.fromInterned(callee.ty);
     const func_ty_info = mod.typeToFunc(func_ty).?;
@@ -8387,8 +8301,6 @@ fn instantiateGenericCall(
         return error.GenericPoison;
     }
 
-    try sema.queueFullTypeResolution(Type.fromInterned(func_ty_info.return_type));
-
     if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);
 
     if (sema.owner_func_index != .none and
@@ -8397,6 +8309,7 @@ fn instantiateGenericCall(
         ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
     }
 
+    try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index }));
     try mod.ensureFuncBodyAnalysisQueued(callee_index);
 
     try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len +
         runtime_args.items.len);
@@ -8411,6 +8324,9 @@ fn instantiateGenericCall(
     });
     sema.appendRefsAssumeCapacity(runtime_args.items);
 
+    // `child_sema` is owned by us, so just take its exports.
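+    // Exports declared while analyzing the generic instantiation are merged into this Sema's list.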
+    try sema.exports.appendSlice(sema.gpa, child_sema.exports.items);
+
     if (ensure_result_used) {
         try sema.ensureResultUsed(block, sema.typeOf(result), call_src);
     }
@@ -8476,7 +8392,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
         else => |e| return e,
     };
     const indexable_ty = maybe_wrapped_indexable_ty.optEuBaseType(mod);
-    try sema.resolveTypeFields(indexable_ty);
+    try indexable_ty.resolveFields(mod);
     assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction
     if (indexable_ty.zigTypeTag(mod) == .Struct) {
         const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), mod);
@@ -8740,7 +8656,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     const operand = try sema.coerce(block, err_int_ty, uncasted_operand, operand_src);
 
     if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
-        const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntAdvanced(sema));
+        const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(mod));
         if (int > mod.global_error_set.count() or int == 0)
             return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
         return Air.internedToRef((try mod.intern(.{ .err = .{
@@ -8844,7 +8760,7 @@ fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) {
         .Enum => operand,
         .Union => blk: {
-            try sema.resolveTypeFields(operand_ty);
+            try operand_ty.resolveFields(mod);
             const tag_ty = operand_ty.unionTagType(mod) orelse {
                 return sema.fail(
                     block,
@@ -8986,7 +8902,7 @@ fn analyzeOptionalPayloadPtr(
     }
 
     const child_type = opt_type.optionalChild(zcu);
-    const child_pointer = try sema.ptrType(.{
+    const child_pointer = try zcu.ptrTypeSema(.{
         .child = child_type.toIntern(),
         .flags = .{
             .is_const = optional_ptr_ty.isConstPtr(zcu),
@@ -9010,13 +8926,13 @@ fn analyzeOptionalPayloadPtr(
                 const opt_payload_ptr = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr);
                 try sema.checkKnownAllocPtr(block, optional_ptr, opt_payload_ptr);
             }
-            return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern());
         }
         if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| {
             if (val.isNull(zcu)) {
                 return sema.fail(block, src, "unable to unwrap null", .{});
             }
-            return Air.internedToRef((try ptr_val.ptrOptPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrOptPayload(zcu)).toIntern());
         }
     }
 
@@ -9059,7 +8975,7 @@ fn zirOptionalPayload(
             // TODO https://github.com/ziglang/zig/issues/6597
             if (true) break :t operand_ty;
             const ptr_info = operand_ty.ptrInfo(mod);
-            break :t try sema.ptrType(.{
+            break :t try mod.ptrTypeSema(.{
                 .child = ptr_info.child,
                 .flags = .{
                     .alignment = ptr_info.flags.alignment,
@@ -9177,7 +9093,7 @@ fn analyzeErrUnionPayloadPtr(
     const err_union_ty = operand_ty.childType(zcu);
     const payload_ty = err_union_ty.errorUnionPayload(zcu);
-    const operand_pointer_ty = try sema.ptrType(.{
+    const operand_pointer_ty = try zcu.ptrTypeSema(.{
         .child = payload_ty.toIntern(),
         .flags = .{
             .is_const = operand_ty.isConstPtr(zcu),
@@ -9202,13 +9118,13 @@ fn analyzeErrUnionPayloadPtr(
                 const eu_payload_ptr = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand);
                 try sema.checkKnownAllocPtr(block, operand, eu_payload_ptr);
             }
-            return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern());
         }
         if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| {
             if (val.getErrorName(zcu).unwrap()) |name| {
                 return sema.failWithComptimeErrorRetTrace(block, src, name);
             }
-            return Air.internedToRef((try ptr_val.ptrEuPayload(sema)).toIntern());
+            return Air.internedToRef((try ptr_val.ptrEuPayload(zcu)).toIntern());
         }
     }
 
@@ -9656,17 +9572,8 @@ fn funcCommon(
         }
     }
 
-    var ret_ty_requires_comptime = false;
-    const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
-        ret_ty_requires_comptime = ret_comptime;
-        break :rp bare_return_type.isGenericPoison();
-    } else |err| switch (err) {
-        error.GenericPoison => rp: {
-            is_generic = true;
-            break :rp true;
-        },
-        else => |e| return e,
-    };
+    const ret_ty_requires_comptime = try sema.typeRequiresComptime(bare_return_type);
+    const ret_poison = bare_return_type.isGenericPoison();
 
     const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
     const param_types = block.params.items(.ty);
@@ -10014,8 +9921,8 @@ fn finishFunc(
     if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
         // Make sure that StackTrace's fields are resolved so that the backend can
         // lower this fn type.
-        const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        try sema.resolveTypeFields(unresolved_stack_trace_ty);
+        const unresolved_stack_trace_ty = try mod.getBuiltinType("StackTrace");
+        try unresolved_stack_trace_ty.resolveFields(mod);
     }
 
     return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty);
@@ -10074,21 +9981,7 @@ fn zirParam(
         }
     };
 
-    const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
-        error.GenericPoison => {
-            // The type is not available until the generic instantiation.
-            // We result the param instruction with a poison value and
-            // insert an anytype parameter.
-            try block.params.append(sema.arena, .{
-                .ty = .generic_poison_type,
-                .is_comptime = comptime_syntax,
-                .name = param_name,
-            });
-            sema.inst_map.putAssumeCapacity(inst, .generic_poison);
-            return;
-        },
-        else => |e| return e,
-    } or comptime_syntax;
+    const is_comptime = try sema.typeRequiresComptime(param_ty) or comptime_syntax;
 
     try block.params.append(sema.arena, .{
         .ty = param_ty.toIntern(),
@@ -10215,7 +10108,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         }
         return Air.internedToRef((try zcu.intValue(
             Type.usize,
-            (try operand_val.getUnsignedIntAdvanced(zcu, sema)).?,
+            (try operand_val.getUnsignedIntAdvanced(zcu, .sema)).?,
         )).toIntern());
     }
     const len = operand_ty.vectorLen(zcu);
@@ -10227,7 +10120,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             new_elem.* = (try zcu.undefValue(Type.usize)).toIntern();
             continue;
         }
-        const addr = try ptr_val.getUnsignedIntAdvanced(zcu, sema) orelse {
+        const addr = try ptr_val.getUnsignedIntAdvanced(zcu, .sema) orelse {
            // A vector element wasn't an integer pointer. This is a runtime operation.
            break :ct;
        };
@@ -11100,7 +10993,7 @@ const SwitchProngAnalysis = struct {
            const union_obj = zcu.typeToUnion(operand_ty).?;
            const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
            if (capture_byref) {
-                const ptr_field_ty = try sema.ptrType(.{
+                const ptr_field_ty = try zcu.ptrTypeSema(.{
                    .child = field_ty.toIntern(),
                    .flags = .{
                        .is_const = !operand_ptr_ty.ptrIsMutable(zcu),
@@ -11109,7 +11002,7 @@ const SwitchProngAnalysis = struct {
                    },
                });
                if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| {
-                    return Air.internedToRef((try union_ptr.ptrField(field_index, sema)).toIntern());
+                    return Air.internedToRef((try union_ptr.ptrField(field_index, zcu)).toIntern());
                }
                return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty);
            } else {
@@ -11203,7 +11096,7 @@ const SwitchProngAnalysis = struct {
                const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
                for (field_indices, dummy_captures) |field_idx, *dummy| {
                    const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]);
-                    const field_ptr_ty = try sema.ptrType(.{
+                    const field_ptr_ty = try zcu.ptrTypeSema(.{
                        .child = field_ty.toIntern(),
                        .flags = .{
                            .is_const = operand_ptr_info.flags.is_const,
@@ -11239,7 +11132,7 @@ const SwitchProngAnalysis = struct {
                if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| {
                    if (op_ptr_val.isUndef(zcu)) return zcu.undefRef(capture_ptr_ty);
-                    const field_ptr_val = try op_ptr_val.ptrField(first_field_index, sema);
+                    const field_ptr_val = try op_ptr_val.ptrField(first_field_index, zcu);
                    return Air.internedToRef((try zcu.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern());
                }
 
@@ -11452,7 +11345,7 @@ fn switchCond(
        },
 
        .Union => {
-            try sema.resolveTypeFields(operand_ty);
+            try operand_ty.resolveFields(mod);
            const enum_ty = operand_ty.unionTagType(mod) orelse {
                const msg = msg: {
                    const msg = try sema.errMsg(src, "switch on union with no attached enum", .{});
@@ -13744,7 +13637,7 @@ fn maybeErrorUnwrap(
                    return true;
                }
 
-                const panic_fn = try sema.getBuiltin("panicUnwrapError");
+                const panic_fn = try mod.getBuiltin("panicUnwrapError");
                const err_return_trace = try sema.getErrorReturnTrace(block);
                const args: [2]Air.Inst.Ref = .{ err_return_trace, operand };
                try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
@@ -13754,7 +13647,7 @@ fn maybeErrorUnwrap(
                const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
                const msg_inst = try sema.resolveInst(inst_data.operand);
 
-                const panic_fn = try sema.getBuiltin("panic");
+                const panic_fn = try mod.getBuiltin("panic");
                const err_return_trace = try sema.getErrorReturnTrace(block);
                const args: [3]Air.Inst.Ref = .{ msg_inst, err_return_trace, .null_value };
                try sema.callBuiltin(block, operand_src, panic_fn, .auto, &args, .@"safety check");
@@ -13819,7 +13712,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, .{
        .needed_comptime_reason = "field name must be comptime-known",
    });
-    try sema.resolveTypeFields(ty);
+    try ty.resolveFields(mod);
    const ip = &mod.intern_pool;
 
    const has_field = hf: {
@@ -13934,7 +13827,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
        return sema.fail(block, operand_src, "file path name cannot be empty", .{});
    }
 
-    const val = mod.embedFile(block.getFileScope(mod), name, operand_src.upgrade(mod)) catch |err| switch (err) {
+    const val = mod.embedFile(block.getFileScope(mod), name, operand_src) catch |err| switch (err) {
        error.ImportOutsideModulePath => {
            return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name});
        },
@@ -13999,7 +13892,7 @@ fn zirShl(
            return mod.undefRef(sema.typeOf(lhs));
        }
        // If rhs is 0, return lhs without doing any calculations.
-        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+        if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
            return lhs;
        }
        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
@@ -14164,7 +14057,7 @@ fn zirShr(
            return mod.undefRef(lhs_ty);
        }
        // If rhs is 0, return lhs without doing any calculations.
-        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+        if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
            return lhs;
        }
        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
@@ -14211,7 +14104,7 @@ fn zirShr(
            if (air_tag == .shr_exact) {
                // Detect if any ones would be shifted out.
                const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod);
-                if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
+                if (!(try truncated.compareAllWithZeroSema(.eq, mod))) {
                    return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
                }
            }
@@ -14635,12 +14528,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    try sema.requireRuntimeBlock(block, src, runtime_src);
 
    if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
            .child = result_ty.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try sema.ptrType(.{
+        const elem_ptr_ty = try mod.ptrTypeSema(.{
            .child = resolved_elem_ty.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });
@@ -14723,7 +14616,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins
                    .none => null,
                    else => Value.fromInterned(ptr_info.sentinel),
                },
-                .len = try val.sliceLen(sema),
+                .len = try val.sliceLen(mod),
            };
        },
        .One => {
@@ -14965,12 +14858,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    }
 
    if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
            .child = result_ty.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });
        const alloc = try block.addTy(.alloc, alloc_ty);
-        const elem_ptr_ty = try sema.ptrType(.{
+        const elem_ptr_ty = try mod.ptrTypeSema(.{
            .child = lhs_info.elem_type.toIntern(),
            .flags = .{ .address_space = ptr_as },
        });
@@ -15158,7 +15051,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
        .Int, .ComptimeInt, .ComptimeFloat => {
            if (maybe_lhs_val) |lhs_val| {
                if (!lhs_val.isUndef(mod)) {
-                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                        const scalar_zero = switch (scalar_tag) {
                            .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                            .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15173,7 +15066,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                // TODO: if the RHS is one, return the LHS directly
@@ -15294,7 +15187,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                if (lhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                } else {
-                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                        const scalar_zero = switch (scalar_tag) {
                            .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                            .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15309,7 +15202,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                // TODO: if the RHS is one, return the LHS directly
@@ -15461,7 +15354,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
            // If the lhs is undefined, result is undefined.
            if (maybe_lhs_val) |lhs_val| {
                if (!lhs_val.isUndef(mod)) {
-                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                        const scalar_zero = switch (scalar_tag) {
                            .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                            .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15476,7 +15369,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                // TODO: if the RHS is one, return the LHS directly
@@ -15571,7 +15464,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
            // If the lhs is undefined, result is undefined.
            if (maybe_lhs_val) |lhs_val| {
                if (!lhs_val.isUndef(mod)) {
-                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                        const scalar_zero = switch (scalar_tag) {
                            .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                            .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15586,7 +15479,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
            }
@@ -15811,7 +15704,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                if (lhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, lhs_src);
                }
-                if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                    const scalar_zero = switch (scalar_tag) {
                        .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
                        .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
@@ -15830,18 +15723,18 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) {
                    return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                }
                if (maybe_lhs_val) |lhs_val| {
                    const rem_result = try sema.intRem(resolved_type, lhs_val, rhs_val);
                    // If this answer could possibly be different by doing `intMod`,
                    // we must emit a compile error. Otherwise, it's OK.
-                    if (!(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) and
-                        !(try rem_result.compareAllWithZeroAdvanced(.eq, sema)))
+                    if (!(try lhs_val.compareAllWithZeroSema(.gte, mod)) and
+                        !(try rem_result.compareAllWithZeroSema(.eq, mod)))
                    {
                        return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                    }
@@ -15859,14 +15752,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.gte, mod))) {
                    return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
                }
                if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+                    if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroSema(.gte, mod))) {
                        return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                    }
                    return Air.internedToRef((try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod)).toIntern());
@@ -15917,8 +15810,8 @@ fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileErr
    // resorting to BigInt first.
    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema);
    const limbs_q = try sema.arena.alloc(
        math.big.Limb,
        lhs_bigint.limbs.len,
@@ -15994,7 +15887,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                if (maybe_lhs_val) |lhs_val| {
@@ -16010,7 +15903,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
            }
@@ -16089,7 +15982,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
                if (maybe_lhs_val) |lhs_val| {
@@ -16105,7 +15998,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
                if (rhs_val.isUndef(mod)) {
                    return sema.failWithUseOfUndef(block, rhs_src);
                }
-                if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
+                if (!(try rhs_val.compareAllWithZeroSema(.neq, mod))) {
                    return sema.failWithDivideByZero(block, rhs_src);
                }
            }
@@ -16192,12 +16085,12 @@ fn zirOverflowArithmetic(
                // to the result, even if it is undefined.
                // Otherwise, if either of the arguments is undefined, undefined is returned.
                if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
@@ -16218,7 +16111,7 @@ fn zirOverflowArithmetic(
                if (maybe_rhs_val) |rhs_val| {
                    if (rhs_val.isUndef(mod)) {
                        break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
-                    } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    } else if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    } else if (maybe_lhs_val) |lhs_val| {
                        if (lhs_val.isUndef(mod)) {
@@ -16237,7 +16130,7 @@ fn zirOverflowArithmetic(
                const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
-                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                        } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
@@ -16247,7 +16140,7 @@ fn zirOverflowArithmetic(
 
                if (maybe_rhs_val) |rhs_val| {
                    if (!rhs_val.isUndef(mod)) {
-                        if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                        } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
@@ -16271,12 +16164,12 @@ fn zirOverflowArithmetic(
                // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
                // Otherwise if either of the arguments is undefined, both results are undefined.
                if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
                if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroSema(.eq, mod))) {
                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                    }
                }
@@ -16427,7 +16320,7 @@ fn analyzeArithmetic(
                // overflow (max_int), causing illegal behavior.
                // For floats: either operand being undef makes the result undef.
                if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                        return casted_rhs;
                    }
                }
@@ -16439,7 +16332,7 @@ fn analyzeArithmetic(
                        return mod.undefRef(resolved_type);
                    }
                }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        return casted_lhs;
                    }
                }
@@ -16471,7 +16364,7 @@ fn analyzeArithmetic(
                // If either of the operands are zero, the other operand is returned.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                        return casted_rhs;
                    }
                }
@@ -16479,7 +16372,7 @@ fn analyzeArithmetic(
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
@@ -16492,7 +16385,7 @@ fn analyzeArithmetic(
                // If either of the operands are zero, then the other operand is returned.
                // If either of the operands are undefined, the result is undefined.
                if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroSema(.eq, mod))) {
                        return casted_rhs;
                    }
                }
@@ -16500,7 +16393,7 @@ fn analyzeArithmetic(
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        return casted_lhs;
                    }
                    if (maybe_lhs_val) |lhs_val| {
@@ -16541,7 +16434,7 @@ fn analyzeArithmetic(
                        return mod.undefRef(resolved_type);
                    }
                }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        return casted_lhs;
                    }
                }
@@ -16576,7 +16469,7 @@ fn analyzeArithmetic(
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        return casted_lhs;
                    }
                }
@@ -16597,7 +16490,7 @@ fn analyzeArithmetic(
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        return casted_lhs;
                    }
                }
@@ -16644,7 +16537,7 @@ fn analyzeArithmetic(
                    if (lhs_val.isNan(mod)) {
                        return Air.internedToRef(lhs_val.toIntern());
                    }
-                    if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: {
+                    if (try lhs_val.compareAllWithZeroSema(.eq, mod)) lz: {
                        if (maybe_rhs_val) |rhs_val| {
                            if (rhs_val.isNan(mod)) {
                                return Air.internedToRef(rhs_val.toIntern());
@@ -16675,7 +16568,7 @@ fn analyzeArithmetic(
                    if (rhs_val.isNan(mod)) {
                        return Air.internedToRef(rhs_val.toIntern());
                    }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) rz: {
                        if (maybe_lhs_val) |lhs_val| {
                            if (lhs_val.isInf(mod)) {
                                return Air.internedToRef((try mod.floatValue(resolved_type, std.math.nan(f128))).toIntern());
@@ -16727,7 +16620,7 @@ fn analyzeArithmetic(
                };
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
-                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                            return Air.internedToRef(zero_val.toIntern());
                        }
@@ -16740,7 +16633,7 @@ fn analyzeArithmetic(
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
@@ -16772,7 +16665,7 @@ fn analyzeArithmetic(
                };
                if (maybe_lhs_val) |lhs_val| {
                    if (!lhs_val.isUndef(mod)) {
-                        if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                        if (try lhs_val.compareAllWithZeroSema(.eq, mod)) {
                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                            return Air.internedToRef(zero_val.toIntern());
                        }
@@ -16785,7 +16678,7 @@ fn analyzeArithmetic(
                    if (rhs_val.isUndef(mod)) {
                        return mod.undefRef(resolved_type);
                    }
-                    if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+                    if (try rhs_val.compareAllWithZeroSema(.eq, mod)) {
                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                        return Air.internedToRef(zero_val.toIntern());
                    }
@@ -16881,7 +16774,7 @@ fn analyzePtrArithmetic(
    const new_ptr_ty = t: {
        // Calculate the new pointer alignment.
-        // This code is duplicated in `elemPtrType`.
+        // This code is duplicated in `Type.elemPtrType`.
        if (ptr_info.flags.alignment == .none) {
            // ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
            break :t ptr_ty;
@@ -16890,7 +16783,7 @@ fn analyzePtrArithmetic(
        // it being a multiple of the type size.
        const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
        const addend = if (opt_off_val) |off_val| a: {
-            const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntAdvanced(sema));
+            const off_int = try sema.usizeCast(block, offset_src, try off_val.toUnsignedIntSema(mod));
            break :a elem_size * off_int;
        } else elem_size;
 
@@ -16903,7 +16796,7 @@ fn analyzePtrArithmetic(
        ));
        assert(new_align != .none);
 
-        break :t try sema.ptrType(.{
+        break :t try mod.ptrTypeSema(.{
            .child = ptr_info.child,
            .sentinel = ptr_info.sentinel,
            .flags = .{
@@ -16922,14 +16815,14 @@ fn analyzePtrArithmetic(
            if (opt_off_val) |offset_val| {
                if (ptr_val.isUndef(mod)) return mod.undefRef(new_ptr_ty);
 
-                const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntAdvanced(sema));
+                const offset_int = try sema.usizeCast(block, offset_src, try offset_val.toUnsignedIntSema(mod));
                if (offset_int == 0) return ptr;
                if (air_tag == .ptr_sub) {
                    const elem_size = try sema.typeAbiSize(Type.fromInterned(ptr_info.child));
                    const new_ptr_val = try sema.ptrSubtract(block, op_src, ptr_val, offset_int * elem_size, new_ptr_ty);
                    return Air.internedToRef(new_ptr_val.toIntern());
                } else {
-                    const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, sema), new_ptr_ty);
+                    const new_ptr_val = try mod.getCoerced(try ptr_val.ptrElem(offset_int, mod), new_ptr_ty);
                    return Air.internedToRef(new_ptr_val.toIntern());
                }
            } else break :rs offset_src;
@@ -17028,7 +16921,6 @@ fn zirAsm(
            // Indicate the output is the asm instruction return value.
            arg.* = .none;
            const out_ty = try sema.resolveType(block, ret_ty_src, output.data.operand);
-            try sema.queueFullTypeResolution(out_ty);
            expr_ty = Air.internedToRef(out_ty.toIntern());
        } else {
            arg.* = try sema.resolveInst(output.data.operand);
        }
@@ -17063,7 +16955,6 @@ fn zirAsm(
            .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src),
            else => {
                arg.* = uncasted_arg;
-                try sema.queueFullTypeResolution(uncasted_arg_ty);
            },
        }
 
@@ -17222,7 +17113,7 @@ fn analyzeCmpUnionTag(
 ) CompileError!Air.Inst.Ref {
    const mod = sema.mod;
    const union_ty = sema.typeOf(un);
-    try sema.resolveTypeFields(union_ty);
+    try union_ty.resolveFields(mod);
    const union_tag_ty = union_ty.unionTagType(mod) orelse {
        const msg = msg: {
            const msg = try sema.errMsg(un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
@@ -17438,9 +17329,6 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
        => {},
    }
    const val = try ty.lazyAbiSize(mod);
-    if (val.isLazySize(mod)) {
-        try sema.queueFullTypeResolution(ty);
-    }
    return Air.internedToRef(val.toIntern());
 }
 
@@ -17480,7 +17368,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
        .AnyFrame,
        => {},
    }
-    const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
+    const bit_size = try operand_ty.bitSizeAdvanced(mod, .sema);
    return mod.intRef(Type.comptime_int, bit_size);
 }
 
@@ -17507,7 +17395,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
        .@"comptime" => |index| return Air.internedToRef(index),
        .runtime => |index| index,
        .decl_val => |decl_index| return sema.analyzeDeclVal(block, src, decl_index),
-        .decl_ref => |decl_index| return sema.analyzeDeclRef(decl_index),
+        .decl_ref => |decl_index| return sema.analyzeDeclRef(src, decl_index),
    };
 
    // The comptime case is handled already above. Runtime case below.
@@ -17666,7 +17554,7 @@ fn zirBuiltinSrc(
        } });
    };
 
-    const src_loc_ty = try sema.getBuiltinType("SourceLocation");
+    const src_loc_ty = try mod.getBuiltinType("SourceLocation");
    const fields = .{
        // file: [:0]const u8,
        file_name_val,
@@ -17690,7 +17578,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
    const src = block.nodeOffset(inst_data.src_node);
    const ty = try sema.resolveType(block, src, inst_data.operand);
-    const type_info_ty = try sema.getBuiltinType("Type");
+    const type_info_ty = try mod.getBuiltinType("Type");
    const type_info_tag_ty = type_info_ty.unionTagType(mod).?;
 
    if (ty.typeDeclInst(mod)) |type_decl_inst| {
@@ -17771,7 +17659,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                .ty = new_decl_ty.toIntern(),
                .storage = .{ .elems = param_vals },
            } });
-            const slice_ty = (try sema.ptrType(.{
+            const slice_ty = (try mod.ptrTypeSema(.{
                .child = param_info_ty.toIntern(),
                .flags = .{
                    .size = .Slice,
@@ -17801,7 +17689,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                func_ty_info.return_type,
            } });
 
-            const callconv_ty = try sema.getBuiltinType("CallingConvention");
+            const callconv_ty = try mod.getBuiltinType("CallingConvention");
 
            const field_values = .{
                // calling_convention: CallingConvention,
@@ -17835,7 +17723,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
            const int_info_decl = mod.declPtr(int_info_decl_index);
            const int_info_ty = int_info_decl.val.toType();
 
-            const signedness_ty = try sema.getBuiltinType("Signedness");
+            const signedness_ty = try mod.getBuiltinType("Signedness");
            const info = ty.intInfo(mod);
            const field_values = .{
                // signedness: Signedness,
@@ -17883,12 +17771,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
            else
                try Type.fromInterned(info.child).lazyAbiAlignment(mod);
 
-            const addrspace_ty = try sema.getBuiltinType("AddressSpace");
+            const addrspace_ty = try mod.getBuiltinType("AddressSpace");
            const pointer_ty = t: {
                const decl_index = (try sema.namespaceLookup(
                    block,
                    src,
-                    (try sema.getBuiltinType("Type")).getNamespaceIndex(mod),
+                    (try mod.getBuiltinType("Type")).getNamespaceIndex(mod),
                    try ip.getOrPutString(gpa, "Pointer", .no_embedded_nulls),
                )).?;
                try sema.ensureDeclAnalyzed(decl_index);
@@ -18037,8 +17925,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                break :t set_field_ty_decl.val.toType();
            };
 
-            try sema.queueFullTypeResolution(error_field_ty);
-
            // Build our list of Error values
            // Optional value is only null if anyerror
            // Value can be zero-length slice otherwise
@@ -18089,7 +17975,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
            };
 
            // Build our ?[]const Error value
-            const slice_errors_ty = try sema.ptrType(.{
+            const slice_errors_ty = try mod.ptrTypeSema(.{
                .child = error_field_ty.toIntern(),
                .flags = .{
                    .size = .Slice,
@@ -18235,7 +18121,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                .ty = fields_array_ty.toIntern(),
                .storage = .{ .elems = enum_field_vals },
            } });
-            const slice_ty = (try sema.ptrType(.{
+            const slice_ty = (try mod.ptrTypeSema(.{
                .child = enum_field_ty.toIntern(),
                .flags = .{
                    .size = .Slice,
@@ -18315,7 +18201,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                break :t union_field_ty_decl.val.toType();
            };
 
-            try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
sema.resolveTypeLayout(ty); // Getting alignment requires type layout + try ty.resolveLayout(mod); // Getting alignment requires type layout const union_obj = mod.typeToUnion(ty).?; const tag_type = union_obj.loadTagType(ip); const layout = union_obj.getLayout(ip); @@ -18351,7 +18237,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const alignment = switch (layout) { - .auto, .@"extern" => try sema.unionFieldAlignment(union_obj, @intCast(field_index)), + .auto, .@"extern" => try mod.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(field_index), .sema), .@"packed" => .none, }; @@ -18379,7 +18265,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = array_fields_ty.toIntern(), .storage = .{ .elems = union_field_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = union_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18412,7 +18298,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decl_index = (try sema.namespaceLookup( block, src, - (try sema.getBuiltinType("Type")).getNamespaceIndex(mod), + (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -18465,7 +18351,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t struct_field_ty_decl.val.toType(); }; - try sema.resolveTypeLayout(ty); // Getting alignment requires type layout + try ty.resolveLayout(mod); // Getting alignment requires type layout var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); @@ -18505,7 +18391,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; - try sema.resolveTypeLayout(Type.fromInterned(field_ty)); + try Type.fromInterned(field_ty).resolveLayout(mod); const is_comptime = field_val != .none; const opt_default_val = if (is_comptime) Value.fromInterned(field_val) else null; @@ -18534,7 +18420,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len); - try sema.resolveStructFieldInits(ty); + try ty.resolveStructFieldInits(mod); for (struct_field_vals, 0..) 
|*field_val, field_index| { const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name| @@ -18573,10 +18459,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(opt_default_val); const alignment = switch (struct_type.layout) { .@"packed" => .none, - else => try sema.structFieldAlignment( + else => try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), field_ty, struct_type.layout, + .sema, ), }; @@ -18608,7 +18495,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = array_fields_ty.toIntern(), .storage = .{ .elems = struct_field_vals }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = struct_field_ty.toIntern(), .flags = .{ .size = .Slice, @@ -18644,7 +18531,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decl_index = (try sema.namespaceLookup( block, src, - (try sema.getBuiltinType("Type")).getNamespaceIndex(mod), + (try mod.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, "ContainerLayout", .no_embedded_nulls), )).?; try sema.ensureDeclAnalyzed(decl_index); @@ -18688,7 +18575,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t type_opaque_ty_decl.val.toType(); }; - try sema.resolveTypeFields(ty); + try ty.resolveFields(mod); const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespaceIndex(mod)); const field_values = .{ @@ -18730,7 +18617,6 @@ fn typeInfoDecls( const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); break :t declaration_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(declaration_ty); var decl_vals = std.ArrayList(InternPool.Index).init(gpa); defer decl_vals.deinit(); @@ -18748,7 +18634,7 @@ fn typeInfoDecls( .ty = array_decl_ty.toIntern(), .storage = .{ .elems = decl_vals.items }, } }); - const slice_ty = (try sema.ptrType(.{ + const slice_ty = (try mod.ptrTypeSema(.{ .child = declaration_ty.toIntern(), .flags = .{ .size = .Slice, @@ -19348,7 +19234,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand_ty = sema.typeOf(operand); const ptr_info = operand_ty.ptrInfo(mod); - const res_ty = try sema.ptrType(.{ + const res_ty = try mod.ptrTypeSema(.{ .child = err_union_ty.errorUnionPayload(mod).toIntern(), .flags = .{ .is_const = ptr_info.flags.is_const, @@ -19581,11 +19467,11 @@ fn retWithErrTracing( else => true, }; const gpa = sema.gpa; - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const return_err_fn = try sema.getBuiltin("returnError"); + const return_err_fn = try mod.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; if (!need_check) { @@ -19788,7 +19674,7 @@ fn analyzeRet( return sema.failWithOwnedErrorMsg(block, msg); } - try sema.resolveTypeLayout(sema.fn_ret_ty); + try sema.fn_ret_ty.resolveLayout(mod); try sema.validateRuntimeValue(block, operand_src, operand); @@ -19870,7 +19756,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, else => {}, } - const align_bytes = (try 
val.getUnsignedIntAdvanced(mod, sema)).?; + const align_bytes = (try val.getUnsignedIntAdvanced(mod, .sema)).?; break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes); } else .none; @@ -19904,7 +19790,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air elem_ty.fmt(mod), bit_offset, bit_offset - host_size * 8, host_size, }); } - const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, sema); + const elem_bit_size = try elem_ty.bitSizeAdvanced(mod, .sema); if (elem_bit_size > host_size * 8 - bit_offset) { return sema.fail(block, bitoffset_src, "packed type '{}' at bit offset {} ends {} bits after the end of a {} byte host integer", .{ elem_ty.fmt(mod), bit_offset, elem_bit_size - (host_size * 8 - bit_offset), host_size, @@ -19945,7 +19831,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }); } - const ty = try sema.ptrType(.{ + const ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = sentinel, .flags = .{ @@ -20036,7 +19922,7 @@ fn structInitEmpty( const mod = sema.mod; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); // The init values to use for the struct instance. const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); @@ -20107,7 +19993,6 @@ fn unionInit( try sema.requireRuntimeBlock(block, init_src, null); _ = union_ty_src; - try sema.queueFullTypeResolution(union_ty); return block.addUnionInit(union_ty, field_index, init); } @@ -20136,7 +20021,7 @@ fn zirStructInit( else => |e| return e, }; const resolved_ty = result_ty.optEuBaseType(mod); - try sema.resolveTypeLayout(resolved_ty); + try resolved_ty.resolveLayout(mod); if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. 
@@ -20177,7 +20062,7 @@ fn zirStructInit(
             const field_ty = resolved_ty.structFieldType(field_index, mod);
             field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src);
             if (!is_packed) {
-                try sema.resolveStructFieldInits(resolved_ty);
+                try resolved_ty.resolveStructFieldInits(mod);
                 if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| {
                     const init_val = (try sema.resolveValue(field_inits[field_index])) orelse {
                         return sema.failWithNeededComptime(block, field_src, .{
@@ -20250,7 +20135,7 @@ fn zirStructInit(

         if (is_ref) {
             const target = mod.getTarget();
-            const alloc_ty = try sema.ptrType(.{
+            const alloc_ty = try mod.ptrTypeSema(.{
                 .child = result_ty.toIntern(),
                 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
             });
@@ -20264,7 +20149,6 @@ fn zirStructInit(
         }

         try sema.requireRuntimeBlock(block, src, null);
-        try sema.queueFullTypeResolution(resolved_ty);
         const union_val = try block.addUnionInit(resolved_ty, field_index, init_inst);
         return sema.coerce(block, result_ty, union_val, src);
     }
@@ -20341,7 +20225,7 @@ fn finishStructInit(
             continue;
         }

-        try sema.resolveStructFieldInits(struct_ty);
+        try struct_ty.resolveStructFieldInits(mod);

         const field_init = struct_type.fieldInit(ip, i);
         if (field_init == .none) {
@@ -20411,9 +20295,9 @@ fn finishStructInit(
     }

     if (is_ref) {
-        try sema.resolveStructLayout(struct_ty);
+        try struct_ty.resolveLayout(mod);
         const target = sema.mod.getTarget();
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
@@ -20433,8 +20317,7 @@ fn finishStructInit(
         .init_node_offset = init_src.offset.node_offset.x,
         .elem_index = @intCast(runtime_index),
     } }));
-    try sema.resolveStructFieldInits(struct_ty);
-    try sema.queueFullTypeResolution(struct_ty);
+    try struct_ty.resolveStructFieldInits(mod);
     const struct_val = try block.addAggregateInit(struct_ty, field_inits);
     return sema.coerce(block, result_ty, struct_val, init_src);
 }
@@ -20543,7 +20426,7 @@ fn structInitAnon(

     if (is_ref) {
         const target = mod.getTarget();
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = tuple_ty,
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
@@ -20557,7 +20440,7 @@ fn structInitAnon(
             };
             extra_index = item.end;

-            const field_ptr_ty = try sema.ptrType(.{
+            const field_ptr_ty = try mod.ptrTypeSema(.{
                 .child = field_ty,
                 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
             });
@@ -20650,7 +20533,7 @@ fn zirArrayInit(
         dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src);
         if (is_tuple) {
             if (array_ty.structFieldIsComptime(i, mod))
-                try sema.resolveStructFieldInits(array_ty);
+                try array_ty.resolveStructFieldInits(mod);
             if (try array_ty.structFieldValueComptime(mod, i)) |field_val| {
                 const init_val = try sema.resolveValue(dest.*) orelse {
                     return sema.failWithNeededComptime(block, elem_src, .{
@@ -20694,11 +20577,10 @@ fn zirArrayInit(
         .init_node_offset = src.offset.node_offset.x,
         .elem_index = runtime_index,
     } }));
-    try sema.queueFullTypeResolution(array_ty);

     if (is_ref) {
         const target = mod.getTarget();
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = result_ty.toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
@@ -20707,7 +20589,7 @@ fn zirArrayInit(

         if (is_tuple) {
             for (resolved_args, 0..) |arg, i| {
-                const elem_ptr_ty = try sema.ptrType(.{
+                const elem_ptr_ty = try mod.ptrTypeSema(.{
                     .child = array_ty.structFieldType(i, mod).toIntern(),
                     .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
                 });
@@ -20720,7 +20602,7 @@ fn zirArrayInit(
             return sema.makePtrConst(block, alloc);
         }

-        const elem_ptr_ty = try sema.ptrType(.{
+        const elem_ptr_ty = try mod.ptrTypeSema(.{
             .child = array_ty.elemType2(mod).toIntern(),
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
@@ -20808,14 +20690,14 @@ fn arrayInitAnon(

     if (is_ref) {
         const target = sema.mod.getTarget();
-        const alloc_ty = try sema.ptrType(.{
+        const alloc_ty = try mod.ptrTypeSema(.{
             .child = tuple_ty,
             .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
         });
         const alloc = try block.addTy(.alloc, alloc_ty);
         for (operands, 0..) |operand, i_usize| {
             const i: u32 = @intCast(i_usize);
-            const field_ptr_ty = try sema.ptrType(.{
+            const field_ptr_ty = try mod.ptrTypeSema(.{
                 .child = types[i],
                 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
             });
@@ -20885,7 +20767,7 @@ fn fieldType(
     const ip = &mod.intern_pool;
     var cur_ty = aggregate_ty;
     while (true) {
-        try sema.resolveTypeFields(cur_ty);
+        try cur_ty.resolveFields(mod);
         switch (cur_ty.zigTypeTag(mod)) {
             .Struct => switch (ip.indexToKey(cur_ty.toIntern())) {
                 .anon_struct_type => |anon_struct| {
@@ -20936,8 +20818,8 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
 fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const ip = &mod.intern_pool;
-    const stack_trace_ty = try sema.getBuiltinType("StackTrace");
-    try sema.resolveTypeFields(stack_trace_ty);
+    const stack_trace_ty = try mod.getBuiltinType("StackTrace");
+    try stack_trace_ty.resolveFields(mod);
     const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
     const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
@@ -20971,9 +20853,6 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)});
     }
     const val = try ty.lazyAbiAlignment(mod);
-    if (val.isLazyAlign(mod)) {
-        try sema.queueFullTypeResolution(ty);
-    }
     return Air.internedToRef(val.toIntern());
 }
@@ -21148,7 +21027,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const mod = sema.mod;
     const ip = &mod.intern_pool;

-    try sema.resolveTypeLayout(operand_ty);
+    try operand_ty.resolveLayout(mod);
     const enum_ty = switch (operand_ty.zigTypeTag(mod)) {
         .EnumLiteral => {
             const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined);
@@ -21224,7 +21103,7 @@ fn zirReify(
             },
         },
     };
-    const type_info_ty = try sema.getBuiltinType("Type");
+    const type_info_ty = try mod.getBuiltinType("Type");
     const uncasted_operand = try sema.resolveInst(extra.operand);
     const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
     const val = try sema.resolveConstDefinedValue(block, operand_src, type_info, .{
@@ -21258,7 +21137,7 @@ fn zirReify(
             );
             const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);

-            const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema));
+            const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod));
             const ty = try mod.intType(signedness, bits);
             return Air.internedToRef(ty.toIntern());
         },
@@ -21273,7 +21152,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "child", .no_embedded_nulls),
             ).?);

-            const len: u32 = @intCast(try len_val.toUnsignedIntAdvanced(sema));
+            const len: u32 = @intCast(try len_val.toUnsignedIntSema(mod));
             const child_ty = child_val.toType();

             try sema.checkVectorElemType(block, src, child_ty);
@@ -21291,7 +21170,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "bits", .no_embedded_nulls),
             ).?);

-            const bits: u16 = @intCast(try bits_val.toUnsignedIntAdvanced(sema));
+            const bits: u16 = @intCast(try bits_val.toUnsignedIntSema(mod));
             const ty = switch (bits) {
                 16 => Type.f16,
                 32 => Type.f32,
@@ -21341,7 +21220,7 @@ fn zirReify(
                 return sema.fail(block, src, "alignment must fit in 'u32'", .{});
             }

-            const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?;
+            const alignment_val_int = (try alignment_val.getUnsignedIntAdvanced(mod, .sema)).?;
             if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) {
                 return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int});
             }
@@ -21349,7 +21228,7 @@ fn zirReify(

             const elem_ty = child_val.toType();
             if (abi_align != .none) {
-                try sema.resolveTypeLayout(elem_ty);
+                try elem_ty.resolveLayout(mod);
             }

             const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);
@@ -21393,7 +21272,7 @@ fn zirReify(
                 }
             }

-            const ty = try sema.ptrType(.{
+            const ty = try mod.ptrTypeSema(.{
                 .child = elem_ty.toIntern(),
                 .sentinel = actual_sentinel,
                 .flags = .{
@@ -21422,7 +21301,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "sentinel", .no_embedded_nulls),
             ).?);

-            const len = try len_val.toUnsignedIntAdvanced(sema);
+            const len = try len_val.toUnsignedIntSema(mod);
             const child_ty = child_val.toType();
             const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: {
                 const ptr_ty = try mod.singleMutPtrType(child_ty);
@@ -21529,7 +21408,7 @@ fn zirReify(
             const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);

             // Decls
-            if (try decls_val.sliceLen(sema) > 0) {
+            if (try decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified structs must have no decls", .{});
             }
@@ -21562,7 +21441,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "is_exhaustive", .no_embedded_nulls),
             ).?);

-            if (try decls_val.sliceLen(sema) > 0) {
+            if (try decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified enums must have no decls", .{});
             }
@@ -21580,7 +21459,7 @@ fn zirReify(
             ).?);

             // Decls
-            if (try decls_val.sliceLen(sema) > 0) {
+            if (try decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified opaque must have no decls", .{});
             }
@@ -21628,7 +21507,7 @@ fn zirReify(
                 try ip.getOrPutString(gpa, "decls", .no_embedded_nulls),
             ).?);

-            if (try decls_val.sliceLen(sema) > 0) {
+            if (try decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified unions must have no decls", .{});
             }
             const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
@@ -21987,7 +21866,7 @@ fn reifyUnion(
         field_ty.* = field_type_val.toIntern();

         if (any_aligns) {
-            const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema);
+            const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod);
             if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
                 // TODO: better source location
                 return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
@@ -22032,7 +21911,7 @@ fn reifyUnion(
             field_ty.* = field_type_val.toIntern();

             if (any_aligns) {
-                const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntAdvanced(sema);
+                const byte_align = try (try field_info.fieldValue(mod, 2)).toUnsignedIntSema(mod);
                 if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
                     // TODO: better source location
                     return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
@@ -22089,6 +21968,8 @@ fn reifyUnion(
     loaded_union.flagsPtr(ip).status = .have_field_types;

     try mod.finalizeAnonDecl(new_decl_index);
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
+
     try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
 }
@@ -22162,7 +22043,7 @@ fn reifyStruct(
         if (field_is_comptime) any_comptime_fields = true;
         if (field_default_value != .none) any_default_inits = true;
-        switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, sema)) {
+        switch (try field_alignment_val.orderAgainstZeroAdvanced(mod, .sema)) {
             .eq => {},
             .gt => any_aligned_fields = true,
             .lt => unreachable,
@@ -22245,7 +22126,7 @@ fn reifyStruct(
                 return sema.fail(block, src, "alignment must fit in 'u32'", .{});
             }

-            const byte_align = try field_alignment_val.toUnsignedIntAdvanced(sema);
+            const byte_align = try field_alignment_val.toUnsignedIntSema(mod);
             if (byte_align == 0) {
                 if (layout != .@"packed") {
                     struct_type.field_aligns.get(ip)[field_idx] = .none;
@@ -22331,7 +22212,7 @@ fn reifyStruct(
         var fields_bit_sum: u64 = 0;
         for (0..struct_type.field_types.len) |field_idx| {
             const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]);
-            sema.resolveTypeLayout(field_ty) catch |err| switch (err) {
+            field_ty.resolveLayout(mod) catch |err| switch (err) {
                 error.AnalysisFail => {
                     const msg = sema.err orelse return err;
                     try sema.errNote(src, msg, "while checking a field of this struct", .{});
@@ -22353,11 +22234,13 @@ fn reifyStruct(
     }

     try mod.finalizeAnonDecl(new_decl_index);
+    try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
+
     try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index }));
     return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none));
 }

 fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref {
-    const va_list_ty = try sema.getBuiltinType("VaList");
+    const va_list_ty = try sema.mod.getBuiltinType("VaList");
     const va_list_ptr = try sema.mod.singleMutPtrType(va_list_ty);

     const inst = try sema.resolveInst(zir_ref);
@@ -22396,7 +22279,7 @@ fn zirCVaCopy(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
     const va_list_src = block.builtinCallArgSrc(extra.node, 0);

     const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.operand);
-    const va_list_ty = try sema.getBuiltinType("VaList");
+    const va_list_ty = try sema.mod.getBuiltinType("VaList");

     try sema.requireRuntimeBlock(block, src, null);
     return block.addTyOp(.c_va_copy, va_list_ty, va_list_ref);
@@ -22416,7 +22299,7 @@ fn zirCVaEnd(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C

 fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
     const src = block.nodeOffset(@bitCast(extended.operand));

-    const va_list_ty = try sema.getBuiltinType("VaList");
+    const va_list_ty = try sema.mod.getBuiltinType("VaList");
     try sema.requireRuntimeBlock(block, src, null);
     return block.addInst(.{
         .tag = .c_va_start,
@@ -22550,7 +22433,7 @@ fn zirFloatFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
     _ = try sema.checkIntType(block, operand_src, operand_scalar_ty);

     if (try sema.resolveValue(operand)) |operand_val| {
-        const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, sema);
+        const result_val = try operand_val.floatFromIntAdvanced(sema.arena, operand_ty, dest_ty, mod, .sema);
         return Air.internedToRef(result_val.toIntern());
     } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeFloat) {
         return sema.failWithNeededComptime(block, operand_src, .{
@@ -22598,7 +22481,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     try sema.checkPtrType(block, src, ptr_ty, true);
     const elem_ty = ptr_ty.elemType2(mod);
-    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);
+    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, .sema);

     if (ptr_ty.isSlice(mod)) {
         const msg = msg: {
@@ -22697,7 +22580,7 @@ fn ptrFromIntVal(
             }
             return sema.failWithUseOfUndef(block, operand_src);
         }
-        const addr = try operand_val.toUnsignedIntAdvanced(sema);
+        const addr = try operand_val.toUnsignedIntSema(zcu);
         if (!ptr_ty.isAllowzeroPtr(zcu) and addr == 0)
             return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(zcu)});
         if (addr != 0 and ptr_align != .none and !ptr_align.check(addr))
@@ -22895,8 +22778,8 @@ fn ptrCastFull(
     const src_info = operand_ty.ptrInfo(mod);
     const dest_info = dest_ty.ptrInfo(mod);

-    try sema.resolveTypeLayout(Type.fromInterned(src_info.child));
-    try sema.resolveTypeLayout(Type.fromInterned(dest_info.child));
+    try Type.fromInterned(src_info.child).resolveLayout(mod);
+    try Type.fromInterned(dest_info.child).resolveLayout(mod);

     const src_slice_like = src_info.flags.size == .Slice or
         (src_info.flags.size == .One and Type.fromInterned(src_info.child).zigTypeTag(mod) == .Array);
@@ -23144,7 +23027,7 @@ fn ptrCastFull(
             // Only convert to a many-pointer at first
             var info = dest_info;
             info.flags.size = .Many;
-            const ty = try sema.ptrType(info);
+            const ty = try mod.ptrTypeSema(info);
             if (dest_ty.zigTypeTag(mod) == .Optional) {
                 break :blk try mod.optionalType(ty.toIntern());
             } else {
@@ -23162,7 +23045,7 @@ fn ptrCastFull(
                 return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
             }
             if (dest_align.compare(.gt, src_align)) {
-                if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| {
+                if (try ptr_val.getUnsignedIntAdvanced(mod, .sema)) |addr| {
                     if (!dest_align.check(addr)) {
                         return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
                             addr,
@@ -23229,7 +23112,7 @@ fn ptrCastFull(
         // We can't change address spaces with a bitcast, so this requires two instructions
         var intermediate_info = src_info;
         intermediate_info.flags.address_space = dest_info.flags.address_space;
-        const intermediate_ptr_ty = try sema.ptrType(intermediate_info);
+        const intermediate_ptr_ty = try mod.ptrTypeSema(intermediate_info);
         const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: {
             break :blk try mod.optionalType(intermediate_ptr_ty.toIntern());
         } else intermediate_ptr_ty;
@@ -23286,7 +23169,7 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     if (flags.volatile_cast) ptr_info.flags.is_volatile = false;

     const dest_ty = blk: {
-        const dest_ty = try sema.ptrType(ptr_info);
+        const dest_ty = try mod.ptrTypeSema(ptr_info);
         if (operand_ty.zigTypeTag(mod) == .Optional) {
             break :blk try mod.optionalType(dest_ty.toIntern());
         }
@@ -23576,7 +23459,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
     const mod = sema.mod;
     const ip = &mod.intern_pool;

-    try sema.resolveTypeLayout(ty);
+    try ty.resolveLayout(mod);
     switch (ty.zigTypeTag(mod)) {
         .Struct => {},
         else => return sema.fail(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}),
     }
@@ -23819,7 +23702,7 @@ fn checkAtomicPtrOperand(
     const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
         .Pointer => ptr_ty.ptrInfo(mod),
         else => {
-            const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data);
+            const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data);
             _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
             unreachable;
         },
@@ -23829,7 +23712,7 @@ fn checkAtomicPtrOperand(
     wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero;
     wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile;

-    const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data);
+    const wanted_ptr_ty = try mod.ptrTypeSema(wanted_ptr_data);
     const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);

     return casted_ptr;
@@ -24006,7 +23889,7 @@ fn resolveExportOptions(
     const mod = sema.mod;
     const gpa = sema.gpa;
     const ip = &mod.intern_pool;
-    const export_options_ty = try sema.getBuiltinType("ExportOptions");
+    const export_options_ty = try mod.getBuiltinType("ExportOptions");
     const air_ref = try sema.resolveInst(zir_ref);
     const options = try sema.coerce(block, export_options_ty, air_ref, src);

@@ -24070,7 +23953,7 @@ fn resolveBuiltinEnum(
     reason: NeededComptimeReason,
 ) CompileError!@field(std.builtin, name) {
     const mod = sema.mod;
-    const ty = try sema.getBuiltinType(name);
+    const ty = try mod.getBuiltinType(name);
     const air_ref = try sema.resolveInst(zir_ref);
     const coerced = try sema.coerce(block, ty, air_ref, src);
     const val = try sema.resolveConstDefinedValue(block, src, coerced, reason);
@@ -24830,7 +24713,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const extra = sema.code.extraData(Zir.Inst.BuiltinCall, inst_data.payload_index).data;

     const func = try sema.resolveInst(extra.callee);
-    const modifier_ty = try sema.getBuiltinType("CallModifier");
+    const modifier_ty = try mod.getBuiltinType("CallModifier");
     const air_ref = try sema.resolveInst(extra.modifier);
     const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src);
     const modifier_val = try sema.resolveConstDefinedValue(block, modifier_src, modifier_ref, .{
@@ -24934,7 +24817,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
         .Struct, .Union => {},
         else => return sema.fail(block, inst_src, "expected pointer to struct or union type, found '{}'", .{parent_ptr_ty.fmt(zcu)}),
     }
-    try sema.resolveTypeLayout(parent_ty);
+    try parent_ty.resolveLayout(zcu);

     const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{
         .needed_comptime_reason = "field name must be comptime-known",
@@ -24965,7 +24848,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     var actual_parent_ptr_info: InternPool.Key.PtrType = .{
         .child = parent_ty.toIntern(),
         .flags = .{
-            .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema),
+            .alignment = try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema),
             .is_const = field_ptr_info.flags.is_const,
             .is_volatile = field_ptr_info.flags.is_volatile,
             .is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -24977,7 +24860,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     var actual_field_ptr_info: InternPool.Key.PtrType = .{
         .child = field_ty.toIntern(),
         .flags = .{
-            .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, sema),
+            .alignment = try field_ptr_ty.ptrAlignmentAdvanced(zcu, .sema),
             .is_const = field_ptr_info.flags.is_const,
             .is_volatile = field_ptr_info.flags.is_volatile,
             .is_allowzero = field_ptr_info.flags.is_allowzero,
@@ -24988,12 +24871,13 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
     switch (parent_ty.containerLayout(zcu)) {
         .auto => {
             actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(
-                if (zcu.typeToStruct(parent_ty)) |struct_obj| try sema.structFieldAlignment(
+                if (zcu.typeToStruct(parent_ty)) |struct_obj| try zcu.structFieldAlignmentAdvanced(
                     struct_obj.fieldAlign(ip, field_index),
                     field_ty,
                     struct_obj.layout,
+                    .sema,
                 ) else if (zcu.typeToUnion(parent_ty)) |union_obj|
-                    try sema.unionFieldAlignment(union_obj, field_index)
+                    try zcu.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema)
                 else
                     actual_field_ptr_info.flags.alignment,
             );
@@ -25023,9 +24907,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
         },
     }

-    const actual_field_ptr_ty = try sema.ptrType(actual_field_ptr_info);
+    const actual_field_ptr_ty = try zcu.ptrTypeSema(actual_field_ptr_info);
     const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, field_ptr_src);
-    const actual_parent_ptr_ty = try sema.ptrType(actual_parent_ptr_info);
+    const actual_parent_ptr_ty = try zcu.ptrTypeSema(actual_parent_ptr_info);

     const result = if (try sema.resolveDefinedValue(block, field_ptr_src, casted_field_ptr)) |field_ptr_val| result: {
         switch (parent_ty.zigTypeTag(zcu)) {
@@ -25085,7 +24969,6 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
         break :result try sema.coerce(block, actual_parent_ptr_ty, Air.internedToRef(field.base), inst_src);
     } else result: {
         try sema.requireRuntimeBlock(block, inst_src, field_ptr_src);
-        try sema.queueFullTypeResolution(parent_ty);
         break :result try block.addInst(.{
             .tag = .field_parent_ptr,
             .data = .{ .ty_pl = .{
@@ -25398,7 +25281,7 @@ fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !A
         // Already an array pointer.
         return ptr;
     }
-    const new_ty = try sema.ptrType(.{
+    const new_ty = try mod.ptrTypeSema(.{
         .child = (try mod.arrayType(.{
             .len = len,
             .sentinel = info.sentinel,
@@ -25497,7 +25380,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
         if (!sema.isComptimeMutablePtr(dest_ptr_val)) break :rs dest_src;
         if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| {
-            const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?;
+            const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, .sema)).?;
             const len = try sema.usizeCast(block, dest_src, len_u64);
             for (0..len) |i| {
                 const elem_index = try mod.intRef(Type.usize, i);
@@ -25556,7 +25439,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     var new_dest_ptr = dest_ptr;
     var new_src_ptr = src_ptr;
     if (len_val) |val| {
-        const len = try val.toUnsignedIntAdvanced(sema);
+        const len = try val.toUnsignedIntSema(mod);
         if (len == 0) {
             // This AIR instruction guarantees length > 0 if it is comptime-known.
             return;
@@ -25603,7 +25486,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         assert(dest_manyptr_ty_key.flags.size == .One);
         dest_manyptr_ty_key.child = dest_elem_ty.toIntern();
         dest_manyptr_ty_key.flags.size = .Many;
-        break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src);
+        break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(dest_manyptr_ty_key), new_dest_ptr, dest_src);
     } else new_dest_ptr;

     const new_src_ptr_ty = sema.typeOf(new_src_ptr);
@@ -25614,7 +25497,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         assert(src_manyptr_ty_key.flags.size == .One);
         src_manyptr_ty_key.child = src_elem_ty.toIntern();
         src_manyptr_ty_key.flags.size = .Many;
-        break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(src_manyptr_ty_key), new_src_ptr, src_src);
+        break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(src_manyptr_ty_key), new_src_ptr, src_src);
     } else new_src_ptr;

     // ok1: dest >= src + len
@@ -25681,7 +25564,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
         const ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr) orelse break :rs dest_src;
         const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len", .no_embedded_nulls), dest_src);
         const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src;
-        const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?;
+        const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, .sema)).?;
         const len = try sema.usizeCast(block, dest_src, len_u64);
         if (len == 0) {
             // This AIR instruction guarantees length > 0 if it is comptime-known.
@@ -25861,7 +25744,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         if (val.isGenericPoison()) {
             break :blk null;
         }
-        const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntAdvanced(sema));
+        const alignment = try sema.validateAlignAllowZero(block, align_src, try val.toUnsignedIntSema(mod));
         const default = target_util.defaultFunctionAlignment(target);
         break :blk if (alignment == default) .none else alignment;
     } else if (extra.data.bits.has_align_ref) blk: {
@@ -25881,7 +25764,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             error.GenericPoison => break :blk null,
             else => |e| return e,
         };
-        const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntAdvanced(sema));
+        const alignment = try sema.validateAlignAllowZero(block, align_src, try align_val.toUnsignedIntSema(mod));
         const default = target_util.defaultFunctionAlignment(target);
         break :blk if (alignment == default) .none else alignment;
     } else .none;
@@ -25957,7 +25840,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const body = sema.code.bodySlice(extra_index, body_len);
         extra_index += body.len;

-        const cc_ty = try sema.getBuiltinType("CallingConvention");
+        const cc_ty = try mod.getBuiltinType("CallingConvention");
         const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, .{
             .needed_comptime_reason = "calling convention must be comptime-known",
         });
@@ -26170,7 +26053,7 @@ fn resolvePrefetchOptions(
     const mod = sema.mod;
     const gpa = sema.gpa;
     const ip = &mod.intern_pool;
-    const options_ty = try sema.getBuiltinType("PrefetchOptions");
+    const options_ty = try mod.getBuiltinType("PrefetchOptions");
     const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src);

     const rw_src = block.src(.{ .init_field_rw = src.offset.node_offset_builtin_call_arg.builtin_call_node });
@@ -26194,7 +26077,7 @@ fn resolvePrefetchOptions(

     return std.builtin.PrefetchOptions{
         .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val),
-        .locality = @intCast(try locality_val.toUnsignedIntAdvanced(sema)),
+        .locality = @intCast(try locality_val.toUnsignedIntSema(mod)),
         .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val),
     };
 }
@@ -26242,7 +26125,7 @@ fn resolveExternOptions(
     const gpa = sema.gpa;
     const ip = &mod.intern_pool;
     const options_inst = try sema.resolveInst(zir_ref);
-    const extern_options_ty = try sema.getBuiltinType("ExternOptions");
+    const extern_options_ty = try mod.getBuiltinType("ExternOptions");
     const options = try sema.coerce(block, extern_options_ty, options_inst, src);

     const name_src = block.src(.{ .init_field_name = src.offset.node_offset_builtin_call_arg.builtin_call_node });
@@ -26493,7 +26376,7 @@ fn explainWhyTypeIsComptime(
     var type_set = TypeSet{};
     defer type_set.deinit(sema.gpa);

-    try sema.resolveTypeFully(ty);
+    try ty.resolveFully(sema.mod);
     return sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty, &type_set);
 }
@@ -26620,7 +26503,7 @@ const ExternPosition = enum {

 /// Returns true if `ty` is allowed in extern types.
 /// Does *NOT* require `ty` to be resolved in any way.
-/// Calls `resolveTypeLayout` for packed containers.
+/// Calls `resolveLayout` for packed containers.
fn validateExternType( sema: *Sema, ty: Type, @@ -26671,7 +26554,7 @@ fn validateExternType( .Struct, .Union => switch (ty.containerLayout(mod)) { .@"extern" => return true, .@"packed" => { - const bit_size = try ty.bitSizeAdvanced(mod, sema); + const bit_size = try ty.bitSizeAdvanced(mod, .sema); switch (bit_size) { 0, 8, 16, 32, 64, 128 => return true, else => return false, @@ -26849,11 +26732,11 @@ fn explainWhyTypeIsNotPacked( } } -fn prepareSimplePanic(sema: *Sema, block: *Block) !void { +fn prepareSimplePanic(sema: *Sema) !void { const mod = sema.mod; if (mod.panic_func_index == .none) { - const decl_index = (try sema.getBuiltinDecl(block, "panic")); + const decl_index = (try mod.getBuiltinDecl("panic")); // decl_index may be an alias; we must find the decl that actually // owns the function. try sema.ensureDeclAnalyzed(decl_index); @@ -26866,10 +26749,10 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void { } if (mod.null_stack_trace == .none) { - const stack_trace_ty = try sema.getBuiltinType("StackTrace"); - try sema.resolveTypeFields(stack_trace_ty); + const stack_trace_ty = try mod.getBuiltinType("StackTrace"); + try stack_trace_ty.resolveFields(mod); const target = mod.getTarget(); - const ptr_stack_trace_ty = try sema.ptrType(.{ + const ptr_stack_trace_ty = try mod.ptrTypeSema(.{ .child = stack_trace_ty.toIntern(), .flags = .{ .address_space = target_util.defaultAddressSpace(target, .global_constant), @@ -26891,9 +26774,9 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP const gpa = sema.gpa; if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x; - try sema.prepareSimplePanic(block); + try sema.prepareSimplePanic(); - const panic_messages_ty = try sema.getBuiltinType("panic_messages"); + const panic_messages_ty = try mod.getBuiltinType("panic_messages"); const msg_decl_index = (sema.namespaceLookup( block, LazySrcLoc.unneeded, @@ -26999,7 +26882,7 @@ fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst. 
return; } - try sema.prepareSimplePanic(block); + try sema.prepareSimplePanic(); const panic_func = mod.funcInfo(mod.panic_func_index); const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl); @@ -27045,7 +26928,7 @@ fn panicUnwrapError( if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.getBuiltin("panicUnwrapError"); + const panic_fn = try sema.mod.getBuiltin("panicUnwrapError"); const err = try fail_block.addTyOp(unwrap_err_tag, Type.anyerror, operand); const err_return_trace = try sema.getErrorReturnTrace(&fail_block); const args: [2]Air.Inst.Ref = .{ err_return_trace, err }; @@ -27104,7 +26987,7 @@ fn panicSentinelMismatch( const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { - const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null); + const elem_ptr_ty = try ptr_ty.elemPtrType(null, mod); const sentinel_ptr = try parent_block.addPtrElemPtr(ptr, sentinel_index, elem_ptr_ty); break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; @@ -27122,7 +27005,7 @@ fn panicSentinelMismatch( } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { - const panic_fn = try sema.getBuiltin("checkNonScalarSentinel"); + const panic_fn = try mod.getBuiltin("checkNonScalarSentinel"); const args: [2]Air.Inst.Ref = .{ expected_sentinel, actual_sentinel }; try sema.callBuiltin(parent_block, src, panic_fn, .auto, &args, .@"safety check"); return; @@ -27161,7 +27044,7 @@ fn safetyCheckFormatted( if (!sema.mod.backendSupportsFeature(.safety_check_formatted)) { _ = try fail_block.addNoOp(.trap); } else { - const panic_fn = try sema.getBuiltin(func); + const panic_fn = try sema.mod.getBuiltin(func); try sema.callBuiltin(&fail_block, src, panic_fn, .auto, args, .@"safety check"); } try sema.addSafetyCheckExtra(parent_block, ok, &fail_block); @@ -27223,7 +27106,7 @@ fn fieldVal( return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (inner_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27320,7 +27203,7 @@ fn fieldVal( if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try sema.resolveTypeFields(child_type); + try child_type.resolveFields(mod); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index: u32 = @intCast(field_index_usize); @@ -27414,7 +27297,7 @@ fn fieldPtr( return anonDeclRef(sema, int_val.toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const new_ptr_ty = try sema.ptrType(.{ + const new_ptr_ty = try mod.ptrTypeSema(.{ .child = Type.fromInterned(ptr_info.child).childType(mod).toIntern(), .sentinel = if (object_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27429,7 +27312,7 @@ fn fieldPtr( .packed_offset = ptr_info.packed_offset, }); const ptr_ptr_info = object_ptr_ty.ptrInfo(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = new_ptr_ty.toIntern(), 
.sentinel = if (object_ptr_ty.sentinel(mod)) |s| s.toIntern() else .none, .flags = .{ @@ -27463,7 +27346,7 @@ fn fieldPtr( if (field_name.eqlSlice("ptr", ip)) { const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = slice_ptr_ty.toIntern(), .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27473,7 +27356,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, sema)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_ptr_index, mod)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27481,7 +27364,7 @@ fn fieldPtr( try sema.checkKnownAllocPtr(block, inner_ptr, field_ptr); return field_ptr; } else if (field_name.eqlSlice("len", ip)) { - const result_ty = try sema.ptrType(.{ + const result_ty = try mod.ptrTypeSema(.{ .child = .usize_type, .flags = .{ .is_const = !attr_ptr_ty.ptrIsMutable(mod), @@ -27491,7 +27374,7 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return Air.internedToRef((try val.ptrField(Value.slice_len_index, sema)).toIntern()); + return Air.internedToRef((try val.ptrField(Value.slice_len_index, mod)).toIntern()); } try sema.requireRuntimeBlock(block, src, null); @@ -27559,7 +27442,7 @@ fn fieldPtr( if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { return inst; } - try sema.resolveTypeFields(child_type); + try child_type.resolveFields(mod); if (child_type.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32: u32 = @intCast(field_index); @@ -27654,7 +27537,7 @@ fn fieldCallBind( find_field: { switch (concrete_ty.zigTypeTag(mod)) { .Struct => { - try sema.resolveTypeFields(concrete_ty); + try concrete_ty.resolveFields(mod); if (mod.typeToStruct(concrete_ty)) |struct_type| { const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; @@ -27680,7 +27563,7 @@ fn fieldCallBind( } }, .Union => { - try sema.resolveTypeFields(concrete_ty); + try concrete_ty.resolveFields(mod); const union_obj = mod.typeToUnion(concrete_ty).?; _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field; const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false); @@ -27701,7 +27584,6 @@ fn fieldCallBind( const decl_idx = (try sema.namespaceLookup(block, src, namespace, field_name)) orelse break :found_decl null; - try sema.addReferencedBy(src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); if (mod.typeToFunc(decl_type)) |func_type| f: { @@ -27791,7 +27673,7 @@ fn finishFieldCallBind( object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { const mod = sema.mod; - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !ptr_ty.ptrIsMutable(mod), @@ -27802,14 +27684,14 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { if (container_ty.structFieldIsComptime(field_index, mod)) { - try sema.resolveStructFieldInits(container_ty); + try container_ty.resolveStructFieldInits(mod); const default_val = (try container_ty.structFieldValueComptime(mod, field_index)).?; return .{ .direct = 
Air.internedToRef(default_val.toIntern()) }; } } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const ptr_val = try struct_ptr_val.ptrField(field_index, sema); + const ptr_val = try struct_ptr_val.ptrField(field_index, mod); const pointer = Air.internedToRef(ptr_val.toIntern()); return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) }; } @@ -27857,8 +27739,7 @@ fn namespaceLookupRef( decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, opt_namespace, decl_name)) orelse return null; - try sema.addReferencedBy(src, decl); - return try sema.analyzeDeclRef(decl); + return try sema.analyzeDeclRef(src, decl); } fn namespaceLookupVal( @@ -27886,8 +27767,8 @@ fn structFieldPtr( const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try sema.resolveTypeFields(struct_ty); - try sema.resolveStructLayout(struct_ty); + try struct_ty.resolveFields(mod); + try struct_ty.resolveLayout(mod); if (struct_ty.isTuple(mod)) { if (field_name.eqlSlice("len", ip)) { @@ -27926,7 +27807,7 @@ fn structFieldPtrByIndex( } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - const val = try struct_ptr_val.ptrField(field_index, sema); + const val = try struct_ptr_val.ptrField(field_index, mod); return Air.internedToRef(val.toIntern()); } @@ -27970,10 +27851,11 @@ fn structFieldPtrByIndex( @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset))); } else { // Our alignment is capped at the field alignment. - const field_align = try sema.structFieldAlignment( + const field_align = try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, field_index), Type.fromInterned(field_ty), struct_type.layout, + .sema, ); ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none) field_align @@ -27981,10 +27863,10 @@ fn structFieldPtrByIndex( field_align.min(parent_align); } - const ptr_field_ty = try sema.ptrType(ptr_ty_data); + const ptr_field_ty = try mod.ptrTypeSema(ptr_ty_data); if (struct_type.fieldIsComptime(ip, field_index)) { - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const val = try mod.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] }, @@ -28010,7 +27892,7 @@ fn structFieldVal( const ip = &mod.intern_pool; assert(struct_ty.zigTypeTag(mod) == .Struct); - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => { @@ -28021,7 +27903,7 @@ fn structFieldVal( const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); if (struct_type.fieldIsComptime(ip, field_index)) { - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]); } @@ -28038,7 +27920,7 @@ fn structFieldVal( } try sema.requireRuntimeBlock(block, src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(struct_byval, field_index, field_ty); }, .anon_struct_type => |anon_struct| { @@ -28105,7 +27987,7 @@ fn tupleFieldValByIndex( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try 
tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); } @@ -28126,7 +28008,7 @@ fn tupleFieldValByIndex( } try sema.requireRuntimeBlock(block, src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(tuple_byval, field_index, field_ty); } @@ -28147,11 +28029,11 @@ fn unionFieldPtr( const union_ptr_ty = sema.typeOf(union_ptr); const union_ptr_info = union_ptr_ty.ptrInfo(mod); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = union_ptr_info.flags.is_const, @@ -28162,7 +28044,7 @@ fn unionFieldPtr( union_ptr_info.flags.alignment else try sema.typeAbiAlignment(union_ty); - const field_align = try sema.unionFieldAlignment(union_obj, field_index); + const field_align = try mod.unionFieldNormalAlignmentAdvanced(union_obj, field_index, .sema); break :blk union_align.min(field_align); } else union_ptr_info.flags.alignment, }, @@ -28218,7 +28100,7 @@ fn unionFieldPtr( }, .@"packed", .@"extern" => {}, } - const field_ptr_val = try union_ptr_val.ptrField(field_index, sema); + const field_ptr_val = try union_ptr_val.ptrField(field_index, mod); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28253,7 +28135,7 @@ fn unionFieldVal( const ip = &zcu.intern_pool; assert(union_ty.zigTypeTag(zcu) == .Union); - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(zcu); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); @@ -28292,7 +28174,7 @@ fn unionFieldVal( .@"packed" => if (tag_matches) { // Fast path - no need to use bitcast logic. 
return Air.internedToRef(un.val); - } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, sema), 0)) |field_val| { + } else if (try sema.bitCastVal(union_val, field_ty, 0, try union_ty.bitSizeAdvanced(zcu, .sema), 0)) |field_val| { return Air.internedToRef(field_val.toIntern()); }, } @@ -28311,7 +28193,7 @@ fn unionFieldVal( _ = try block.addNoOp(.unreach); return .unreachable_value; } - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(zcu); return block.addStructFieldVal(union_byval, field_index, field_ty); } @@ -28342,7 +28224,7 @@ fn elemPtr( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); break :blk try sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -28380,11 +28262,11 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); - const elem_ptr = try ptr_val.ptrElem(index, sema); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); + const elem_ptr = try ptr_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr.toIntern()); }; - const result_ty = try sema.elemPtrType(indexable_ty, null); + const result_ty = try indexable_ty.elemPtrType(null, mod); try sema.requireRuntimeBlock(block, src, runtime_src); return block.addPtrElemPtr(indexable, elem_index, result_ty); @@ -28398,7 +28280,7 @@ fn elemPtrOneLayerOnly( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); break :blk try sema.tupleFieldPtr(block, indexable_src, indexable, elem_index_src, index, false); }, else => unreachable, // Guaranteed by checkIndexable @@ -28438,12 +28320,12 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); const elem_ty = indexable_ty.elemType2(mod); const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); - const elem_ptr_val = try many_ptr_val.ptrElem(index, sema); + const elem_ptr_val = try many_ptr_val.ptrElem(index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef((try mod.getCoerced(elem_val, elem_ty)).toIntern()); } @@ -28459,7 +28341,7 @@ fn elemVal( if (inner_ty.zigTypeTag(mod) != .Array) break :arr_sent; const sentinel = inner_ty.sentinel(mod) orelse break :arr_sent; const index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index) orelse break :arr_sent; - const index = try sema.usizeCast(block, src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, 
src, try index_val.toUnsignedIntSema(mod)); if (index != inner_ty.arrayLen(mod)) break :arr_sent; return Air.internedToRef(sentinel.toIntern()); } @@ -28477,7 +28359,7 @@ fn elemVal( const index_val = try sema.resolveConstDefinedValue(block, elem_index_src, elem_index, .{ .needed_comptime_reason = "tuple field access index must be comptime-known", }); - const index: u32 = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: u32 = @intCast(try index_val.toUnsignedIntSema(mod)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -28522,7 +28404,7 @@ fn tupleFieldPtr( const mod = sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(mod); - try sema.resolveTypeFields(tuple_ty); + try tuple_ty.resolveFields(mod); const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { @@ -28536,7 +28418,7 @@ fn tupleFieldPtr( } const field_ty = tuple_ty.structFieldType(field_index, mod); - const ptr_field_ty = try sema.ptrType(.{ + const ptr_field_ty = try mod.ptrTypeSema(.{ .child = field_ty.toIntern(), .flags = .{ .is_const = !tuple_ptr_ty.ptrIsMutable(mod), @@ -28546,7 +28428,7 @@ fn tupleFieldPtr( }); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return Air.internedToRef((try mod.intern(.{ .ptr = .{ @@ -28557,7 +28439,7 @@ fn tupleFieldPtr( } if (try sema.resolveValue(tuple_ptr)) |tuple_ptr_val| { - const field_ptr_val = try tuple_ptr_val.ptrField(field_index, sema); + const field_ptr_val = try tuple_ptr_val.ptrField(field_index, mod); return Air.internedToRef(field_ptr_val.toIntern()); } @@ -28579,7 +28461,7 @@ fn tupleField( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const tuple_ty = sema.typeOf(tuple); - try sema.resolveTypeFields(tuple_ty); + try tuple_ty.resolveFields(mod); const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { @@ -28595,7 +28477,7 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index, mod); if (tuple_ty.structFieldIsComptime(field_index, mod)) - try sema.resolveStructFieldInits(tuple_ty); + try tuple_ty.resolveStructFieldInits(mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return Air.internedToRef(default_value.toIntern()); // comptime field } @@ -28608,7 +28490,7 @@ fn tupleField( try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); try sema.requireRuntimeBlock(block, tuple_src, null); - try sema.resolveTypeLayout(field_ty); + try field_ty.resolveLayout(mod); return block.addStructFieldVal(tuple, field_index, field_ty); } @@ -28638,7 +28520,7 @@ fn elemValArray( const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); if (array_sent) |s| { if (index == array_len) { return Air.internedToRef(s.toIntern()); @@ -28654,7 +28536,7 @@ fn elemValArray( return mod.undefRef(elem_ty); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); const elem_val = try array_val.elemValue(mod, index); return 
Air.internedToRef(elem_val.toIntern()); } @@ -28676,7 +28558,6 @@ fn elemValArray( return Air.internedToRef(elem_val.toIntern()); try sema.requireRuntimeBlock(block, src, runtime_src); - try sema.queueFullTypeResolution(array_ty); return block.addBinOp(.array_elem_val, array, elem_index); } @@ -28705,7 +28586,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveValue(array_ptr); // The index must not be undefined since it can be out of bounds. const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); @@ -28713,14 +28594,14 @@ fn elemPtrArray( break :o index; } else null; - const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset); + const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, mod); if (maybe_undef_array_ptr_val) |array_ptr_val| { if (array_ptr_val.isUndef(mod)) { return mod.undefRef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.ptrElem(index, sema); + const elem_ptr = try array_ptr_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr.toIntern()); } } @@ -28765,19 +28646,19 @@ fn elemValSlice( if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index: usize = @intCast(try index_val.toUnsignedIntAdvanced(sema)); + const index: usize = @intCast(try index_val.toUnsignedIntSema(mod)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_ty = try sema.elemPtrType(slice_ty, index); - const elem_ptr_val = try slice_val.ptrElem(index, sema); + const elem_ptr_ty = try slice_ty.elemPtrType(index, mod); + const elem_ptr_val = try slice_val.ptrElem(index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return Air.internedToRef(elem_val.toIntern()); } @@ -28790,13 +28671,12 @@ fn elemValSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try mod.intRef(Type.usize, try slice_val.sliceLen(sema)) + try mod.intRef(Type.usize, try slice_val.sliceLen(mod)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; try sema.panicIndexOutOfBounds(block, src, elem_index, len_inst, cmp_op); } - try sema.queueFullTypeResolution(sema.typeOf(slice)); return block.addBinOp(.slice_elem_val, slice, elem_index); } @@ -28817,17 +28697,17 @@ fn elemPtrSlice( const maybe_undef_slice_val = try sema.resolveValue(slice); // The index must not be undefined since it can be out of bounds. 
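+    // An undefined index carries no concrete value to bounds-check, so
+    // `resolveDefinedValue` below reports a compile error for it instead.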
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntAdvanced(sema)); + const index = try sema.usizeCast(block, elem_index_src, try index_val.toUnsignedIntSema(mod)); break :o index; } else null; - const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset); + const elem_ptr_ty = try slice_ty.elemPtrType(offset, mod); if (maybe_undef_slice_val) |slice_val| { if (slice_val.isUndef(mod)) { return mod.undefRef(elem_ptr_ty); } - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const slice_len_s = slice_len + @intFromBool(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -28837,7 +28717,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.ptrElem(index, sema); + const elem_ptr_val = try slice_val.ptrElem(index, mod); return Air.internedToRef(elem_ptr_val.toIntern()); } } @@ -28850,7 +28730,7 @@ fn elemPtrSlice( const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef(mod)) - break :len try mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + break :len try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -28915,9 +28795,9 @@ fn coerceExtra( if (dest_ty.isGenericPoison()) return inst; const zcu = sema.mod; const dest_ty_src = inst_src; // TODO better source location - try sema.resolveTypeFields(dest_ty); + try dest_ty.resolveFields(zcu); const inst_ty = sema.typeOf(inst); - try sema.resolveTypeFields(inst_ty); + try inst_ty.resolveFields(zcu); const target = zcu.getTarget(); // If the types are the same, we can return the operand. if (dest_ty.eql(inst_ty, zcu)) @@ -28931,7 +28811,6 @@ fn coerceExtra( return sema.coerceInMemory(val, dest_ty); } try sema.requireRuntimeBlock(block, inst_src, null); - try sema.queueFullTypeResolution(dest_ty); const new_val = try block.addBitCast(dest_ty, inst); try sema.checkKnownAllocPtr(block, inst, new_val); return new_val; @@ -28996,7 +28875,7 @@ fn coerceExtra( if (inst_ty.zigTypeTag(zcu) == .Fn) { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const fn_decl = fn_val.pointerDecl(zcu).?; - const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); + const inst_as_ptr = try sema.analyzeDeclRef(inst_src, fn_decl); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } @@ -29227,7 +29106,7 @@ fn coerceExtra( // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. 
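+                // For instance (illustrative, not part of this change):
+                // `const s: []u8 = &.{};` takes this path; with zero elements,
+                // handing out a mutable slice cannot break const-correctness.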
if (inst_child_ty.structFieldCount(zcu) == 0) { - const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, sema); + const align_val = try dest_ty.ptrAlignmentAdvanced(zcu, .sema); return Air.internedToRef(try zcu.intern(.{ .slice = .{ .ty = dest_ty.toIntern(), .ptr = try zcu.intern(.{ .ptr = .{ @@ -29372,7 +29251,7 @@ fn coerceExtra( } break :int; }; - const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, sema); + const result_val = try val.floatFromIntAdvanced(sema.arena, inst_ty, dest_ty, zcu, .sema); // TODO implement this compile error //const int_again_val = try result_val.intFromFloat(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, zcu)) { @@ -30549,7 +30428,7 @@ fn coerceVarArgParam( .Fn => fn_ptr: { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); const fn_decl = fn_val.pointerDecl(mod).?; - break :fn_ptr try sema.analyzeDeclRef(fn_decl); + break :fn_ptr try sema.analyzeDeclRef(inst_src, fn_decl); }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), .Float => float: { @@ -30704,7 +30583,6 @@ fn storePtr2( } try sema.requireRuntimeBlock(block, src, runtime_src); - try sema.queueFullTypeResolution(elem_ty); if (ptr_ty.ptrInfo(mod).flags.vector_index == .runtime) { const ptr_inst = ptr.toIndex().?; @@ -30926,10 +30804,10 @@ fn bitCast( operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { const zcu = sema.mod; - try sema.resolveTypeLayout(dest_ty); + try dest_ty.resolveLayout(zcu); const old_ty = sema.typeOf(inst); - try sema.resolveTypeLayout(old_ty); + try old_ty.resolveLayout(zcu); const dest_bits = dest_ty.bitSize(zcu); const old_bits = old_ty.bitSize(zcu); @@ -31111,7 +30989,7 @@ fn coerceEnumToUnion( const union_obj = mod.typeToUnion(union_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try sema.resolveTypeFields(field_ty); + try field_ty.resolveFields(mod); if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(inst_src, "cannot initialize 'noreturn' field of union", .{}); @@ -31524,8 +31402,8 @@ fn coerceTupleToStruct( ) !Air.Inst.Ref { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(struct_ty); - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveFields(mod); + try struct_ty.resolveStructFieldInits(mod); if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); @@ -31776,11 +31654,10 @@ fn analyzeDeclVal( src: LazySrcLoc, decl_index: InternPool.DeclIndex, ) CompileError!Air.Inst.Ref { - try sema.addReferencedBy(src, decl_index); if (sema.decl_val_table.get(decl_index)) |result| { return result; } - const decl_ref = try sema.analyzeDeclRefInner(decl_index, false); + const decl_ref = try sema.analyzeDeclRefInner(src, decl_index, false); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (result.toInterned() != null) { if (!block.is_typeof) { @@ -31790,18 +31667,18 @@ fn analyzeDeclVal( return result; } -fn addReferencedBy( +fn addReferenceEntry( sema: *Sema, src: LazySrcLoc, - decl_index: InternPool.DeclIndex, + referenced_unit: AnalUnit, ) !void { if (sema.mod.comp.reference_trace == 0) return; - try sema.mod.reference_table.put(sema.gpa, decl_index, .{ - // TODO: this can make the reference trace suboptimal. This will be fixed - // once the reference table is reworked for incremental compilation. 
- .referencer = sema.owner_decl_index, - .src = src, - }); + const gop = try sema.references.getOrPut(sema.gpa, referenced_unit); + if (gop.found_existing) return; + // TODO: we need to figure out how to model inline calls here. + // They aren't references in the analysis sense, but ought to show up in the reference trace! + // Would representing inline calls in the reference table cause excessive memory usage? + try sema.mod.addUnitReference(sema.ownerUnit(), referenced_unit, src); } pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void { @@ -31851,16 +31728,17 @@ fn optRefValue(sema: *Sema, opt_val: ?Value) !Value { } }))); } -fn analyzeDeclRef(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref { - return sema.analyzeDeclRefInner(decl_index, true); +fn analyzeDeclRef(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref { + return sema.analyzeDeclRefInner(src, decl_index, true); } /// Analyze a reference to the decl at the given index. Ensures the underlying decl is analyzed, but /// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a /// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps /// this function with `analyze_fn_body` set to true. -fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref { +fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref { const mod = sema.mod; + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index })); try sema.ensureDeclAnalyzed(decl_index); const decl_val = try mod.declPtr(decl_index).valueOrFail(); @@ -31872,7 +31750,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn }); // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type try sema.declareDependency(.{ .decl_val = decl_index }); - const ptr_ty = try sema.ptrType(.{ + const ptr_ty = try mod.ptrTypeSema(.{ .child = decl_val.typeOf(mod).toIntern(), .flags = .{ .alignment = owner_decl.alignment, @@ -31881,7 +31759,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn }, }); if (analyze_fn_body) { - try sema.maybeQueueFuncBodyAnalysis(decl_index); + try sema.maybeQueueFuncBodyAnalysis(src, decl_index); } return Air.internedToRef((try mod.intern(.{ .ptr = .{ .ty = ptr_ty.toIntern(), @@ -31890,12 +31768,13 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn } }))); } -fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: InternPool.DeclIndex) !void { +fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) !void { const mod = sema.mod; const decl = mod.declPtr(decl_index); const decl_val = try decl.valueOrFail(); if (!mod.intern_pool.isFuncBody(decl_val.toIntern())) return; if (!try sema.fnHasRuntimeBits(decl_val.typeOf(mod))) return; + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = decl_val.toIntern() })); try mod.ensureFuncBodyAnalysisQueued(decl_val.toIntern()); } @@ -31910,22 +31789,22 @@ fn analyzeRef( if (try sema.resolveValue(operand)) |val| { switch (mod.intern_pool.indexToKey(val.toIntern())) { - .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), - .func => |func| return sema.analyzeDeclRef(func.owner_decl), + .extern_func => |extern_func| return 
sema.analyzeDeclRef(src, extern_func.decl), + .func => |func| return sema.analyzeDeclRef(src, func.owner_decl), else => return anonDeclRef(sema, val.toIntern()), } } try sema.requireRuntimeBlock(block, src, null); const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try mod.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .is_const = true, .address_space = address_space, }, }); - const mut_ptr_type = try sema.ptrType(.{ + const mut_ptr_type = try mod.ptrTypeSema(.{ .child = operand_ty.toIntern(), .flags = .{ .address_space = address_space }, }); @@ -32033,7 +31912,7 @@ fn analyzeSliceLen( if (slice_val.isUndef(mod)) { return mod.undefRef(Type.usize); } - return mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + return mod.intRef(Type.usize, try slice_val.sliceLen(mod)); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.slice_len, Type.usize, slice_inst); @@ -32401,7 +32280,7 @@ fn analyzeSlice( assert(manyptr_ty_key.flags.size == .One); manyptr_ty_key.child = elem_ty.toIntern(); manyptr_ty_key.flags.size = .Many; - break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src); + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrTypeSema(manyptr_ty_key), ptr_or_slice, ptr_src); } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); @@ -32470,7 +32349,7 @@ fn analyzeSlice( return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; - const slice_len = try slice_val.sliceLen(sema); + const slice_len = try slice_val.sliceLen(mod); const len_plus_sent = slice_len + @intFromBool(has_sentinel); const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { @@ -32485,7 +32364,7 @@ fn analyzeSlice( "end index {} out of bounds for slice of length {d}{s}", .{ end_val.fmtValue(mod, sema), - try slice_val.sliceLen(sema), + try slice_val.sliceLen(mod), sentinel_label, }, ); @@ -32558,7 +32437,7 @@ fn analyzeSlice( const many_ptr_ty = try mod.manyConstPtrType(elem_ty); const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty); - const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, sema); + const elem_ptr = try many_ptr_val.ptrElem(sentinel_index, mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -32621,9 +32500,9 @@ fn analyzeSlice( const new_allowzero = new_ptr_ty_info.flags.is_allowzero and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = try new_len_val.toUnsignedIntAdvanced(sema); + const new_len_int = try new_len_val.toUnsignedIntSema(mod); - const return_ty = try sema.ptrType(.{ + const return_ty = try mod.ptrTypeSema(.{ .child = (try mod.arrayType(.{ .len = new_len_int, .sentinel = if (sentinel) |s| s.toIntern() else .none, @@ -32685,7 +32564,7 @@ fn analyzeSlice( return sema.fail(block, src, "non-zero length slice of undefined pointer", .{}); } - const return_ty = try sema.ptrType(.{ + const return_ty = try mod.ptrTypeSema(.{ .child = elem_ty.toIntern(), .sentinel = if (sentinel) |s| s.toIntern() else .none, .flags = .{ @@ -32713,7 +32592,7 @@ fn analyzeSlice( if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels 
because the // underlying value data includes the sentinel - break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(sema)); + break :blk try mod.intRef(Type.usize, try slice_val.sliceLen(mod)); } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); @@ -32805,7 +32684,7 @@ fn cmpNumeric( if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false; } - return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) + return if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, .sema)) .bool_true else .bool_false; @@ -32874,11 +32753,11 @@ fn cmpNumeric( // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, // add/subtract 1. const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| - !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) + !(try lhs_val.compareAllWithZeroSema(.gte, mod)) else (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| - !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) + !(try rhs_val.compareAllWithZeroSema(.gte, mod)) else (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; @@ -33026,7 +32905,7 @@ fn compareIntsOnlyPossibleResult( ) Allocator.Error!?bool { const mod = sema.mod; const rhs_info = rhs_ty.intInfo(mod); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, .sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -33190,7 +33069,6 @@ fn wrapErrorUnionPayload( } }))); } try sema.requireRuntimeBlock(block, inst_src, null); - try sema.queueFullTypeResolution(dest_payload_ty); return block.addTyOp(.wrap_errunion_payload, dest_ty, coerced); } @@ -33993,7 +33871,7 @@ fn resolvePeerTypesInner( opt_ptr_info = ptr_info; } - return .{ .success = try sema.ptrType(opt_ptr_info.?) }; + return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; }, .ptr => { @@ -34303,7 +34181,7 @@ fn resolvePeerTypesInner( }, } - return .{ .success = try sema.ptrType(opt_ptr_info.?) }; + return .{ .success = try mod.ptrTypeSema(opt_ptr_info.?) }; }, .func => { @@ -34660,7 +34538,7 @@ fn resolvePeerTypesInner( var comptime_val: ?Value = null; for (peer_tys) |opt_ty| { const struct_ty = opt_ty orelse continue; - try sema.resolveStructFieldInits(struct_ty); + try struct_ty.resolveStructFieldInits(mod); const uncoerced_field_val = try struct_ty.structFieldValueComptime(mod, field_index) orelse { comptime_val = null; @@ -34796,181 +34674,22 @@ pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { const ip = &mod.intern_pool; const fn_ty_info = mod.typeToFunc(fn_ty).?; - try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.return_type)); + try Type.fromInterned(fn_ty_info.return_type).resolveFully(mod); if (mod.comp.config.any_error_tracing and Type.fromInterned(fn_ty_info.return_type).isError(mod)) { // Ensure the type exists so that backends can assume that. 
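+        // Error-return tracing lowers to code that references
+        // `std.builtin.StackTrace`, so that type must be resolved up front.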
- _ = try sema.getBuiltinType("StackTrace"); + _ = try mod.getBuiltinType("StackTrace"); } for (0..fn_ty_info.param_types.len) |i| { - try sema.resolveTypeFully(Type.fromInterned(fn_ty_info.param_types.get(ip)[i])); + try Type.fromInterned(fn_ty_info.param_types.get(ip)[i]).resolveFully(mod); } } -/// Make it so that calling hash() and eql() on `val` will not assert due -/// to a type not having its layout resolved. fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { - const mod = sema.mod; - switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .u64, .i64, .big_int => return val, - .lazy_align, .lazy_size => return mod.intValue( - Type.fromInterned(int.ty), - (try val.getUnsignedIntAdvanced(mod, sema)).?, - ), - }, - .slice => |slice| { - const ptr = try sema.resolveLazyValue(Value.fromInterned(slice.ptr)); - const len = try sema.resolveLazyValue(Value.fromInterned(slice.len)); - if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val; - return Value.fromInterned(try mod.intern(.{ .slice = .{ - .ty = slice.ty, - .ptr = ptr.toIntern(), - .len = len.toIntern(), - } })); - }, - .ptr => |ptr| { - switch (ptr.base_addr) { - .decl, .comptime_alloc, .anon_decl, .int => return val, - .comptime_field => |field_val| { - const resolved_field_val = - (try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern(); - return if (resolved_field_val == field_val) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = .{ .comptime_field = resolved_field_val }, - .byte_offset = ptr.byte_offset, - } }))); - }, - .eu_payload, .opt_payload => |base| { - const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base))).toIntern(); - return if (resolved_base == base) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = switch (ptr.base_addr) { - .eu_payload => .{ .eu_payload = resolved_base }, - .opt_payload => .{ .opt_payload = resolved_base }, - else => unreachable, - }, - .byte_offset = ptr.byte_offset, - } }))); - }, - .arr_elem, .field => |base_index| { - const resolved_base = (try sema.resolveLazyValue(Value.fromInterned(base_index.base))).toIntern(); - return if (resolved_base == base_index.base) - val - else - Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ptr.ty, - .base_addr = switch (ptr.base_addr) { - .arr_elem => .{ .arr_elem = .{ - .base = resolved_base, - .index = base_index.index, - } }, - .field => .{ .field = .{ - .base = resolved_base, - .index = base_index.index, - } }, - else => unreachable, - }, - .byte_offset = ptr.byte_offset, - } }))); - }, - } - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => return val, - .elems => |elems| { - var resolved_elems: []InternPool.Index = &.{}; - for (elems, 0..) 
|elem, i| { - const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern(); - if (resolved_elems.len == 0 and resolved_elem != elem) { - resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len); - @memcpy(resolved_elems[0..i], elems[0..i]); - } - if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; - } - return if (resolved_elems.len == 0) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = aggregate.ty, - .storage = .{ .elems = resolved_elems }, - } }))); - }, - .repeated_elem => |elem| { - const resolved_elem = (try sema.resolveLazyValue(Value.fromInterned(elem))).toIntern(); - return if (resolved_elem == elem) val else Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = aggregate.ty, - .storage = .{ .repeated_elem = resolved_elem }, - } }))); - }, - }, - .un => |un| { - const resolved_tag = if (un.tag == .none) - .none - else - (try sema.resolveLazyValue(Value.fromInterned(un.tag))).toIntern(); - const resolved_val = (try sema.resolveLazyValue(Value.fromInterned(un.val))).toIntern(); - return if (resolved_tag == un.tag and resolved_val == un.val) - val - else - Value.fromInterned((try mod.intern(.{ .un = .{ - .ty = un.ty, - .tag = resolved_tag, - .val = resolved_val, - } }))); - }, - else => return val, - } -} - -pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .simple_type => |simple_type| return sema.resolveSimpleType(simple_type), - else => {}, - } - switch (ty.zigTypeTag(mod)) { - .Struct => return sema.resolveStructLayout(ty), - .Union => return sema.resolveUnionLayout(ty), - .Array => { - if (ty.arrayLenIncludingSentinel(mod) == 0) return; - const elem_ty = ty.childType(mod); - return sema.resolveTypeLayout(elem_ty); - }, - .Optional => { - const payload_ty = ty.optionalChild(mod); - // In case of querying the ABI alignment of this optional, we will ask - // for hasRuntimeBits() of the payload type, so we need "requires comptime" - // to be known already before this function returns. - _ = try sema.typeRequiresComptime(payload_ty); - return sema.resolveTypeLayout(payload_ty); - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - return sema.resolveTypeLayout(payload_ty); - }, - .Fn => { - const info = mod.typeToFunc(ty).?; - if (info.is_generic) { - // Resolving of generic function types is deferred to when - // the function is instantiated. - return; - } - const ip = &mod.intern_pool; - for (0..info.param_types.len) |i| { - const param_ty = info.param_types.get(ip)[i]; - try sema.resolveTypeLayout(Type.fromInterned(param_ty)); - } - try sema.resolveTypeLayout(Type.fromInterned(info.return_type)); - }, - else => {}, - } + return val.resolveLazy(sema.arena, sema.mod); } /// Resolve a struct's alignment only without triggering resolution of its layout. 
@@ -34979,11 +34698,13 @@ pub fn resolveStructAlignment( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, -) CompileError!Alignment { +) SemaError!void { const mod = sema.mod; const ip = &mod.intern_pool; const target = mod.getTarget(); + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + assert(struct_type.flagsPtr(ip).alignment == .none); assert(struct_type.layout != .@"packed"); @@ -34994,7 +34715,7 @@ pub fn resolveStructAlignment( struct_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); struct_type.flagsPtr(ip).alignment = result; - return result; + return; } try sema.resolveTypeFieldsStruct(ty, struct_type); @@ -35006,7 +34727,7 @@ pub fn resolveStructAlignment( struct_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); struct_type.flagsPtr(ip).alignment = result; - return result; + return; } defer struct_type.clearAlignmentWip(ip); @@ -35016,30 +34737,35 @@ pub fn resolveStructAlignment( const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) continue; - const field_align = try sema.structFieldAlignment( + const field_align = try mod.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, + .sema, ); result = result.maxStrict(field_align); } struct_type.flagsPtr(ip).alignment = result; - return result; } -fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + if (struct_type.haveLayout(ip)) return; - try sema.resolveTypeFields(ty); + try ty.resolveFields(zcu); if (struct_type.layout == .@"packed") { - try semaBackingIntType(zcu, struct_type); + semaBackingIntType(zcu, struct_type) catch |err| switch (err) { + error.OutOfMemory, error.AnalysisFail => |e| return e, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, + }; return; } @@ -35075,10 +34801,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { }, else => return err, }; - field_align.* = try sema.structFieldAlignment( + field_align.* = try zcu.structFieldAlignmentAdvanced( struct_type.fieldAlign(ip, i), field_ty, struct_type.layout, + .sema, ); big_align = big_align.maxStrict(field_align.*); } @@ -35214,7 +34941,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co var accumulator: u64 = 0; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - accumulator += try field_ty.bitSizeAdvanced(mod, &sema); + accumulator += try field_ty.bitSizeAdvanced(mod, .sema); } break :blk accumulator; }; @@ -35263,6 +34990,8 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.LoadedStructType) Co const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum)); struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } + + try sema.flushExports(); } fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void { @@ -35322,11 +35051,13 @@ pub fn resolveUnionAlignment( sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType, 
-) CompileError!Alignment { +) SemaError!void { const mod = sema.mod; const ip = &mod.intern_pool; const target = mod.getTarget(); + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + assert(!union_type.haveLayout(ip)); if (union_type.flagsPtr(ip).status == .field_types_wip) { @@ -35336,7 +35067,7 @@ pub fn resolveUnionAlignment( union_type.flagsPtr(ip).assumed_pointer_aligned = true; const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); union_type.flagsPtr(ip).alignment = result; - return result; + return; } try sema.resolveTypeFieldsUnion(ty, union_type); @@ -35356,11 +35087,10 @@ pub fn resolveUnionAlignment( } union_type.flagsPtr(ip).alignment = max_align; - return max_align; } /// This logic must be kept in sync with `Module.getUnionLayout`. -fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; @@ -35369,6 +35099,8 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // Load again, since the tag type might have changed due to resolution. const union_type = ip.loadUnionType(ty.ip_index); + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + switch (union_type.flagsPtr(ip).status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { @@ -35477,53 +35209,15 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. -pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - switch (ty.zigTypeTag(mod)) { - .Pointer => { - return sema.resolveTypeFully(ty.childType(mod)); - }, - .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .struct_type => try sema.resolveStructFully(ty), - .anon_struct_type => |tuple| { - for (tuple.types.get(ip)) |field_ty| { - try sema.resolveTypeFully(Type.fromInterned(field_ty)); - } - }, - .simple_type => |simple_type| try sema.resolveSimpleType(simple_type), - else => {}, - }, - .Union => return sema.resolveUnionFully(ty), - .Array => return sema.resolveTypeFully(ty.childType(mod)), - .Optional => { - return sema.resolveTypeFully(ty.optionalChild(mod)); - }, - .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)), - .Fn => { - const info = mod.typeToFunc(ty).?; - if (info.is_generic) { - // Resolving of generic function types is deferred to when - // the function is instantiated. 
- return; - } - for (0..info.param_types.len) |i| { - const param_ty = info.param_types.get(ip)[i]; - try sema.resolveTypeFully(Type.fromInterned(param_ty)); - } - try sema.resolveTypeFully(Type.fromInterned(info.return_type)); - }, - else => {}, - } -} - -fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveStructLayout(ty); const mod = sema.mod; const ip = &mod.intern_pool; const struct_type = mod.typeToStruct(ty).?; + assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + if (struct_type.setFullyResolved(ip)) return; errdefer struct_type.clearFullyResolved(ip); @@ -35533,16 +35227,19 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - try sema.resolveTypeFully(field_ty); + try field_ty.resolveFully(mod); } } -fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveUnionLayout(ty); const mod = sema.mod; const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; + + assert(sema.ownerUnit().unwrap().decl == union_obj.decl); + switch (union_obj.flagsPtr(ip).status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, @@ -35558,7 +35255,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { union_obj.flagsPtr(ip).status = .fully_resolved_wip; for (0..union_obj.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - try sema.resolveTypeFully(field_ty); + try field_ty.resolveFully(mod); } union_obj.flagsPtr(ip).status = .fully_resolved; } @@ -35567,135 +35264,18 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { _ = try sema.typeRequiresComptime(ty); } -pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void { - const mod = sema.mod; - const ip = &mod.intern_pool; - const ty_ip = ty.toIntern(); - - switch (ty_ip) { - .none => unreachable, - - .u0_type, - .i0_type, - .u1_type, - .u8_type, - .i8_type, - .u16_type, - .i16_type, - .u29_type, - .u32_type, - .i32_type, - .u64_type, - .i64_type, - .u80_type, - .u128_type, - .i128_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .anyopaque_type, - .bool_type, - .void_type, - .type_type, - .anyerror_type, - .adhoc_inferred_error_set_type, - .comptime_int_type, - .comptime_float_type, - .noreturn_type, - .anyframe_type, - .null_type, - .undefined_type, - .enum_literal_type, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, - .single_const_pointer_to_comptime_int_type, - .slice_const_u8_type, - .slice_const_u8_sentinel_0_type, - .optional_noreturn_type, - .anyerror_void_error_union_type, - .generic_poison_type, - .empty_struct_type, - => {}, - - .undef => unreachable, - .zero => unreachable, - .zero_usize => unreachable, - .zero_u8 => unreachable, - .one => unreachable, - .one_usize => unreachable, - .one_u8 => unreachable, - .four_u8 => unreachable, - .negative_one => unreachable, - .calling_convention_c => unreachable, - .calling_convention_inline => unreachable, - .void_value => 
unreachable, - .unreachable_value => unreachable, - .null_value => unreachable, - .bool_true => unreachable, - .bool_false => unreachable, - .empty_struct => unreachable, - .generic_poison => unreachable, - - else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { - .type_struct, - .type_struct_packed, - .type_struct_packed_inits, - => try sema.resolveTypeFieldsStruct(ty_ip, ip.loadStructType(ty_ip)), - - .type_union => try sema.resolveTypeFieldsUnion(Type.fromInterned(ty_ip), ip.loadUnionType(ty_ip)), - .simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type), - else => {}, - }, - } -} - -/// Fully resolves a simple type. This is usually a nop, but for builtin types with -/// special InternPool indices (such as std.builtin.Type) it will analyze and fully -/// resolve the container type. -fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileError!void { - const builtin_type_name: []const u8 = switch (simple_type) { - .atomic_order => "AtomicOrder", - .atomic_rmw_op => "AtomicRmwOp", - .calling_convention => "CallingConvention", - .address_space => "AddressSpace", - .float_mode => "FloatMode", - .reduce_op => "ReduceOp", - .call_modifier => "CallModifer", - .prefetch_options => "PrefetchOptions", - .export_options => "ExportOptions", - .extern_options => "ExternOptions", - .type_info => "Type", - else => return, - }; - // This will fully resolve the type. - _ = try sema.getBuiltinType(builtin_type_name); -} - pub fn resolveTypeFieldsStruct( sema: *Sema, ty: InternPool.Index, struct_type: InternPool.LoadedStructType, -) CompileError!void { +) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; // If there is no owner decl it means the struct has no fields. const owner_decl = struct_type.decl.unwrap() orelse return; + assert(sema.ownerUnit().unwrap().decl == owner_decl); + switch (zcu.declPtr(owner_decl).analysis) { .file_failure, .dependency_failure, @@ -35726,16 +35306,19 @@ pub fn resolveTypeFieldsStruct( } return error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; } -pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void { +pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; const owner_decl = struct_type.decl.unwrap() orelse return; + assert(sema.ownerUnit().unwrap().decl == owner_decl); + // Inits can start as resolved if (struct_type.haveFieldInits(ip)) return; @@ -35758,15 +35341,19 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) CompileError!void { } return error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; struct_type.setHaveFieldInits(ip); } -pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) CompileError!void { +pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.LoadedUnionType) SemaError!void { const zcu = sema.mod; const ip = &zcu.intern_pool; const owner_decl = zcu.declPtr(union_type.decl); + + assert(sema.ownerUnit().unwrap().decl == union_type.decl); + switch (owner_decl.analysis) { .file_failure, .dependency_failure, @@ -35804,7 +35391,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load } return 
error.AnalysisFail; }, - else => |e| return e, + error.OutOfMemory => return error.OutOfMemory, + error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; union_type.flagsPtr(ip).status = .have_field_types; } @@ -35860,6 +35448,7 @@ fn resolveInferredErrorSet( } // In this case we are dealing with the actual InferredErrorSet object that // corresponds to the function, not one created to track an inline/comptime call. + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = func_index })); try sema.ensureFuncBodyAnalyzed(func_index); } @@ -36225,6 +35814,8 @@ fn semaStructFields( struct_type.clearTypesWip(ip); if (!any_inits) struct_type.setHaveFieldInits(ip); + + try sema.flushExports(); } // This logic must be kept in sync with `semaStructFields` @@ -36365,6 +35956,8 @@ fn semaStructFieldInits( struct_type.field_inits.get(ip)[field_i] = default_val.toIntern(); } } + + try sema.flushExports(); } fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void { @@ -36738,6 +36331,8 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, mod.declPtr(union_type.decl)); union_type.tagTypePtr(ip).* = enum_ty; } + + try sema.flushExports(); } fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Type, tag_ref: Air.Inst.Ref) CompileError!Value { @@ -36846,106 +36441,6 @@ fn generateUnionTagTypeSimple( return enum_ty; } -fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - const zcu = sema.mod; - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = sema.owner_decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: { - assert(sema.owner_decl.has_tv); - assert(sema.owner_decl.owns_tv); - switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) { - .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?, - .Fn => { - const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :owner generic_owner_decl.zir_decl_index.unwrap().?; - }, - else => unreachable, - } - }, - .type_name_ctx = sema.owner_decl.name, - }; - defer block.instructions.deinit(sema.gpa); - - const src = block.nodeOffset(0); - - const decl_index = try getBuiltinDecl(sema, &block, name); - return sema.analyzeDeclVal(&block, src, decl_index); -} - -fn getBuiltinDecl(sema: *Sema, block: *Block, name: []const u8) CompileError!InternPool.DeclIndex { - const gpa = sema.gpa; - - const src = block.nodeOffset(0); - - const mod = sema.mod; - const ip = &mod.intern_pool; - const std_mod = mod.std_mod; - const std_file = (mod.importPkg(std_mod) catch unreachable).file; - const opt_builtin_inst = (try sema.namespaceLookupRef( - block, - src, - mod.declPtr(std_file.root_decl.unwrap().?).src_namespace.toOptional(), - try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls), - )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); - const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst, src); - const builtin_ty = sema.analyzeAsType(block, src, builtin_inst) catch |err| switch (err) { - error.AnalysisFail => std.debug.panic("std.builtin is corrupt", .{}), - else => |e| return e, - }; - const decl_index = (try sema.namespaceLookup( - block, - src, - 
builtin_ty.getNamespaceIndex(mod), - try ip.getOrPutString(gpa, name, .no_embedded_nulls), - )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); - return decl_index; -} - -fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { - const zcu = sema.mod; - const ty_inst = try sema.getBuiltin(name); - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = sema.owner_decl.src_namespace, - .instructions = .{}, - .inlining = null, - .is_comptime = true, - .src_base_inst = sema.owner_decl.zir_decl_index.unwrap() orelse owner: { - assert(sema.owner_decl.has_tv); - assert(sema.owner_decl.owns_tv); - switch (sema.owner_decl.typeOf(zcu).zigTypeTag(zcu)) { - .Type => break :owner sema.owner_decl.val.toType().typeDeclInst(zcu).?, - .Fn => { - const owner = zcu.funcInfo(sema.owner_decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :owner generic_owner_decl.zir_decl_index.unwrap().?; - }, - else => unreachable, - } - }, - .type_name_ctx = sema.owner_decl.name, - }; - defer block.instructions.deinit(sema.gpa); - - const src = block.nodeOffset(0); - - const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| switch (err) { - error.AnalysisFail => std.debug.panic("std.builtin.{s} is corrupt", .{name}), - else => |e| return e, - }; - try sema.resolveTypeFully(result_ty); // Should not fail - return result_ty; -} - /// There is another implementation of this in `Type.onePossibleValue`. This one /// in `Sema` is for calling during semantic analysis, and performs field resolution /// to get the answer. The one in `Type` is for calling during codegen and asserts @@ -37149,8 +36644,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .struct_type => { + // Resolving the layout first helps to avoid loops. + // If the type has a coherent layout, we can recurse through fields safely. + try ty.resolveLayout(zcu); + const struct_type = ip.loadStructType(ty.toIntern()); - try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); if (struct_type.field_types.len == 0) { // In this case the struct has no fields at all and @@ -37167,20 +36665,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { ); for (field_vals, 0..) |*field_val, i| { if (struct_type.fieldIsComptime(ip, i)) { - try sema.resolveStructFieldInits(ty); + try ty.resolveStructFieldInits(zcu); field_val.* = struct_type.field_inits.get(ip)[i]; continue; } const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (field_ty.eql(ty, zcu)) { - const msg = try sema.errMsg( - ty.srcLoc(zcu), - "struct '{}' depends on itself", - .{ty.fmt(zcu)}, - ); - try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| { field_val.* = field_opv.toIntern(); } else return null; @@ -37208,8 +36697,11 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .union_type => { + // Resolving the layout first helps to avoid loops. + // If the type has a coherent layout, we can recurse through fields safely. 
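+            // (Illustrative: a directly self-referential type such as
+            // `union { next: @This() }` cannot have a coherent layout, so the
+            // `resolveLayout` call below fails before the field recursion
+            // further down could loop.)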
+ try ty.resolveLayout(zcu); + const union_obj = ip.loadUnionType(ty.toIntern()); - try sema.resolveTypeFieldsUnion(ty, union_obj); const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse return null; if (union_obj.field_types.len == 0) { @@ -37217,15 +36709,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return Value.fromInterned(only); } const only_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]); - if (only_field_ty.eql(ty, zcu)) { - const msg = try sema.errMsg( - ty.srcLoc(zcu), - "union '{}' depends on itself", - .{ty.fmt(zcu)}, - ); - try sema.addFieldErrNote(ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } const val_val = (try sema.typeHasOnePossibleValue(only_field_ty)) orelse return null; const only = try zcu.intern(.{ .un = .{ @@ -37343,7 +36826,7 @@ fn analyzeComptimeAlloc( // Needed to make an anon decl with type `var_type` (the `finish()` call below). _ = try sema.typeHasOnePossibleValue(var_type); - const ptr_type = try sema.ptrType(.{ + const ptr_type = try mod.ptrTypeSema(.{ .child = var_type.toIntern(), .flags = .{ .alignment = alignment, @@ -37530,64 +37013,28 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// `generic_poison` will return false. /// May return false negatives when structs and unions are having their field types resolved. -pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { - return ty.comptimeOnlyAdvanced(sema.mod, sema); +pub fn typeRequiresComptime(sema: *Sema, ty: Type) SemaError!bool { + return ty.comptimeOnlyAdvanced(sema.mod, .sema); } -pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - const mod = sema.mod; - return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) { +pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) SemaError!bool { + return ty.hasRuntimeBitsAdvanced(sema.mod, false, .sema) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; } -pub fn typeAbiSize(sema: *Sema, ty: Type) !u64 { - try sema.resolveTypeLayout(ty); +pub fn typeAbiSize(sema: *Sema, ty: Type) SemaError!u64 { + try ty.resolveLayout(sema.mod); return ty.abiSize(sema.mod); } -pub fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment { - return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; -} - -/// Not valid to call for packed unions. -/// Keep implementation in sync with `Module.unionFieldNormalAlignment`. -pub fn unionFieldAlignment(sema: *Sema, u: InternPool.LoadedUnionType, field_index: u32) !Alignment { - const mod = sema.mod; - const ip = &mod.intern_pool; - const field_align = u.fieldAlign(ip, field_index); - if (field_align != .none) return field_align; - const field_ty = Type.fromInterned(u.field_types.get(ip)[field_index]); - if (field_ty.isNoReturn(sema.mod)) return .none; - return sema.typeAbiAlignment(field_ty); -} - -/// Keep implementation in sync with `Module.structFieldAlignment`. 
-pub fn structFieldAlignment( - sema: *Sema, - explicit_alignment: InternPool.Alignment, - field_ty: Type, - layout: std.builtin.Type.ContainerLayout, -) !Alignment { - if (explicit_alignment != .none) - return explicit_alignment; - const mod = sema.mod; - switch (layout) { - .@"packed" => return .none, - .auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty), - .@"extern" => {}, - } - // extern - const ty_abi_align = try sema.typeAbiAlignment(field_ty); - if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) { - return ty_abi_align.maxStrict(.@"16"); - } - return ty_abi_align; +pub fn typeAbiAlignment(sema: *Sema, ty: Type) SemaError!Alignment { + return (try ty.abiAlignmentAdvanced(sema.mod, .sema)).scalar; } pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.fnHasRuntimeBitsAdvanced(sema.mod, sema); + return ty.fnHasRuntimeBitsAdvanced(sema.mod, .sema); } fn unionFieldIndex( @@ -37599,7 +37046,7 @@ fn unionFieldIndex( ) !u32 { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(union_ty); + try union_ty.resolveFields(mod); const union_obj = mod.typeToUnion(union_ty).?; const field_index = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name); @@ -37615,7 +37062,7 @@ fn structFieldIndex( ) !u32 { const mod = sema.mod; const ip = &mod.intern_pool; - try sema.resolveTypeFields(struct_ty); + try struct_ty.resolveFields(mod); if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { @@ -37646,10 +37093,6 @@ fn anonStructFieldIndex( }); } -fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { - try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {}); -} - /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { @@ -37707,8 +37150,8 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -37797,8 +37240,8 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -37881,8 +37324,8 @@ fn intSubWithOverflowScalar( var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -38069,7 +37512,7 @@ fn intFitsInType( fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { const mod = sema.mod; - if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false; + if (!(try int_val.compareAllWithZeroSema(.gte, mod))) return false; const end_val = try mod.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; @@ -38139,8 +37582,8 @@ fn intAddWithOverflowScalar( var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, .sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, .sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -38194,7 +37637,7 @@ fn compareScalar( switch (op) { .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), - else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema), + else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, .sema), } } @@ -38230,80 +37673,6 @@ fn compareVector( } }))); } -/// Returns the type of a pointer to an element. -/// Asserts that the type is a pointer, and that the element type is indexable. -/// If the element index is comptime-known, it must be passed in `offset`. -/// For *@Vector(n, T), return *align(a:b:h:v) T -/// For *[N]T, return *T -/// For [*]T, returns *T -/// For []T, returns *T -/// Handles const-ness and address spaces in particular. -/// This code is duplicated in `analyzePtrArithmetic`. 
-pub fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { - const mod = sema.mod; - const ptr_info = ptr_ty.ptrInfo(mod); - const elem_ty = ptr_ty.elemType2(mod); - const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0; - const parent_ty = ptr_ty.childType(mod); - - const VI = InternPool.Key.PtrType.VectorIndex; - - const vector_info: struct { - host_size: u16 = 0, - alignment: Alignment = .none, - vector_index: VI = .none, - } = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: { - const elem_bits = elem_ty.bitSize(mod); - if (elem_bits == 0) break :blk .{}; - const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); - if (!is_packed) break :blk .{}; - - break :blk .{ - .host_size = @intCast(parent_ty.arrayLen(mod)), - .alignment = parent_ty.abiAlignment(mod), - .vector_index = if (offset) |some| @enumFromInt(some) else .runtime, - }; - } else .{}; - - const alignment: Alignment = a: { - // Calculate the new pointer alignment. - if (ptr_info.flags.alignment == .none) { - // In case of an ABI-aligned pointer, any pointer arithmetic - // maintains the same ABI-alignedness. - break :a vector_info.alignment; - } - // If the addend is not a comptime-known value we can still count on - // it being a multiple of the type size. - const elem_size = try sema.typeAbiSize(elem_ty); - const addend = if (offset) |off| elem_size * off else elem_size; - - // The resulting pointer is aligned to the lcd between the offset (an - // arbitrary number) and the alignment factor (always a power of two, - // non zero). - const new_align: Alignment = @enumFromInt(@min( - @ctz(addend), - ptr_info.flags.alignment.toLog2Units(), - )); - assert(new_align != .none); - break :a new_align; - }; - return sema.ptrType(.{ - .child = elem_ty.toIntern(), - .flags = .{ - .alignment = alignment, - .is_const = ptr_info.flags.is_const, - .is_volatile = ptr_info.flags.is_volatile, - .is_allowzero = is_allowzero, - .address_space = ptr_info.flags.address_space, - .vector_index = vector_info.vector_index, - }, - .packed_offset = .{ - .host_size = vector_info.host_size, - .bit_offset = 0, - }, - }); -} - /// Merge lhs with rhs. /// Asserts that lhs and rhs are both error sets and are resolved. 
fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { @@ -38344,13 +37713,6 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool return sema.typeOf(ref).zigTypeTag(sema.mod) == tag; } -pub fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type { - if (info.flags.alignment != .none) { - _ = try sema.typeAbiAlignment(Type.fromInterned(info.child)); - } - return sema.mod.ptrType(info); -} - pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { if (!sema.mod.comp.debug_incremental) return; @@ -38362,7 +37724,7 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { return; } - const depender = InternPool.AnalSubject.wrap( + const depender = AnalUnit.wrap( if (sema.owner_func_index != .none) .{ .func = sema.owner_func_index } else @@ -38470,12 +37832,12 @@ fn maybeDerefSliceAsArray( else => unreachable, }; const elem_ty = Type.fromInterned(slice.ty).childType(zcu); - const len = try Value.fromInterned(slice.len).toUnsignedIntAdvanced(sema); + const len = try Value.fromInterned(slice.len).toUnsignedIntSema(zcu); const array_ty = try zcu.arrayType(.{ .child = elem_ty.toIntern(), .len = len, }); - const ptr_ty = try sema.ptrType(p: { + const ptr_ty = try zcu.ptrTypeSema(p: { var p = Type.fromInterned(slice.ty).ptrInfo(zcu); p.flags.size = .One; p.child = array_ty.toIntern(); @@ -38494,6 +37856,57 @@ fn analyzeUnreachable(sema: *Sema, block: *Block, src: LazySrcLoc, safety_check: } } +/// This should be called exactly once, at the end of a `Sema`'s lifetime. +/// It takes the exports stored in `sema.exports` and flushes them to the `Zcu` +/// to be processed by the linker after the update. +pub fn flushExports(sema: *Sema) !void { + if (sema.exports.items.len == 0) return; + + const zcu = sema.mod; + const gpa = zcu.gpa; + + const unit = sema.ownerUnit(); + + // There may be existing exports. For instance, a struct may export + // things during both field type resolution and field default resolution. + // + // So, pick up and delete any existing exports. This strategy performs + // redundant work, but that's okay, because this case is exceedingly rare. + if (zcu.single_exports.get(unit)) |export_idx| { + try sema.exports.append(gpa, zcu.all_exports.items[export_idx]); + } else if (zcu.multi_exports.get(unit)) |info| { + try sema.exports.appendSlice(gpa, zcu.all_exports.items[info.index..][0..info.len]); + } + zcu.deleteUnitExports(unit); + + // `sema.exports` is completed; store the data into the `Zcu`.
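+    // Below, the overwhelmingly common case of exactly one export is stored
+    // compactly in `single_exports` as a single index into `all_exports`,
+    // reusing a slot from `free_exports` when one is available; two or more
+    // exports are instead recorded in `multi_exports` as a contiguous
+    // `index`/`len` run appended to `all_exports`.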
+ if (sema.exports.items.len == 1) { + try zcu.single_exports.ensureUnusedCapacity(gpa, 1); + const export_idx = zcu.free_exports.popOrNull() orelse idx: { + _ = try zcu.all_exports.addOne(gpa); + break :idx zcu.all_exports.items.len - 1; + }; + zcu.all_exports.items[export_idx] = sema.exports.items[0]; + zcu.single_exports.putAssumeCapacityNoClobber(unit, @intCast(export_idx)); + } else { + try zcu.multi_exports.ensureUnusedCapacity(gpa, 1); + const exports_base = zcu.all_exports.items.len; + try zcu.all_exports.appendSlice(gpa, sema.exports.items); + zcu.multi_exports.putAssumeCapacityNoClobber(unit, .{ + .index = @intCast(exports_base), + .len = @intCast(sema.exports.items.len), + }); + } +} + +pub fn ownerUnit(sema: Sema) AnalUnit { + if (sema.owner_func_index != .none) { + return AnalUnit.wrap(.{ .func = sema.owner_func_index }); + } else { + return AnalUnit.wrap(.{ .decl = sema.owner_decl_index }); + } +} + pub const bitCastVal = @import("Sema/bitcast.zig").bitCast; pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice; diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 62a0122fa1..3c3ccdbfaa 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -78,8 +78,8 @@ fn bitCastInner( const val_ty = val.typeOf(zcu); - try sema.resolveTypeLayout(val_ty); - try sema.resolveTypeLayout(dest_ty); + try val_ty.resolveLayout(zcu); + try dest_ty.resolveLayout(zcu); assert(val_ty.hasWellDefinedLayout(zcu)); @@ -136,8 +136,8 @@ fn bitCastSpliceInner( const val_ty = val.typeOf(zcu); const splice_val_ty = splice_val.typeOf(zcu); - try sema.resolveTypeLayout(val_ty); - try sema.resolveTypeLayout(splice_val_ty); + try val_ty.resolveLayout(zcu); + try splice_val_ty.resolveLayout(zcu); const splice_bits = splice_val_ty.bitSize(zcu); @@ -767,6 +767,6 @@ const assert = std.debug.assert; const Sema = @import("../Sema.zig"); const Zcu = @import("../Zcu.zig"); const InternPool = @import("../InternPool.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const CompileError = Zcu.CompileError; diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index 59c4c9507d..d8e638ca26 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -1054,7 +1054,7 @@ const ComptimeAllocIndex = InternPool.ComptimeAllocIndex; const Sema = @import("../Sema.zig"); const Block = Sema.Block; const MutableValue = @import("../mutable_value.zig").MutableValue; -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const Zcu = @import("../Zcu.zig"); const LazySrcLoc = Zcu.LazySrcLoc; diff --git a/src/Type.zig b/src/Type.zig new file mode 100644 index 0000000000..9f11a70bf3 --- /dev/null +++ b/src/Type.zig @@ -0,0 +1,4009 @@ +//! Both types and values are canonically represented by a single 32-bit integer +//! which is an index into an `InternPool` data structure. +//! This struct abstracts around this storage by providing methods only +//! applicable to types rather than values in general. + +const std = @import("std"); +const builtin = @import("builtin"); +const Allocator = std.mem.Allocator; +const Value = @import("Value.zig"); +const assert = std.debug.assert; +const Target = std.Target; +const Zcu = @import("Zcu.zig"); +/// Deprecated. 
+const Module = Zcu; +const log = std.log.scoped(.Type); +const target_util = @import("target.zig"); +const Sema = @import("Sema.zig"); +const InternPool = @import("InternPool.zig"); +const Alignment = InternPool.Alignment; +const Zir = std.zig.Zir; +const Type = @This(); +const SemaError = Zcu.SemaError; + +ip_index: InternPool.Index, + +pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { + return ty.zigTypeTagOrPoison(mod) catch unreachable; +} + +pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { + return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern()); +} + +pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { + return switch (self.zigTypeTag(mod)) { + .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), + .Optional => { + return self.optionalChild(mod).baseZigTypeTag(mod); + }, + else => |t| t, + }; +} + +pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { + return switch (ty.zigTypeTag(mod)) { + .Int, + .Float, + .ComptimeFloat, + .ComptimeInt, + => true, + + .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), + + .Bool, + .Type, + .Void, + .ErrorSet, + .Fn, + .Opaque, + .AnyFrame, + .Enum, + .EnumLiteral, + => is_equality_cmp, + + .NoReturn, + .Array, + .Struct, + .Undefined, + .Null, + .ErrorUnion, + .Union, + .Frame, + => false, + + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), + .Optional => { + if (!is_equality_cmp) return false; + return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); + }, + }; +} + +/// If it is a function pointer, returns the function type. Otherwise returns null. +pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { + if (ty.zigTypeTag(mod) != .Pointer) return null; + const elem_ty = ty.childType(mod); + if (elem_ty.zigTypeTag(mod) != .Fn) return null; + return elem_ty; +} + +/// Asserts the type is a pointer. +pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { + return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const; +} + +pub const ArrayInfo = struct { + elem_type: Type, + sentinel: ?Value = null, + len: u64, +}; + +pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { + return .{ + .len = self.arrayLen(mod), + .sentinel = self.sentinel(mod), + .elem_type = self.childType(mod), + }; +} + +pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |p| p, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| p, + else => unreachable, + }, + else => unreachable, + }; +} + +pub fn eql(a: Type, b: Type, mod: *const Module) bool { + _ = mod; // TODO: remove this parameter + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as a u32 value for the + // purpose of Type/Value hashing and equality.
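+    // Consequently, type equality is a single integer comparison; no
+    // structural recursion over the two types is required.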
+ return a.toIntern() == b.toIntern(); +} + +pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = ty; + _ = unused_fmt_string; + _ = options; + _ = writer; + @compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()"); +} + +pub const Formatter = std.fmt.Formatter(format2); + +pub fn fmt(ty: Type, module: *Module) Formatter { + return .{ .data = .{ + .ty = ty, + .module = module, + } }; +} + +const FormatContext = struct { + ty: Type, + module: *Module, +}; + +fn format2( + ctx: FormatContext, + comptime unused_format_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) !void { + comptime assert(unused_format_string.len == 0); + _ = options; + return print(ctx.ty, writer, ctx.module); +} + +pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) { + return .{ .data = ty }; +} + +/// This is a debug function. In order to print types in a meaningful way +/// we also need access to the module. +pub fn dump( + start_type: Type, + comptime unused_format_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + _ = options; + comptime assert(unused_format_string.len == 0); + return writer.print("{any}", .{start_type.ip_index}); +} + +/// Prints a name suitable for `@typeName`. +/// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels. +pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); + }, + .ptr_type => { + const info = ty.ptrInfo(mod); + + if (info.sentinel != .none) switch (info.flags.size) { + .One, .C => unreachable, + .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), + .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), + } else switch (info.flags.size) { + .One => try writer.writeAll("*"), + .Many => try writer.writeAll("[*]"), + .C => try writer.writeAll("[*c]"), + .Slice => try writer.writeAll("[]"), + } + if (info.flags.alignment != .none or + info.packed_offset.host_size != 0 or + info.flags.vector_index != .none) + { + const alignment = if (info.flags.alignment != .none) + info.flags.alignment + else + Type.fromInterned(info.child).abiAlignment(mod); + try writer.print("align({d}", .{alignment.toByteUnits() orelse 0}); + + if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) { + try writer.print(":{d}:{d}", .{ + info.packed_offset.bit_offset, info.packed_offset.host_size, + }); + } + if (info.flags.vector_index == .runtime) { + try writer.writeAll(":?"); + } else if (info.flags.vector_index != .none) { + try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)}); + } + try writer.writeAll(") "); + } + if (info.flags.address_space != .generic) { + try writer.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)}); + } + if (info.flags.is_const) try writer.writeAll("const "); + if (info.flags.is_volatile) try writer.writeAll("volatile "); + if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero "); + + try print(Type.fromInterned(info.child), writer, mod); + return; + }, + .array_type => |array_type| { + if (array_type.sentinel == .none) { + try 
writer.print("[{d}]", .{array_type.len}); + try print(Type.fromInterned(array_type.child), writer, mod); + } else { + try writer.print("[{d}:{}]", .{ + array_type.len, + Value.fromInterned(array_type.sentinel).fmtValue(mod, null), + }); + try print(Type.fromInterned(array_type.child), writer, mod); + } + return; + }, + .vector_type => |vector_type| { + try writer.print("@Vector({d}, ", .{vector_type.len}); + try print(Type.fromInterned(vector_type.child), writer, mod); + try writer.writeAll(")"); + return; + }, + .opt_type => |child| { + try writer.writeByte('?'); + return print(Type.fromInterned(child), writer, mod); + }, + .error_union_type => |error_union_type| { + try print(Type.fromInterned(error_union_type.error_set_type), writer, mod); + try writer.writeByte('!'); + if (error_union_type.payload_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(error_union_type.payload_type), writer, mod); + } + return; + }, + .inferred_error_set_type => |func_index| { + try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); + const owner_decl = mod.funcOwnerDeclPtr(func_index); + try owner_decl.renderFullyQualifiedName(mod, writer); + try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + }, + .error_set_type => |error_set_type| { + const names = error_set_type.names; + try writer.writeAll("error{"); + for (names.get(ip), 0..) |name, i| { + if (i != 0) try writer.writeByte(','); + try writer.print("{}", .{name.fmt(ip)}); + } + try writer.writeAll("}"); + }, + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, + .adhoc_inferred_error_set, + => return writer.writeAll(@tagName(s)), + + .null, + .undefined, + => try writer.print("@TypeOf({s})", .{@tagName(s)}), + + .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), + .atomic_order => try writer.writeAll("std.builtin.AtomicOrder"), + .atomic_rmw_op => try writer.writeAll("std.builtin.AtomicRmwOp"), + .calling_convention => try writer.writeAll("std.builtin.CallingConvention"), + .address_space => try writer.writeAll("std.builtin.AddressSpace"), + .float_mode => try writer.writeAll("std.builtin.FloatMode"), + .reduce_op => try writer.writeAll("std.builtin.ReduceOp"), + .call_modifier => try writer.writeAll("std.builtin.CallModifier"), + .prefetch_options => try writer.writeAll("std.builtin.PrefetchOptions"), + .export_options => try writer.writeAll("std.builtin.ExportOptions"), + .extern_options => try writer.writeAll("std.builtin.ExternOptions"), + .type_info => try writer.writeAll("std.builtin.Type"), + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.decl.unwrap()) |decl_index| { + const decl = mod.declPtr(decl_index); + try decl.renderFullyQualifiedName(mod, writer); + } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try namespace.renderFullyQualifiedName(mod, .empty, writer); + } else { + try writer.writeAll("@TypeOf(.{})"); + } + }, + .anon_struct_type => |anon_struct| { + if (anon_struct.types.len == 0) { + return writer.writeAll("@TypeOf(.{})"); + } + try writer.writeAll("struct{"); + for (anon_struct.types.get(ip), 
anon_struct.values.get(ip), 0..) |field_ty, val, i| { + if (i != 0) try writer.writeAll(", "); + if (val != .none) { + try writer.writeAll("comptime "); + } + if (anon_struct.names.len != 0) { + try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)}); + } + + try print(Type.fromInterned(field_ty), writer, mod); + + if (val != .none) { + try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)}); + } + } + try writer.writeAll("}"); + }, + + .union_type => { + const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .opaque_type => { + const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .enum_type => { + const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .func_type => |fn_info| { + if (fn_info.is_noinline) { + try writer.writeAll("noinline "); + } + try writer.writeAll("fn ("); + const param_types = fn_info.param_types.get(&mod.intern_pool); + for (param_types, 0..) |param_ty, i| { + if (i != 0) try writer.writeAll(", "); + if (std.math.cast(u5, i)) |index| { + if (fn_info.paramIsComptime(index)) { + try writer.writeAll("comptime "); + } + if (fn_info.paramIsNoalias(index)) { + try writer.writeAll("noalias "); + } + } + if (param_ty == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(param_ty), writer, mod); + } + } + if (fn_info.is_var_args) { + if (param_types.len != 0) { + try writer.writeAll(", "); + } + try writer.writeAll("..."); + } + try writer.writeAll(") "); + if (fn_info.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(fn_info.cc)); + try writer.writeAll(") "); + } + if (fn_info.return_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(Type.fromInterned(fn_info.return_type), writer, mod); + } + }, + .anyframe_type => |child| { + if (child == .none) return writer.writeAll("anyframe"); + try writer.writeAll("anyframe->"); + return print(Type.fromInterned(child), writer, mod); + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + } +} + +pub fn fromInterned(i: InternPool.Index) Type { + assert(i != .none); + return .{ .ip_index = i }; +} + +pub fn toIntern(ty: Type) InternPool.Index { + assert(ty.ip_index != .none); + return ty.ip_index; +} + +pub fn toValue(self: Type) Value { + return Value.fromInterned(self.toIntern()); +} + +const RuntimeBitsError = SemaError || error{NeedLazy}; + +/// true if and only if the type takes up space in memory at runtime. +/// There are two reasons a type will return false: +/// * the type is a comptime-only type. For example, the type `type` itself. +/// - note, however, that a struct can have mixed fields and only the non-comptime-only +/// fields will count towards the ABI size. For example, `struct {T: type, x: i32}` +/// hasRuntimeBits()=true and abiSize()=4 +/// * the type has only one possible value, making its ABI size 0. +/// - an enum with an explicit tag type has the ABI size of the integer tag type, +/// making it one-possible-value only if the integer tag type has 0 bits. 
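+///   For example, `enum(u0) { only }` has a 0-bit tag and hence no runtime
+///   bits, while `enum(u8) { only }` still occupies one byte.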
+/// When `ignore_comptime_only` is true, then types that are comptime-only +/// may return false positives. +pub fn hasRuntimeBitsAdvanced( + ty: Type, + mod: *Module, + ignore_comptime_only: bool, + strat: ResolveStratLazy, +) RuntimeBitsError!bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + // False because it is a comptime-only type. + .empty_struct_type => false, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.bits != 0, + .ptr_type => { + // Pointers to zero-bit types still have a runtime address; however, pointers + // to comptime-only types do not, with the exception of function pointers. + if (ignore_comptime_only) return true; + return switch (strat) { + .sema => !try ty.comptimeOnlyAdvanced(mod, .sema), + .eager => !ty.comptimeOnly(mod), + .lazy => error.NeedLazy, + }; + }, + .anyframe_type => true, + .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and + try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .vector_type => |vector_type| return vector_type.len > 0 and + try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .opt_type => |child| { + const child_ty = Type.fromInterned(child); + if (child_ty.isNoReturn(mod)) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) return true; + return switch (strat) { + .sema => !try child_ty.comptimeOnlyAdvanced(mod, .sema), + .eager => !child_ty.comptimeOnly(mod), + .lazy => error.NeedLazy, + }; + }, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + => true, + + // These are function *bodies*, not pointers. + // They return false here because they are comptime-only types. + // Special exceptions have to be made when emitting functions due to + // this returning false. + .func_type => false, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .anyerror, + .adhoc_inferred_error_set, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => true, + + // These are false because they are comptime-only types. + .void, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + => false, + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. 
+ return true; + } + switch (strat) { + .sema => try ty.resolveFields(mod), + .eager => assert(struct_type.haveFieldTypes(ip)), + .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, + } + for (0..struct_type.field_types.len) |i| { + if (struct_type.comptime_bits.getBit(ip, i)) continue; + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + .anon_struct_type => |tuple| { + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + if (val != .none) continue; // comptime field + if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; + } + return false; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).runtime_tag) { + .none => { + if (union_type.flagsPtr(ip).status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. + union_type.flagsPtr(ip).assumed_runtime_bits = true; + return true; + } + }, + .safety, .tagged => { + const tag_ty = union_type.tagTypePtr(ip).*; + // tag_ty will be `none` if this union's tag type is not resolved yet, + // in which case we want control flow to continue down below. + if (tag_ty != .none and + try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + { + return true; + } + }, + } + switch (strat) { + .sema => try ty.resolveFields(mod), + .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), + .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) + return error.NeedLazy, + } + for (0..union_type.field_types.len) |field_index| { + const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + + .opaque_type => true, + .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +/// true if and only if the type has a well-defined memory layout +/// readFrom/writeToMemory are supported only for types with a well- +/// defined memory layout +pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .int_type, + .vector_type, + => true, + + .error_union_type, + .error_set_type, + .inferred_error_set_type, + .anon_struct_type, + .opaque_type, + .anyframe_type, + // These are function bodies, not function pointers. 
+ .func_type, + => false, + + .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(mod), + .opt_type => ty.isPtrLikeOptional(mod), + .ptr_type => |ptr_type| ptr_type.flags.size != .Slice, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .void, + => true, + + .anyerror, + .adhoc_inferred_error_set, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + .generic_poison, + => false, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + // Structs with no fields have a well-defined layout of no bits. + return struct_type.layout != .auto or struct_type.field_types.len == 0; + }, + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + return switch (union_type.flagsPtr(ip).runtime_tag) { + .none, .safety => union_type.flagsPtr(ip).layout != .auto, + .tagged => false, + }; + }, + .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) { + .auto => false, + .explicit, .nonexhaustive => true, + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }; +} + +pub fn hasRuntimeBits(ty: Type, mod: *Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; +} + +pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; +} + +pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool { + return ty.fnHasRuntimeBitsAdvanced(mod, .normal) catch unreachable; +} + +/// Determines whether a function type has runtime bits, i.e. whether a +/// function with this type can exist at runtime. +/// Asserts that `ty` is a function type. +pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool { + const fn_info = mod.typeToFunc(ty).?; + if (fn_info.is_generic) return false; + if (fn_info.is_var_args) return true; + if (fn_info.cc == .Inline) return false; + return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, strat); +} + +pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { + switch (ty.zigTypeTag(mod)) { + .Fn => return ty.fnHasRuntimeBits(mod), + else => return ty.hasRuntimeBits(mod), + } +} + +/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. +pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Fn => true, + else => return ty.hasRuntimeBitsIgnoreComptime(mod), + }; +} + +pub fn isNoReturn(ty: Type, mod: *Module) bool { + return mod.intern_pool.isNoReturn(ty.toIntern()); +} + +/// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
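+/// For example, `*align(8) u32` yields an alignment of 8, while a plain
+/// `*u32` falls back to the ABI alignment of `u32`.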
+pub fn ptrAlignment(ty: Type, mod: *Module) Alignment { + return ptrAlignmentAdvanced(ty, mod, .normal) catch unreachable; +} + +pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) !Alignment { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| { + if (ptr_type.flags.alignment != .none) + return ptr_type.flags.alignment; + + if (strat == .sema) { + const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .sema); + return res.scalar; + } + + return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + }, + .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, strat), + else => unreachable, + }; +} + +pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.address_space, + .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space, + else => unreachable, + }; +} + +/// Never returns `none`. Asserts that all necessary type resolution is already done. +pub fn abiAlignment(ty: Type, mod: *Module) Alignment { + return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; +} + +/// May capture a reference to `ty`. +/// Returned value has type `comptime_int`. +pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { + .val => |val| return val, + .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0), + } +} + +pub const AbiAlignmentAdvanced = union(enum) { + scalar: Alignment, + val: Value, +}; + +pub const ResolveStratLazy = enum { + /// Return a `lazy_size` or `lazy_align` value if necessary. + /// This value can be resolved later using `Value.resolveLazy`. + lazy, + /// Return a scalar result, expecting all necessary type resolution to be completed. + /// Backends should typically use this, since they must not perform type resolution. + eager, + /// Return a scalar result, performing type resolution as necessary. + /// This should typically be used from semantic analysis. + sema, +}; + +/// The chosen strategy can be easily optimized away in release builds. +/// However, in debug builds, it helps to avoid accidentally resolving types in backends. +pub const ResolveStrat = enum { + /// Assert that all necessary resolution is completed. + /// Backends should typically use this, since they must not perform type resolution. + normal, + /// Perform type resolution as necessary using `Zcu`. + /// This should typically be used from semantic analysis. + sema, + + pub fn toLazy(strat: ResolveStrat) ResolveStratLazy { + return switch (strat) { + .normal => .eager, + .sema => .sema, + }; + } +}; + +/// If you pass `eager` you will get back `scalar` and assert the type is resolved. +/// In this case there will be no error, guaranteed. +/// If you pass `lazy` you may get back `scalar` or `val`. +/// If `val` is returned, a reference to `ty` has been captured. +/// If you pass `sema` you will get back `scalar` and resolve the type if +/// necessary, possibly returning a CompileError.
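+/// A sketch of handling both result shapes at a `lazy` call site:
+///
+///     switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
+///         .scalar => |a| { ... }, // alignment known immediately
+///         .val => |v| { ... },    // lazy value capturing a reference to `ty`
+///     }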
+pub fn abiAlignmentAdvanced( + ty: Type, + mod: *Module, + strat: ResolveStratLazy, +) SemaError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const use_llvm = mod.comp.config.use_llvm; + const ip = &mod.intern_pool; + + switch (ty.toIntern()) { + .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) }; + }, + .ptr_type, .anyframe_type => { + return .{ .scalar = ptrAbiAlignment(target) }; + }, + .array_type => |array_type| { + return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat); + }, + .vector_type => |vector_type| { + if (vector_type.len == 0) return .{ .scalar = .@"1" }; + switch (mod.comp.getZigBackend()) { + else => { + const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, .sema)); + if (elem_bits == 0) return .{ .scalar = .@"1" }; + const bytes = ((elem_bits * vector_type.len) + 7) / 8; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return .{ .scalar = Alignment.fromByteUnits(alignment) }; + }, + .stage2_c => { + return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat); + }, + .stage2_x86_64 => { + if (vector_type.child == .bool_type) { + if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; + if (vector_type.len > 128 and std.Target.x86.featureSetHas(target.cpu.features, .avx2)) return .{ .scalar = .@"32" }; + if (vector_type.len > 64) return .{ .scalar = .@"16" }; + const bytes = std.math.divCeil(u32, vector_type.len, 8) catch unreachable; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return .{ .scalar = Alignment.fromByteUnits(alignment) }; + } + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + if (elem_bytes == 0) return .{ .scalar = .@"1" }; + const bytes = elem_bytes * vector_type.len; + if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; + if (bytes > 16 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{ .scalar = .@"32" }; + return .{ .scalar = .@"16" }; + }, + } + }, + + .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), + .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)), + + .error_set_type, .inferred_error_set_type => { + const bits = mod.errorSetBits(); + if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; + }, + + // represents machine code; not a pointer + .func_type => return .{ .scalar = target_util.defaultFunctionAlignment(target) }, + + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .anyopaque, + => return .{ .scalar = .@"1" }, + + .usize, + .isize, + => return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target, use_llvm) }, + + .export_options, + .extern_options, + .type_info, + => return .{ .scalar = ptrAbiAlignment(target) }, + + .c_char => return .{ .scalar = cTypeAlign(target, .char) }, + .c_short => return .{ .scalar = cTypeAlign(target, .short) }, + .c_ushort => return .{ .scalar = 
cTypeAlign(target, .ushort) }, + .c_int => return .{ .scalar = cTypeAlign(target, .int) }, + .c_uint => return .{ .scalar = cTypeAlign(target, .uint) }, + .c_long => return .{ .scalar = cTypeAlign(target, .long) }, + .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) }, + .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) }, + .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) }, + .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) }, + + .f16 => return .{ .scalar = .@"2" }, + .f32 => return .{ .scalar = cTypeAlign(target, .float) }, + .f64 => switch (target.c_type_bit_size(.double)) { + 64 => return .{ .scalar = cTypeAlign(target, .double) }, + else => return .{ .scalar = .@"8" }, + }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return .{ .scalar = cTypeAlign(target, .longdouble) }, + else => { + const u80_ty: Type = .{ .ip_index = .u80_type }; + return .{ .scalar = abiAlignment(u80_ty, mod) }; + }, + }, + .f128 => switch (target.c_type_bit_size(.longdouble)) { + 128 => return .{ .scalar = cTypeAlign(target, .longdouble) }, + else => return .{ .scalar = .@"16" }, + }, + + .anyerror, .adhoc_inferred_error_set => { + const bits = mod.errorSetBits(); + if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; + return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; + }, + + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return .{ .scalar = .@"1" }, + + .noreturn => unreachable, + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.layout == .@"packed") { + switch (strat) { + .sema => try ty.resolveLayout(mod), + .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))), + }, + .eager => {}, + } + return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; + } + + if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) { + .eager => unreachable, // struct alignment not resolved + .sema => try ty.resolveStructAlignment(mod), + .lazy => return .{ .val = Value.fromInterned(try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } })) }, + }; + + return .{ .scalar = struct_type.flagsPtr(ip).alignment }; + }, + .anon_struct_type => |tuple| { + var big_align: Alignment = .@"1"; + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + if (val != .none) continue; // comptime field + switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) { + .scalar => |field_align| big_align = big_align.max(field_align), + .val => switch (strat) { + .eager => unreachable, // field type alignment not resolved + .sema => unreachable, // passed to abiAlignmentAdvanced above + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }, + } + } + return .{ .scalar = big_align }; + }, + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + + if (union_type.flagsPtr(ip).alignment == .none) switch (strat) { + .eager => unreachable, // union layout not resolved + .sema => try ty.resolveUnionAlignment(mod), + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = 
.{ .lazy_align = ty.toIntern() }, + } }))) }, + }; + + return .{ .scalar = union_type.flagsPtr(ip).alignment }; + }, + .opaque_type => return .{ .scalar = .@"1" }, + .enum_type => return .{ + .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod), + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + } +} + +fn abiAlignmentAdvancedErrorUnion( + ty: Type, + mod: *Module, + strat: ResolveStratLazy, + payload_ty: Type, +) SemaError!AbiAlignmentAdvanced { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. + const code_align = abiAlignment(Type.anyerror, mod); + switch (strat) { + .eager, .sema => { + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + return .{ .scalar = code_align }; + } + return .{ .scalar = code_align.max( + (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, + ) }; + }, + .lazy => { + switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) }, + .val => {}, + } + return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }; + }, + } +} + +fn abiAlignmentAdvancedOptional( + ty: Type, + mod: *Module, + strat: ResolveStratLazy, +) SemaError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const child_type = ty.optionalChild(mod); + + switch (child_type.zigTypeTag(mod)) { + .Pointer => return .{ .scalar = ptrAbiAlignment(target) }, + .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), + .NoReturn => return .{ .scalar = .@"1" }, + else => {}, + } + + switch (strat) { + .eager, .sema => { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + return .{ .scalar = .@"1" }; + } + return child_type.abiAlignmentAdvanced(mod, strat); + }, + .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| return .{ .scalar = x.max(.@"1") }, + .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.toIntern() }, + } }))) }, + }, + } +} + +/// May capture a reference to `ty`. +pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { + switch (try ty.abiSizeAdvanced(mod, .lazy)) { + .val => |val| return val, + .scalar => |x| return mod.intValue(Type.comptime_int, x), + } +} + +/// Asserts the type has the ABI size already resolved. +/// Types that return false for hasRuntimeBits() return 0. +pub fn abiSize(ty: Type, mod: *Module) u64 { + return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; +} + +const AbiSizeAdvanced = union(enum) { + scalar: u64, + val: Value, +}; + +/// If you pass `eager` you will get back `scalar` and assert the type is resolved. 
+/// In this case there will be no error, guaranteed. +/// If you pass `lazy` you may get back `scalar` or `val`. +/// If `val` is returned, a reference to `ty` has been captured. +/// If you pass `sema` you will get back `scalar` and resolve the type if +/// necessary, possibly returning a CompileError. +pub fn abiSizeAdvanced( + ty: Type, + mod: *Module, + strat: ResolveStratLazy, +) SemaError!AbiSizeAdvanced { + const target = mod.getTarget(); + const use_llvm = mod.comp.config.use_llvm; + const ip = &mod.intern_pool; + + switch (ty.toIntern()) { + .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) }; + }, + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + }, + .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .array_type => |array_type| { + const len = array_type.lenIncludingSentinel(); + if (len == 0) return .{ .scalar = 0 }; + switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| return .{ .scalar = len * elem_size }, + .val => switch (strat) { + .sema, .eager => unreachable, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }, + } + }, + .vector_type => |vector_type| { + const sub_strat: ResolveStrat = switch (strat) { + .sema => .sema, + .eager => .normal, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }; + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| x, + .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }; + const total_bytes = switch (mod.comp.getZigBackend()) { + else => total_bytes: { + const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, sub_strat); + const total_bits = elem_bits * vector_type.len; + break :total_bytes (total_bits + 7) / 8; + }, + .stage2_c => total_bytes: { + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + break :total_bytes elem_bytes * vector_type.len; + }, + .stage2_x86_64 => total_bytes: { + if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable; + const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); + break :total_bytes elem_bytes * vector_type.len; + }, + }; + return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) }; + }, + + .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), + + .error_set_type, .inferred_error_set_type => { + const bits = mod.errorSetBits(); + if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; + }, + + .error_union_type => |error_union_type| { + const payload_ty = Type.fromInterned(error_union_type.payload_type); + // This code needs to be kept in sync with the equivalent switch 
prong + // in abiAlignmentAdvanced. + const code_size = abiSize(Type.anyerror, mod); + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) { + // Same as anyerror. + return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(payload_ty, mod); + const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }, + }; + + var size: u64 = 0; + if (code_align.compare(.gt, payload_align)) { + size += code_size; + size = payload_align.forward(size); + size += payload_size; + size = code_align.forward(size); + } else { + size += payload_size; + size = code_align.forward(size); + size += code_size; + size = payload_align.forward(size); + } + return AbiSizeAdvanced{ .scalar = size }; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + => return AbiSizeAdvanced{ .scalar = 1 }, + + .f16 => return AbiSizeAdvanced{ .scalar = 2 }, + .f32 => return AbiSizeAdvanced{ .scalar = 4 }, + .f64 => return AbiSizeAdvanced{ .scalar = 8 }, + .f128 => return AbiSizeAdvanced{ .scalar = 16 }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + else => { + const u80_ty: Type = .{ .ip_index = .u80_type }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; + }, + }, + + .usize, + .isize, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, + .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, + .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, + .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, + .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, + .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, + .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, + .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, + .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, + .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + + .anyopaque, + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return AbiSizeAdvanced{ .scalar = 0 }, + + .anyerror, .adhoc_inferred_error_set => { + const bits = mod.errorSetBits(); + if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; + }, + + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // 
missing call to resolveTypeFields + + .type_info => unreachable, + .noreturn => unreachable, + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + switch (strat) { + .sema => try ty.resolveLayout(mod), + .lazy => switch (struct_type.layout) { + .@"packed" => { + if (struct_type.backingIntType(ip).* == .none) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }; + }, + .auto, .@"extern" => { + if (!struct_type.haveLayout(ip)) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }; + }, + }, + .eager => {}, + } + switch (struct_type.layout) { + .@"packed" => return .{ + .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod), + }, + .auto, .@"extern" => { + assert(struct_type.haveLayout(ip)); + return .{ .scalar = struct_type.size(ip).* }; + }, + } + }, + .anon_struct_type => |tuple| { + switch (strat) { + .sema => try ty.resolveLayout(mod), + .lazy, .eager => {}, + } + const field_count = tuple.types.len; + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (strat) { + .sema => try ty.resolveLayout(mod), + .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ + .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))), + }, + .eager => {}, + } + + assert(union_type.haveLayout(ip)); + return .{ .scalar = union_type.size(ip).* }; + }, + .opaque_type => unreachable, // no size available + .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + } +} + +fn abiSizeAdvancedOptional( + ty: Type, + mod: *Module, + strat: ResolveStratLazy, +) SemaError!AbiSizeAdvanced { + const child_ty = ty.optionalChild(mod); + + if (child_ty.isNoReturn(mod)) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + + if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + else => |e| return e, + })) return AbiSizeAdvanced{ .scalar = 1 }; + + if (ty.optionalReprIsPayload(mod)) { + return abiSizeAdvanced(child_ty, mod, strat); + } + + const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.toIntern() }, + } }))) }, + }, + }; + + // Optional types are represented as a struct with the child type as the first + // field and a boolean as the second. 
Since the child type's ABI alignment is + // guaranteed to be >= that of bool's (1 byte), the added size is exactly equal + // to the child type's ABI alignment. + return AbiSizeAdvanced{ + .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size, + }; +} + +pub fn ptrAbiAlignment(target: Target) Alignment { + return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8)); +} + +pub fn intAbiSize(bits: u16, target: Target, use_llvm: bool) u64 { + return intAbiAlignment(bits, target, use_llvm).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8))); +} + +pub fn intAbiAlignment(bits: u16, target: Target, use_llvm: bool) Alignment { + return switch (target.cpu.arch) { + .x86 => switch (bits) { + 0 => .none, + 1...8 => .@"1", + 9...16 => .@"2", + 17...64 => .@"4", + else => .@"16", + }, + .x86_64 => switch (bits) { + 0 => .none, + 1...8 => .@"1", + 9...16 => .@"2", + 17...32 => .@"4", + 33...64 => .@"8", + else => switch (target_util.zigBackend(target, use_llvm)) { + .stage2_x86_64 => .@"8", + else => .@"16", + }, + }, + else => return Alignment.fromByteUnits(@min( + std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))), + maxIntAlignment(target, use_llvm), + )), + }; +} + +pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { + return switch (target.cpu.arch) { + .avr => 1, + .msp430 => 2, + .xcore => 4, + + .arm, + .armeb, + .thumb, + .thumbeb, + .hexagon, + .mips, + .mipsel, + .powerpc, + .powerpcle, + .r600, + .amdgcn, + .riscv32, + .sparc, + .sparcel, + .s390x, + .lanai, + .wasm32, + .wasm64, + => 8, + + // For these, LLVMABIAlignmentOfType(i128) reports 8. Note that 16 + // is a relevant number in three cases: + // 1. Different machine code instruction when loading into SIMD register. + // 2. The C ABI wants 16 for extern structs. + // 3. 16-byte cmpxchg needs 16-byte alignment. + // Same logic for powerpc64, mips64, sparc64. + .powerpc64, + .powerpc64le, + .mips64, + .mips64el, + .sparc64, + => switch (target.ofmt) { + .c => 16, + else => 8, + }, + + .x86_64 => switch (target_util.zigBackend(target, use_llvm)) { + .stage2_x86_64 => 8, + else => 16, + }, + + // Even LLVMABIAlignmentOfType(i128) agrees on these targets. + .x86, + .aarch64, + .aarch64_be, + .aarch64_32, + .riscv64, + .bpfel, + .bpfeb, + .nvptx, + .nvptx64, + => 16, + + // Targets below this comment are unverified, but based on the fact that C requires + // int128_t to be 16 bytes aligned, it's a safe default.
+ .spu_2, + .csky, + .arc, + .m68k, + .tce, + .tcele, + .le32, + .amdil, + .hsail, + .spir, + .kalimba, + .renderscript32, + .spirv, + .spirv32, + .shave, + .le64, + .amdil64, + .hsail64, + .spir64, + .renderscript64, + .ve, + .spirv64, + .dxil, + .loongarch32, + .loongarch64, + .xtensa, + => 16, + }; +} + +pub fn bitSize(ty: Type, mod: *Module) u64 { + return bitSizeAdvanced(ty, mod, .normal) catch unreachable; +} + +pub fn bitSizeAdvanced( + ty: Type, + mod: *Module, + strat: ResolveStrat, +) SemaError!u64 { + const target = mod.getTarget(); + const ip = &mod.intern_pool; + + const strat_lazy: ResolveStratLazy = strat.toLazy(); + + switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| return int_type.bits, + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), + }, + .anyframe_type => return target.ptrBitWidth(), + + .array_type => |array_type| { + const len = array_type.lenIncludingSentinel(); + if (len == 0) return 0; + const elem_ty = Type.fromInterned(array_type.child); + const elem_size = @max( + (try elem_ty.abiAlignmentAdvanced(mod, strat_lazy)).scalar.toByteUnits() orelse 0, + (try elem_ty.abiSizeAdvanced(mod, strat_lazy)).scalar, + ); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, strat); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, + .vector_type => |vector_type| { + const child_ty = Type.fromInterned(vector_type.child); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, strat); + return elem_bit_size * vector_type.len; + }, + .opt_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8; + }, + + .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), + + .error_union_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. 
+ return (try abiSizeAdvanced(ty, mod, strat_lazy)).scalar * 8; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, + + .usize, + .isize, + => return target.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, + + .anyerror, + .adhoc_inferred_error_set, + => return mod.errorSetBits(), + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + + .atomic_order => unreachable, + .atomic_rmw_op => unreachable, + .calling_convention => unreachable, + .address_space => unreachable, + .float_mode => unreachable, + .reduce_op => unreachable, + .call_modifier => unreachable, + .prefetch_options => unreachable, + .export_options => unreachable, + .extern_options => unreachable, + .type_info => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + const is_packed = struct_type.layout == .@"packed"; + if (strat == .sema) { + try ty.resolveFields(mod); + if (is_packed) try ty.resolveLayout(mod); + } + if (is_packed) { + return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, strat); + } + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; + }, + + .anon_struct_type => { + if (strat == .sema) try ty.resolveFields(mod); + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + const is_packed = ty.containerLayout(mod) == .@"packed"; + if (strat == .sema) { + try ty.resolveFields(mod); + if (is_packed) try ty.resolveLayout(mod); + } + if (!is_packed) { + return (try ty.abiSizeAdvanced(mod, strat_lazy)).scalar * 8; + } + assert(union_type.flagsPtr(ip).status.haveFieldTypes()); + + var size: u64 = 0; + for (0..union_type.field_types.len) |field_index| { + const field_ty = union_type.field_types.get(ip)[field_index]; + size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, strat)); + } + + return size; + }, + .opaque_type => unreachable, + .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, strat), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + } +} + +/// Returns true if the type's layout is already resolved and it is safe +/// to use `abiSize`, `abiAlignment` and `bitSize` on it. 
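// A standalone illustration (plain user-level Zig, runnable as its own file
// with `zig test`; none of the compiler internals above are assumed) of the
// distinction between `bitSize` above and ABI size: @bitSizeOf counts
// information bits, while @sizeOf includes ABI padding.
const std = @import("std");

test "bit size vs ABI size" {
    try std.testing.expect(@bitSizeOf(u24) == 24);
    // Padded out to a whole number of bytes satisfying the ABI alignment
    // (4 bytes on most targets; the exact value is target-dependent).
    try std.testing.expect(@sizeOf(u24) * 8 >= @bitSizeOf(u24));
}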
+pub fn layoutIsResolved(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip), + .union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip), + .array_type => |array_type| { + if (array_type.lenIncludingSentinel() == 0) return true; + return Type.fromInterned(array_type.child).layoutIsResolved(mod); + }, + .opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod), + .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(mod), + else => true, + }; +} + +pub fn isSinglePointer(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_info| ptr_info.flags.size == .One, + else => false, + }; +} + +/// Asserts `ty` is a pointer. +pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { + return ptrSizeOrNull(ty, mod).?; +} + +/// Returns `null` if `ty` is not a pointer. +pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_info| ptr_info.flags.size, + else => null, + }; +} + +pub fn isSlice(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .Slice, + else => false, + }; +} + +pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { + return Type.fromInterned(mod.intern_pool.slicePtrType(ty.toIntern())); +} + +pub fn isConstPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_const, + else => false, + }; +} + +pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { + return isVolatilePtrIp(ty, &mod.intern_pool); +} + +pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { + return switch (ip.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_volatile, + else => false, + }; +} + +pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.is_allowzero, + .opt_type => true, + else => false, + }; +} + +pub fn isCPtr(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + else => false, + }; +} + +pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice => false, + .One, .Many, .C => true, + }, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| switch (p.flags.size) { + .Slice, .C => false, + .Many, .One => !p.flags.is_allowzero, + }, + else => false, + }, + else => false, + }; +} + +/// For pointer-like optionals, returns true, otherwise returns the allowzero property +/// of pointers. +pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { + if (ty.isPtrLikeOptional(mod)) { + return true; + } + return ty.ptrInfo(mod).flags.is_allowzero; +} + +/// See also `isPtrLikeOptional`. 
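// A standalone sketch of the "pointer-like optional" representation that
// `optionalReprIsPayload` and `isPtrLikeOptional` below describe: `?*T`
// needs no separate tag because address zero encodes `null`, while an
// `allowzero` pointer (or a non-pointer payload) forces an out-of-band flag.
const std = @import("std");

test "pointer-like optionals collapse to one word" {
    try std.testing.expect(@sizeOf(?*u32) == @sizeOf(*u32));
    try std.testing.expect(@sizeOf(?*allowzero u32) > @sizeOf(*allowzero u32));
    try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
}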
+pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) { + .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero, + .error_set_type, .inferred_error_set_type => true, + else => false, + }, + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + else => false, + }; +} + +/// Returns true if the type is optional and would be lowered to a single pointer +/// address value, using 0 for null. Note that this returns true for C pointers. +/// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. +pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.flags.size == .C, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice, .C => false, + .Many, .One => !ptr_type.flags.is_allowzero, + }, + else => false, + }, + else => false, + }; +} + +/// For *[N]T, returns [N]T. +/// For *T, returns T. +/// For [*]T, returns T. +pub fn childType(ty: Type, mod: *const Module) Type { + return childTypeIp(ty, &mod.intern_pool); +} + +pub fn childTypeIp(ty: Type, ip: *const InternPool) Type { + return Type.fromInterned(ip.childType(ty.toIntern())); +} + +/// For *[N]T, returns T. +/// For ?*T, returns T. +/// For ?*[N]T, returns T. +/// For ?[*]T, returns T. +/// For *T, returns T. +/// For [*]T, returns T. +/// For [N]T, returns T. +/// For []T, returns T. +/// For anyframe->T, returns T. +pub fn elemType2(ty: Type, mod: *const Module) Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One => Type.fromInterned(ptr_type.child).shallowElemType(mod), + .Many, .C, .Slice => Type.fromInterned(ptr_type.child), + }, + .anyframe_type => |child| { + assert(child != .none); + return Type.fromInterned(child); + }, + .vector_type => |vector_type| Type.fromInterned(vector_type.child), + .array_type => |array_type| Type.fromInterned(array_type.child), + .opt_type => |child| Type.fromInterned(mod.intern_pool.childType(child)), + else => unreachable, + }; +} + +fn shallowElemType(child_ty: Type, mod: *const Module) Type { + return switch (child_ty.zigTypeTag(mod)) { + .Array, .Vector => child_ty.childType(mod), + else => child_ty, + }; +} + +/// For vectors, returns the element type. Otherwise returns self. +pub fn scalarType(ty: Type, mod: *Module) Type { + return switch (ty.zigTypeTag(mod)) { + .Vector => ty.childType(mod), + else => ty, + }; +} + +/// Asserts that the type is an optional. +/// Note that for C pointers this returns the type unmodified. +pub fn optionalChild(ty: Type, mod: *const Module) Type { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child| Type.fromInterned(child), + .ptr_type => |ptr_type| b: { + assert(ptr_type.flags.size == .C); + break :b ty; + }, + else => unreachable, + }; +} + +/// Returns the tag type of a union, if the type is a union and it has a tag type. +/// Otherwise, returns `null`. 
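// A user-level counterpart of the union/tag queries that follow: a tagged
// union carries an implicit enum tag type, recoverable with std.meta.Tag.
// Standalone sketch, runnable as its own file with `zig test`.
const std = @import("std");

test "tagged union tag type" {
    const U = union(enum) { int: i32, float: f64 };
    const Tag = std.meta.Tag(U); // the implicit enum { int, float }
    const u: U = .{ .int = 42 };
    try std.testing.expect(@as(Tag, u) == .int);
}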
+pub fn unionTagType(ty: Type, mod: *Module) ?Type { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .union_type => {}, + else => return null, + } + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).runtime_tag) { + .tagged => { + assert(union_type.flagsPtr(ip).status.haveFieldTypes()); + return Type.fromInterned(union_type.enum_tag_ty); + }, + else => return null, + } +} + +/// Same as `unionTagType` but includes safety tag. +/// Codegen should use this version. +pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + if (!union_type.hasTag(ip)) return null; + assert(union_type.haveFieldTypes(ip)); + return Type.fromInterned(union_type.enum_tag_ty); + }, + else => null, + }; +} + +/// Asserts the type is a union; returns the tag type, even if the tag will +/// not be stored at runtime. +pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type { + const union_obj = mod.typeToUnion(ty).?; + return Type.fromInterned(union_obj.enum_tag_ty); +} + +pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) ?Type { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + const union_fields = union_obj.field_types.get(ip); + const index = mod.unionTagFieldIndex(union_obj, enum_tag) orelse return null; + return Type.fromInterned(union_fields[index]); +} + +pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + return Type.fromInterned(union_obj.field_types.get(ip)[index]); +} + +pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { + const union_obj = mod.typeToUnion(ty).?; + return mod.unionTagFieldIndex(union_obj, enum_tag); +} + +pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + const union_obj = mod.typeToUnion(ty).?; + for (union_obj.field_types.get(ip)) |field_ty| { + if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false; + } + return true; +} + +/// Returns the type used for backing storage of this union during comptime operations. +/// Asserts the type is either an extern or packed union. +pub fn unionBackingType(ty: Type, mod: *Module) !Type { + return switch (ty.containerLayout(mod)) { + .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }), + .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))), + .auto => unreachable, + }; +} + +pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout { + const ip = &mod.intern_pool; + const union_obj = ip.loadUnionType(ty.toIntern()); + return mod.getUnionLayout(union_obj); +} + +pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).layout, + .anon_struct_type => .auto, + .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout, + else => unreachable, + }; +} + +/// Asserts that the type is an error union. +pub fn errorUnionPayload(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type); +} + +/// Asserts that the type is an error union. 
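// In user-level terms, the decomposition performed by `errorUnionPayload`
// above and `errorUnionSet` below: an error union `E!T` splits into its
// error set `E` and its payload `T`. Standalone sketch.
const std = @import("std");

test "error union decomposition" {
    const E = error{Oops};
    const info = @typeInfo(E!u32).ErrorUnion;
    try std.testing.expect(info.error_set == E);
    try std.testing.expect(info.payload == u32);
}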
+pub fn errorUnionSet(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.errorUnionSet(ty.toIntern())); +} + +/// Returns false for unresolved inferred error sets. +pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type, .adhoc_inferred_error_set_type => false, + else => switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .none, .anyerror_type => false, + else => |t| ip.indexToKey(t).error_set_type.names.len == 0, + }, + else => unreachable, + }, + }; +} + +/// Returns true if it is an error set that includes anyerror, false otherwise. +/// Note that the result may be a false negative if the type did not get error set +/// resolution prior to this call. +pub fn isAnyError(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type => true, + .adhoc_inferred_error_set_type => false, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type, + else => false, + }, + }; +} + +pub fn isError(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { + .ErrorUnion, .ErrorSet => true, + else => false, + }; +} + +/// Returns whether ty, which must be an error set, includes an error `name`. +/// Might return a false negative if `ty` is an inferred error set and not fully +/// resolved yet. +pub fn errorSetHasFieldIp( + ip: *const InternPool, + ty: InternPool.Index, + name: InternPool.NullTerminatedString, +) bool { + return switch (ty) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null, + }, + else => unreachable, + }, + }; +} + +/// Returns whether ty, which must be an error set, includes an error `name`. +/// Might return a false negative if `ty` is an inferred error set and not fully +/// resolved yet. +pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |error_set_type| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return error_set_type.nameIndex(ip, field_name_interned) != null; + }, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null; + }, + }, + else => unreachable, + }, + }; +} + +/// Asserts the type is an array or vector or struct. 
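// The length convention used by the array queries below, observed from user
// code: `len` never counts the sentinel, but the sentinel still occupies
// storage. Standalone sketch.
const std = @import("std");

test "sentinel-terminated array length" {
    const a: [3:0]u8 = .{ 1, 2, 3 };
    try std.testing.expect(a.len == 3); // sentinel excluded from len
    try std.testing.expect(@sizeOf([3:0]u8) == 4); // but included in storage
}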
+pub fn arrayLen(ty: Type, mod: *const Module) u64 { + return ty.arrayLenIp(&mod.intern_pool); +} + +pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 { + return ip.aggregateTypeLen(ty.toIntern()); +} + +pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { + return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern()); +} + +pub fn vectorLen(ty: Type, mod: *const Module) u32 { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .vector_type => |vector_type| vector_type.len, + .anon_struct_type => |tuple| @intCast(tuple.types.len), + else => unreachable, + }; +} + +/// Asserts the type is an array, pointer or vector. +pub fn sentinel(ty: Type, mod: *const Module) ?Value { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .vector_type, + .struct_type, + .anon_struct_type, + => null, + + .array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, + .ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, + + else => unreachable, + }; +} + +/// Returns true if and only if the type is a fixed-width integer. +pub fn isInt(self: Type, mod: *const Module) bool { + return self.toIntern() != .comptime_int_type and + mod.intern_pool.isIntegerType(self.toIntern()); +} + +/// Returns true if and only if the type is a fixed-width, signed integer. +pub fn isSignedInt(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .c_char_type => mod.getTarget().charSignedness() == .signed, + .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.signedness == .signed, + else => false, + }, + }; +} + +/// Returns true if and only if the type is a fixed-width, unsigned integer. +pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .c_char_type => mod.getTarget().charSignedness() == .unsigned, + .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| int_type.signedness == .unsigned, + else => false, + }, + }; +} + +/// Returns true for integers, enums, error sets, and packed structs. +/// If this function returns true, then intInfo() can be called on the type. +pub fn isAbiInt(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Int, .Enum, .ErrorSet => true, + .Struct => ty.containerLayout(mod) == .@"packed", + else => false, + }; +} + +/// Asserts the type is an integer, enum, error set, or vector of one of them. 
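// A user-level analogue of `intInfo` below: integers, enums, and packed
// structs all reduce to a signedness plus a bit count, and a packed struct's
// backing integer has exactly the sum of its fields' bit sizes. Standalone
// sketch.
const std = @import("std");

test "packed struct backing integer" {
    const P = packed struct { a: u3, b: u5, c: u8 };
    try std.testing.expect(@bitSizeOf(P) == 16);
    try std.testing.expect(std.meta.Int(.unsigned, @bitSizeOf(P)) == u16);
}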
+pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { + const ip = &mod.intern_pool; + const target = mod.getTarget(); + var ty = starting_ty; + + while (true) switch (ty.toIntern()) { + .anyerror_type, .adhoc_inferred_error_set_type => { + return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; + }, + .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char_type => return .{ .signedness = mod.getTarget().charSignedness(), .bits = target.c_type_bit_size(.char) }, + .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| return int_type, + .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*), + .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), + .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child), + + .error_set_type, .inferred_error_set_type => { + return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; + }, + + .anon_struct_type => unreachable, + + .ptr_type => unreachable, + .anyframe_type => unreachable, + .array_type => unreachable, + + .opt_type => unreachable, + .error_union_type => unreachable, + .func_type => unreachable, + .simple_type => unreachable, // handled via Index enum tag above + + .union_type => unreachable, + .opaque_type => unreachable, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +pub fn isNamedInt(ty: Type) bool { + return switch (ty.toIntern()) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => true, + + else => false, + }; +} + +/// Returns `false` for `comptime_float`. +pub fn isRuntimeFloat(ty: Type) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + => true, + + else => false, + }; +} + +/// Returns `true` for `comptime_float`. +pub fn isAnyFloat(ty: Type) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_float_type, + => true, + + else => false, + }; +} + +/// Asserts the type is a fixed-size float or comptime_float. +/// Returns 128 for comptime_float types. 
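// The fixed float widths that `floatBits` below reports, checked from user
// code; only c_longdouble varies with the target. Standalone sketch.
const std = @import("std");

test "float bit widths" {
    try std.testing.expect(@bitSizeOf(f16) == 16);
    try std.testing.expect(@bitSizeOf(f80) == 80);
    try std.testing.expect(@bitSizeOf(f128) == 128);
}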
+pub fn floatBits(ty: Type, target: Target) u16 { + return switch (ty.toIntern()) { + .f16_type => 16, + .f32_type => 32, + .f64_type => 64, + .f80_type => 80, + .f128_type, .comptime_float_type => 128, + .c_longdouble_type => target.c_type_bit_size(.longdouble), + + else => unreachable, + }; +} + +/// Asserts the type is a function or a function pointer. +pub fn fnReturnType(ty: Type, mod: *Module) Type { + return Type.fromInterned(mod.intern_pool.funcTypeReturnType(ty.toIntern())); +} + +/// Asserts the type is a function. +pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc; +} + +pub fn isValidParamType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { + .Opaque, .NoReturn => false, + else => true, + }; +} + +pub fn isValidReturnType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { + .Opaque => false, + else => true, + }; +} + +/// Asserts the type is a function. +pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args; +} + +pub fn isNumeric(ty: Type, mod: *const Module) bool { + return switch (ty.toIntern()) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_int_type, + .comptime_float_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => true, + + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => true, + else => false, + }, + }; +} + +/// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which +/// resolves field types rather than asserting they are already resolved. 
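// The user-visible consequence of "one possible value", which
// `onePossibleValue` below computes: when every field of a type admits only
// a single value, the whole type is zero-bit and needs no runtime storage.
// Standalone sketch.
const std = @import("std");

test "types with one possible value are zero-bit" {
    const S = struct { a: u0, b: void, c: [4]u0 };
    try std.testing.expect(@sizeOf(S) == 0);
}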
+pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { + var ty = starting_type; + const ip = &mod.intern_pool; + while (true) switch (ty.toIntern()) { + .empty_struct_type => return Value.empty_struct, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return try mod.intValue(ty, 0); + } else { + return null; + } + }, + + .ptr_type, + .error_union_type, + .func_type, + .anyframe_type, + .error_set_type, + .inferred_error_set_type, + => return null, + + inline .array_type, .vector_type => |seq_type, seq_tag| { + const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; + if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } }))); + if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| { + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, + } }))); + } + return null; + }, + .opt_type => |child| { + if (child == .noreturn_type) { + return try mod.nullValue(ty); + } else { + return null; + } + }, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .enum_literal, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + .adhoc_inferred_error_set, + => return null, + + .void => return Value.void, + .noreturn => return Value.@"unreachable", + .null => return Value.null, + .undefined => return Value.undef, + + .generic_poison => unreachable, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.haveFieldTypes(ip)); + if (struct_type.knownNonOpv(ip)) + return null; + const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len); + defer mod.gpa.free(field_vals); + for (field_vals, 0..) |*field_val, i_usize| { + const i: u32 = @intCast(i_usize); + if (struct_type.fieldIsComptime(ip, i)) { + assert(struct_type.haveFieldInits(ip)); + field_val.* = struct_type.field_inits.get(ip)[i]; + continue; + } + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + if (try field_ty.onePossibleValue(mod)) |field_opv| { + field_val.* = field_opv.toIntern(); + } else return null; + } + + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } }))); + }, + + .anon_struct_type => |tuple| { + for (tuple.values.get(ip)) |val| { + if (val == .none) return null; + } + // In this case the struct has all comptime-known fields and + // therefore has one possible value. 
+ // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip)); + defer mod.gpa.free(duped_values); + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = duped_values }, + } }))); + }, + + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse + return null; + if (union_obj.field_types.len == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return Value.fromInterned(only); + } + const only_field_ty = union_obj.field_types.get(ip)[0]; + const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse + return null; + const only = try mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), + } }); + return Value.fromInterned(only); + }, + .opaque_type => return null, + .enum_type => { + const enum_type = ip.loadEnumType(ty.toIntern()); + switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = int_opv.toIntern(), + } }); + return Value.fromInterned(only); + } + + return null; + }, + .auto, .explicit => { + if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null; + + switch (enum_type.names.len) { + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return Value.fromInterned(only); + }, + 1 => { + if (enum_type.values.len == 0) { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return Value.fromInterned(only); + } else { + return Value.fromInterned(enum_type.values.get(ip)[0]); + } + }, + else => return null, + } + }, + } + }, + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +/// During semantic analysis, instead call `Sema.typeRequiresComptime` which +/// resolves field types rather than asserting they are already resolved. +pub fn comptimeOnly(ty: Type, mod: *Module) bool { + return ty.comptimeOnlyAdvanced(mod, .normal) catch unreachable; +} + +/// `generic_poison` will return false. +/// May return false negatives when structs and unions are having their field types resolved. 
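// The user-visible meaning of "comptime-only", which `comptimeOnlyAdvanced`
// below decides: types such as `type` and `comptime_int` have no runtime
// representation, so a struct containing one can only exist at comptime.
// Standalone sketch.
const std = @import("std");

test "comptime-only struct" {
    const S = struct { t: type };
    comptime {
        const s: S = .{ .t = u8 };
        std.debug.assert(s.t == u8);
    }
    // A runtime `var s: S` would be a compile error: S is comptime-only.
}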
+pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, strat: ResolveStrat) SemaError!bool { + const ip = &mod.intern_pool; + return switch (ty.toIntern()) { + .empty_struct_type => false, + + else => switch (ip.indexToKey(ty.toIntern())) { + .int_type => false, + .ptr_type => |ptr_type| { + const child_ty = Type.fromInterned(ptr_type.child); + switch (child_ty.zigTypeTag(mod)) { + .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, strat), + .Opaque => return false, + else => return child_ty.comptimeOnlyAdvanced(mod, strat), + } + }, + .anyframe_type => |child| { + if (child == .none) return false; + return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat); + }, + .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, strat), + .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, strat), + .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, strat), + .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, strat), + + .error_set_type, + .inferred_error_set_type, + => false, + + // These are function bodies, not function pointers. + .func_type => true, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .adhoc_inferred_error_set, + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + // packed structs cannot be comptime-only because they have a well-defined + // memory layout and every field has a well-defined bit pattern. + if (struct_type.layout == .@"packed") + return false; + + // A struct with no fields is not comptime-only. + return switch (struct_type.flagsPtr(ip).requires_comptime) { + .no, .wip => false, + .yes => true, + .unknown => { + assert(strat == .sema); + + if (struct_type.flagsPtr(ip).field_types_wip) + return false; + + struct_type.flagsPtr(ip).requires_comptime = .wip; + errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; + + try ty.resolveFields(mod); + + for (0..struct_type.field_types.len) |i_usize| { + const i: u32 = @intCast(i_usize); + if (struct_type.fieldIsComptime(ip, i)) continue; + const field_ty = struct_type.field_types.get(ip)[i]; + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) { + // Note that this does not cause the layout to + // be considered resolved. Comptime-only types + // still maintain a layout of their + // runtime-known fields. 
+ struct_type.flagsPtr(ip).requires_comptime = .yes; + return true; + } + } + + struct_type.flagsPtr(ip).requires_comptime = .no; + return false; + }, + }; + }, + + .anon_struct_type => |tuple| { + for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { + const have_comptime_val = val != .none; + if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) return true; + } + return false; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + switch (union_type.flagsPtr(ip).requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + assert(strat == .sema); + + if (union_type.flagsPtr(ip).status == .field_types_wip) + return false; + + union_type.flagsPtr(ip).requires_comptime = .wip; + errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; + + try ty.resolveFields(mod); + + for (0..union_type.field_types.len) |field_idx| { + const field_ty = union_type.field_types.get(ip)[field_idx]; + if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, strat)) { + union_type.flagsPtr(ip).requires_comptime = .yes; + return true; + } + } + + union_type.flagsPtr(ip).requires_comptime = .no; + return false; + }, + } + }, + + .opaque_type => false, + + .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, strat), + + // values, not types + .undef, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, + }, + }; +} + +pub fn isVector(ty: Type, mod: *const Module) bool { + return ty.zigTypeTag(mod) == .Vector; +} + +/// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len. +pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 { + if (!ty.isVector(zcu)) return 0; + const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type; + return v.len * Type.fromInterned(v.child).bitSize(zcu); +} + +pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + else => false, + }; +} + +pub fn isIndexable(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + .Pointer => switch (ty.ptrSize(mod)) { + .Slice, .Many, .C => true, + .One => switch (ty.childType(mod).zigTypeTag(mod)) { + .Array, .Vector => true, + .Struct => ty.childType(mod).isTuple(mod), + else => false, + }, + }, + .Struct => ty.isTuple(mod), + else => false, + }; +} + +pub fn indexableHasLen(ty: Type, mod: *Module) bool { + return switch (ty.zigTypeTag(mod)) { + .Array, .Vector => true, + .Pointer => switch (ty.ptrSize(mod)) { + .Many, .C => false, + .Slice => true, + .One => switch (ty.childType(mod).zigTypeTag(mod)) { + .Array, .Vector => true, + .Struct => ty.childType(mod).isTuple(mod), + else => false, + }, + }, + .Struct => ty.isTuple(mod), + else => false, + }; +} + +/// Asserts that the type can have a namespace. +pub fn getNamespaceIndex(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex { + return ty.getNamespace(zcu).?; +} + +/// Returns null if the type has no namespace. 
+pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex { + const ip = &zcu.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace, + .struct_type => ip.loadStructType(ty.toIntern()).namespace, + .union_type => ip.loadUnionType(ty.toIntern()).namespace, + .enum_type => ip.loadEnumType(ty.toIntern()).namespace, + + .anon_struct_type => .none, + .simple_type => |s| switch (s) { + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + => .none, + else => null, + }, + + else => null, + }; +} + +// Works for integers and vectors of integers. +pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value { + const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod)); + return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .repeated_elem = scalar.toIntern() }, + } }))) else scalar; +} + +/// Asserts that the type is an integer. +pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { + const info = ty.intInfo(mod); + if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0); + if (info.bits == 0) return mod.intValue(dest_ty, -1); + + if (std.math.cast(u6, info.bits - 1)) |shift| { + const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); + return mod.intValue(dest_ty, n); + } + + var res = try std.math.big.int.Managed.init(mod.gpa); + defer res.deinit(); + + try res.setTwosCompIntLimit(.min, info.signedness, info.bits); + + return mod.intValue_big(dest_ty, res.toConst()); +} + +// Works for integers and vectors of integers. +/// The returned Value will have type dest_ty. +pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { + const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod)); + return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .repeated_elem = scalar.toIntern() }, + } }))) else scalar; +} + +/// The returned Value will have type dest_ty. +pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { + const info = ty.intInfo(mod); + + switch (info.bits) { + 0 => return switch (info.signedness) { + .signed => try mod.intValue(dest_ty, -1), + .unsigned => try mod.intValue(dest_ty, 0), + }, + 1 => return switch (info.signedness) { + .signed => try mod.intValue(dest_ty, 0), + .unsigned => try mod.intValue(dest_ty, 1), + }, + else => {}, + } + + if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) { + .signed => { + const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift); + return mod.intValue(dest_ty, n); + }, + .unsigned => { + const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift); + return mod.intValue(dest_ty, n); + }, + }; + + var res = try std.math.big.int.Managed.init(mod.gpa); + defer res.deinit(); + + try res.setTwosCompIntLimit(.max, info.signedness, info.bits); + + return mod.intValue_big(dest_ty, res.toConst()); +} + +/// Asserts the type is an enum or a union.
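// The shift trick used by minIntScalar/maxIntScalar above, checked against
// std.math for a few signed widths that fit in an i64. Standalone sketch.
const std = @import("std");

test "min/max int via arithmetic shift" {
    inline for (.{ 1, 7, 32, 63 }) |bits| {
        const shift: u6 = bits - 1;
        const min = @as(i64, std.math.minInt(i64)) >> (63 - shift);
        const max = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
        const T = std.meta.Int(.signed, bits);
        try std.testing.expect(min == std.math.minInt(T));
        try std.testing.expect(max == std.math.maxInt(T));
    }
}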
+pub fn intTagType(ty: Type, mod: *Module) Type { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod), + .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), + else => unreachable, + }; +} + +pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) { + .nonexhaustive => true, + .auto, .explicit => false, + }, + else => false, + }; +} + +// Asserts that `ty` is an error set and not `anyerror`. +// Asserts that `ty` is resolved if it is an inferred error set. +pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |x| x.names, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .none => unreachable, // unresolved inferred error set + .anyerror_type => unreachable, + else => |t| ip.indexToKey(t).error_set_type.names, + }, + else => unreachable, + }; +} + +pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice { + return mod.intern_pool.loadEnumType(ty.toIntern()).names; +} + +pub fn enumFieldCount(ty: Type, mod: *Module) usize { + return mod.intern_pool.loadEnumType(ty.toIntern()).names.len; +} + +pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index]; +} + +pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 { + const ip = &mod.intern_pool; + const enum_type = ip.loadEnumType(ty.toIntern()); + return enum_type.nameIndex(ip, field_name); +} + +/// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or +/// an integer which represents the enum value. Returns the field index in +/// declaration order, or `null` if `enum_tag` does not match any field. +pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { + const ip = &mod.intern_pool; + const enum_type = ip.loadEnumType(ty.toIntern()); + const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) { + .int => enum_tag.toIntern(), + .enum_tag => |info| info.int, + else => unreachable, + }; + assert(ip.typeOf(int_tag) == enum_type.tag_ty); + return enum_type.tagValueIndex(ip, int_tag); +} + +/// Returns none in the case of a tuple which uses the integer index as the field name. +pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index), + .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index), + else => unreachable, + }; +} + +pub fn structFieldCount(ty: Type, mod: *Module) u32 { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).field_types.len, + .anon_struct_type => |anon_struct| anon_struct.types.len, + else => unreachable, + }; +} + +/// Supports structs and unions. 
+pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]), + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + return Type.fromInterned(union_obj.field_types.get(ip)[index]); + }, + .anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]), + else => unreachable, + }; +} + +pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { + return ty.structFieldAlignAdvanced(index, zcu, .normal) catch unreachable; +} + +pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, strat: ResolveStrat) !Alignment { + const ip = &zcu.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.layout != .@"packed"); + const explicit_align = struct_type.fieldAlign(ip, index); + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); + return zcu.structFieldAlignmentAdvanced(explicit_align, field_ty, struct_type.layout, strat); + }, + .anon_struct_type => |anon_struct| { + return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, strat.toLazy())).scalar; + }, + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + return zcu.unionFieldNormalAlignmentAdvanced(union_obj, @intCast(index), strat); + }, + else => unreachable, + } +} + +pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + const val = struct_type.fieldInit(ip, index); + // TODO: avoid using `unreachable` to indicate this. + if (val == .none) return Value.@"unreachable"; + return Value.fromInterned(val); + }, + .anon_struct_type => |anon_struct| { + const val = anon_struct.values.get(ip)[index]; + // TODO: avoid using `unreachable` to indicate this. + if (val == .none) return Value.@"unreachable"; + return Value.fromInterned(val); + }, + else => unreachable, + } +} + +pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.fieldIsComptime(ip, index)) { + assert(struct_type.haveFieldInits(ip)); + return Value.fromInterned(struct_type.field_inits.get(ip)[index]); + } else { + return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod); + } + }, + .anon_struct_type => |tuple| { + const val = tuple.values.get(ip)[index]; + if (val == .none) { + return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod); + } else { + return Value.fromInterned(val); + } + }, + else => unreachable, + } +} + +pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index), + .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none, + else => unreachable, + }; +} + +pub const FieldOffset = struct { + field: usize, + offset: u64, +}; + +/// Supports structs and unions. 
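// The `Alignment.forward` rounding used by `structFieldOffset` below is the
// standard align-up computation; std.mem.alignForward is the user-level
// equivalent. Standalone sketch.
const std = @import("std");

test "align-up" {
    try std.testing.expect(std.mem.alignForward(u64, 5, 4) == 8);
    try std.testing.expect(std.mem.alignForward(u64, 8, 4) == 8);
    try std.testing.expect(std.mem.alignForward(u64, 0, 16) == 0);
}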
+pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { + const ip = &mod.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + assert(struct_type.haveLayout(ip)); + assert(struct_type.layout != .@"packed"); + return struct_type.offsets.get(ip)[index]; + }, + + .anon_struct_type => |tuple| { + var offset: u64 = 0; + var big_align: Alignment = .none; + + for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| { + if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) { + // comptime field + if (i == index) return offset; + continue; + } + + const field_align = Type.fromInterned(field_ty).abiAlignment(mod); + big_align = big_align.max(field_align); + offset = field_align.forward(offset); + if (i == index) return offset; + offset += Type.fromInterned(field_ty).abiSize(mod); + } + offset = big_align.max(.@"1").forward(offset); + return offset; + }, + + .union_type => { + const union_type = ip.loadUnionType(ty.toIntern()); + if (!union_type.hasTag(ip)) + return 0; + const layout = mod.getUnionLayout(union_type); + if (layout.tag_align.compare(.gte, layout.payload_align)) { + // {Tag, Payload} + return layout.payload_align.forward(layout.tag_size); + } else { + // {Payload, Tag} + return 0; + } + }, + + else => unreachable, + } +} + +pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex { + return ty.getOwnerDeclOrNull(mod) orelse unreachable; +} + +pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(), + .union_type => ip.loadUnionType(ty.toIntern()).decl, + .opaque_type => ip.loadOpaqueType(ty.toIntern()).decl, + .enum_type => ip.loadEnumType(ty.toIntern()).decl, + else => null, + }; +} + +pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc { + const ip = &zcu.intern_pool; + return .{ + .base_node_inst = switch (ip.indexToKey(ty.toIntern())) { + .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { + .declared => |d| d.zir_index, + .reified => |r| r.zir_index, + .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, + .empty_struct => return null, + }, + else => return null, + }, + .offset = Module.LazySrcLoc.Offset.nodeOffset(0), + }; +} + +pub fn srcLoc(ty: Type, zcu: *Zcu) Module.LazySrcLoc { + return ty.srcLocOrNull(zcu).?; +} + +pub fn isGenericPoison(ty: Type) bool { + return ty.toIntern() == .generic_poison_type; +} + +pub fn isTuple(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if (struct_type.layout == .@"packed") return false; + if (struct_type.decl == .none) return false; + return struct_type.flagsPtr(ip).is_tuple; + }, + .anon_struct_type => |anon_struct| anon_struct.names.len == 0, + else => false, + }; +} + +pub fn isAnonStruct(ty: Type, mod: *Module) bool { + if (ty.toIntern() == .empty_struct_type) return true; + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, + else => false, + }; +} + +pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + const struct_type = ip.loadStructType(ty.toIntern()); + if 
(struct_type.layout == .@"packed") return false; + if (struct_type.decl == .none) return false; + return struct_type.flagsPtr(ip).is_tuple; + }, + .anon_struct_type => true, + else => false, + }; +} + +pub fn isSimpleTuple(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, + else => false, + }; +} + +pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => true, + else => false, + }; +} + +/// Traverses optional child types and error union payloads until the type +/// is neither an optional nor an error union. For `E!?u32`, returns `u32`; +/// for `*u8`, returns `*u8`. +pub fn optEuBaseType(ty: Type, mod: *Module) Type { + var cur = ty; + while (true) switch (cur.zigTypeTag(mod)) { + .Optional => cur = cur.optionalChild(mod), + .ErrorUnion => cur = cur.errorUnionPayload(mod), + else => return cur, + }; +} + +pub fn toUnsigned(ty: Type, mod: *Module) !Type { + return switch (ty.zigTypeTag(mod)) { + .Int => mod.intType(.unsigned, ty.intInfo(mod).bits), + .Vector => try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(), + }), + else => unreachable, + }; +} + +pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index { + const ip = &zcu.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(), + .union_type => ip.loadUnionType(ty.toIntern()).zir_index, + .enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(), + .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index, + else => null, + }; +} + +pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 { + const ip = &zcu.intern_pool; + const tracked = switch (ip.indexToKey(ty.toIntern())) { + .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { + .declared => |d| d.zir_index, + .reified => |r| r.zir_index, + .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, + .empty_struct => return null, + }, + else => return null, + }; + const info = tracked.resolveFull(&zcu.intern_pool); + const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?]; + assert(file.zir_loaded); + const zir = file.zir; + const inst = zir.instructions.get(@intFromEnum(info.inst)); + assert(inst.tag == .extended); + return switch (inst.data.extended.opcode) { + .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line, + .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line, + .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line, + .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line, + .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line, + else => unreachable, + }; +} + +/// Given a namespace type, returns its list of captured values.
+pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice { + const ip = &zcu.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).captures, + .union_type => ip.loadUnionType(ty.toIntern()).captures, + .enum_type => ip.loadEnumType(ty.toIntern()).captures, + .opaque_type => ip.loadOpaqueType(ty.toIntern()).captures, + else => unreachable, + }; +} + +pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } { + var cur_ty: Type = ty; + var cur_len: u64 = 1; + while (cur_ty.zigTypeTag(zcu) == .Array) { + cur_len *= cur_ty.arrayLenIncludingSentinel(zcu); + cur_ty = cur_ty.childType(zcu); + } + return .{ cur_ty, cur_len }; +} + +pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) { + /// The result is a bit-pointer with the same value and a new packed offset. + bit_ptr: InternPool.Key.PtrType.PackedOffset, + /// The result is a standard pointer. + byte_ptr: struct { + /// The byte offset of the field pointer from the parent pointer value. + offset: u64, + /// The alignment of the field pointer type. + alignment: InternPool.Alignment, + }, +} { + comptime assert(Type.packed_struct_layout_version == 2); + + const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); + const field_ty = struct_ty.structFieldType(field_idx, zcu); + + var bit_offset: u16 = 0; + var running_bits: u16 = 0; + for (0..struct_ty.structFieldCount(zcu)) |i| { + const f_ty = struct_ty.structFieldType(i, zcu); + if (i == field_idx) { + bit_offset = running_bits; + } + running_bits += @intCast(f_ty.bitSize(zcu)); + } + + const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0) + .{ parent_ptr_info.packed_offset.host_size, parent_ptr_info.packed_offset.bit_offset + bit_offset } + else + .{ (running_bits + 7) / 8, bit_offset }; + + // If the field happens to be byte-aligned, simplify the pointer type. + // We can only do this if the pointee's bit size matches its ABI byte size, + // so that loads and stores do not interfere with surrounding packed bits. + // + // TODO: we do not attempt this with big-endian targets yet because of nested + // structs and floats. I need to double-check the desired behavior for big endian + // targets before adding the necessary complications to this code. This will not + // cause miscompilations; it only means the field pointer uses bit masking when it + // might not be strictly necessary. 
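// The bit-offset bookkeeping above matches what user code observes through
// @bitOffsetOf. Standalone sketch, runnable as its own file with `zig test`.
const std = @import("std");

test "packed struct field bit offsets" {
    const P = packed struct { a: u3, b: u13, c: u8 };
    try std.testing.expect(@bitOffsetOf(P, "a") == 0);
    try std.testing.expect(@bitOffsetOf(P, "b") == 3);
    // `c` starts on a byte boundary (16 % 8 == 0): the byte-pointer fast
    // path below applies to fields like this one.
    try std.testing.expect(@bitOffsetOf(P, "c") == 16);
}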
+ if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) { + const byte_offset = res_bit_offset / 8; + const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?)); + return .{ .byte_ptr = .{ + .offset = byte_offset, + .alignment = new_align, + } }; + } + + return .{ .bit_ptr = .{ + .host_size = res_host_size, + .bit_offset = res_bit_offset, + } }; +} + +pub fn resolveLayout(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + switch (ip.indexToKey(ty.toIntern())) { + .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu), + else => {}, + } + switch (ty.zigTypeTag(zcu)) { + .Struct => switch (ip.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { + const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); + try field_ty.resolveLayout(zcu); + }, + .struct_type => return ty.resolveStructInner(zcu, .layout), + else => unreachable, + }, + .Union => return ty.resolveUnionInner(zcu, .layout), + .Array => { + if (ty.arrayLenIncludingSentinel(zcu) == 0) return; + const elem_ty = ty.childType(zcu); + return elem_ty.resolveLayout(zcu); + }, + .Optional => { + const payload_ty = ty.optionalChild(zcu); + return payload_ty.resolveLayout(zcu); + }, + .ErrorUnion => { + const payload_ty = ty.errorUnionPayload(zcu); + return payload_ty.resolveLayout(zcu); + }, + .Fn => { + const info = zcu.typeToFunc(ty).?; + if (info.is_generic) { + // Resolving of generic function types is deferred to when + // the function is instantiated. + return; + } + for (0..info.param_types.len) |i| { + const param_ty = info.param_types.get(ip)[i]; + try Type.fromInterned(param_ty).resolveLayout(zcu); + } + try Type.fromInterned(info.return_type).resolveLayout(zcu); + }, + else => {}, + } +} + +pub fn resolveFields(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + const ty_ip = ty.toIntern(); + + switch (ty_ip) { + .none => unreachable, + + .u0_type, + .i0_type, + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .adhoc_inferred_error_set_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + .optional_noreturn_type, + .anyerror_void_error_union_type, + .generic_poison_type, + .empty_struct_type, + => {}, + + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .zero_u8 => unreachable, + .one => unreachable, + .one_usize => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, + .negative_one => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => 
unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) { + .type_struct, + .type_struct_packed, + .type_struct_packed_inits, + => return ty.resolveStructInner(zcu, .fields), + + .type_union => return ty.resolveUnionInner(zcu, .fields), + + .simple_type => return resolveSimpleType(ip.indexToKey(ty_ip).simple_type, zcu), + + else => {}, + }, + } +} + +pub fn resolveFully(ty: Type, zcu: *Zcu) SemaError!void { + const ip = &zcu.intern_pool; + + switch (ip.indexToKey(ty.toIntern())) { + .simple_type => |simple_type| return resolveSimpleType(simple_type, zcu), + else => {}, + } + + switch (ty.zigTypeTag(zcu)) { + .Type, + .Void, + .Bool, + .NoReturn, + .Int, + .Float, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .ErrorSet, + .Enum, + .Opaque, + .Frame, + .AnyFrame, + .Vector, + .EnumLiteral, + => {}, + + .Pointer => return ty.childType(zcu).resolveFully(zcu), + .Array => return ty.childType(zcu).resolveFully(zcu), + .Optional => return ty.optionalChild(zcu).resolveFully(zcu), + .ErrorUnion => return ty.errorUnionPayload(zcu).resolveFully(zcu), + .Fn => { + const info = zcu.typeToFunc(ty).?; + if (info.is_generic) return; + for (0..info.param_types.len) |i| { + const param_ty = info.param_types.get(ip)[i]; + try Type.fromInterned(param_ty).resolveFully(zcu); + } + try Type.fromInterned(info.return_type).resolveFully(zcu); + }, + + .Struct => switch (ip.indexToKey(ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { + const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); + try field_ty.resolveFully(zcu); + }, + .struct_type => return ty.resolveStructInner(zcu, .full), + else => unreachable, + }, + .Union => return ty.resolveUnionInner(zcu, .full), + } +} + +pub fn resolveStructFieldInits(ty: Type, zcu: *Zcu) SemaError!void { + // TODO: stop calling this for tuples! + _ = zcu.typeToStruct(ty) orelse return; + return ty.resolveStructInner(zcu, .inits); +} + +pub fn resolveStructAlignment(ty: Type, zcu: *Zcu) SemaError!void { + return ty.resolveStructInner(zcu, .alignment); +} + +pub fn resolveUnionAlignment(ty: Type, zcu: *Zcu) SemaError!void { + return ty.resolveUnionInner(zcu, .alignment); +} + +/// `ty` must be a struct. +fn resolveStructInner( + ty: Type, + zcu: *Zcu, + resolution: enum { fields, inits, alignment, layout, full }, +) SemaError!void { + const gpa = zcu.gpa; + + const struct_obj = zcu.typeToStruct(ty).?; + const owner_decl_index = struct_obj.decl.unwrap() orelse return; + + var analysis_arena = std.heap.ArenaAllocator.init(gpa); + defer analysis_arena.deinit(); + + var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); + defer comptime_err_ret_trace.deinit(); + + var sema: Sema = .{ + .mod = zcu, + .gpa = gpa, + .arena = analysis_arena.allocator(), + .code = undefined, // This ZIR will not be used. 
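+        // This throwaway Sema exists only to drive the type-resolution entry
+        // points in the switch below; it never analyzes ZIR, which is why
+        // `code` can safely be left undefined.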
+        .owner_decl = zcu.declPtr(owner_decl_index),
+        .owner_decl_index = owner_decl_index,
+        .func_index = .none,
+        .func_is_naked = false,
+        .fn_ret_ty = Type.void,
+        .fn_ret_ty_ies = null,
+        .owner_func_index = .none,
+        .comptime_err_ret_trace = &comptime_err_ret_trace,
+    };
+    defer sema.deinit();
+
+    switch (resolution) {
+        .fields => return sema.resolveTypeFieldsStruct(ty.toIntern(), struct_obj),
+        .inits => return sema.resolveStructFieldInits(ty),
+        .alignment => return sema.resolveStructAlignment(ty.toIntern(), struct_obj),
+        .layout => return sema.resolveStructLayout(ty),
+        .full => return sema.resolveStructFully(ty),
+    }
+}
+
+/// `ty` must be a union.
+fn resolveUnionInner(
+    ty: Type,
+    zcu: *Zcu,
+    resolution: enum { fields, alignment, layout, full },
+) SemaError!void {
+    const gpa = zcu.gpa;
+
+    const union_obj = zcu.typeToUnion(ty).?;
+    const owner_decl_index = union_obj.decl;
+
+    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
+    defer analysis_arena.deinit();
+
+    var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
+    defer comptime_err_ret_trace.deinit();
+
+    var sema: Sema = .{
+        .mod = zcu,
+        .gpa = gpa,
+        .arena = analysis_arena.allocator(),
+        .code = undefined, // This ZIR will not be used.
+        .owner_decl = zcu.declPtr(owner_decl_index),
+        .owner_decl_index = owner_decl_index,
+        .func_index = .none,
+        .func_is_naked = false,
+        .fn_ret_ty = Type.void,
+        .fn_ret_ty_ies = null,
+        .owner_func_index = .none,
+        .comptime_err_ret_trace = &comptime_err_ret_trace,
+    };
+    defer sema.deinit();
+
+    switch (resolution) {
+        .fields => return sema.resolveTypeFieldsUnion(ty, union_obj),
+        .alignment => return sema.resolveUnionAlignment(ty, union_obj),
+        .layout => return sema.resolveUnionLayout(ty),
+        .full => return sema.resolveUnionFully(ty),
+    }
+}
+
+/// Fully resolves a simple type. This is usually a nop, but for builtin types with
+/// special InternPool indices (such as std.builtin.Type) it will analyze and fully
+/// resolve the type.
+fn resolveSimpleType(simple_type: InternPool.SimpleType, zcu: *Zcu) Allocator.Error!void {
+    const builtin_type_name: []const u8 = switch (simple_type) {
+        .atomic_order => "AtomicOrder",
+        .atomic_rmw_op => "AtomicRmwOp",
+        .calling_convention => "CallingConvention",
+        .address_space => "AddressSpace",
+        .float_mode => "FloatMode",
+        .reduce_op => "ReduceOp",
+        .call_modifier => "CallModifier",
+        .prefetch_options => "PrefetchOptions",
+        .export_options => "ExportOptions",
+        .extern_options => "ExternOptions",
+        .type_info => "Type",
+        else => return,
+    };
+    // This will fully resolve the type.
+    _ = try zcu.getBuiltinType(builtin_type_name);
+}
+
+/// Returns the type of a pointer to an element.
+/// Asserts that the type is a pointer, and that the element type is indexable.
+/// If the element index is comptime-known, it must be passed in `offset`.
+/// For *@Vector(n, T), returns *align(a:b:h:v) T
+/// For *[N]T, returns *T
+/// For [*]T, returns *T
+/// For []T, returns *T
+/// Handles const-ness and address spaces in particular.
+/// This code is duplicated in `Sema.analyzePtrArithmetic`.
+/// May perform type resolution and return a transitive `error.AnalysisFail`.
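+/// For example, given `*align(16) [4]u32` and a comptime-known `offset` of 1,
+/// the result is `*align(4) u32`: the addend is 1 * @sizeOf(u32) = 4 bytes,
+/// and min(ctz(4), log2(16)) gives a log2 alignment of 2.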
+pub fn elemPtrType(ptr_ty: Type, offset: ?usize, zcu: *Zcu) !Type {
+    const ptr_info = ptr_ty.ptrInfo(zcu);
+    const elem_ty = ptr_ty.elemType2(zcu);
+    const is_allowzero = ptr_info.flags.is_allowzero and (offset orelse 0) == 0;
+    const parent_ty = ptr_ty.childType(zcu);
+
+    const VI = InternPool.Key.PtrType.VectorIndex;
+
+    const vector_info: struct {
+        host_size: u16 = 0,
+        alignment: Alignment = .none,
+        vector_index: VI = .none,
+    } = if (parent_ty.isVector(zcu) and ptr_info.flags.size == .One) blk: {
+        const elem_bits = elem_ty.bitSize(zcu);
+        if (elem_bits == 0) break :blk .{};
+        const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
+        if (!is_packed) break :blk .{};
+
+        break :blk .{
+            .host_size = @intCast(parent_ty.arrayLen(zcu)),
+            .alignment = parent_ty.abiAlignment(zcu),
+            .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
+        };
+    } else .{};
+
+    const alignment: Alignment = a: {
+        // Calculate the new pointer alignment.
+        if (ptr_info.flags.alignment == .none) {
+            // In case of an ABI-aligned pointer, any pointer arithmetic
+            // maintains the same ABI-alignedness.
+            break :a vector_info.alignment;
+        }
+        // If the addend is not a comptime-known value we can still count on
+        // it being a multiple of the type size.
+        const elem_size = (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar;
+        const addend = if (offset) |off| elem_size * off else elem_size;
+
+        // The resulting pointer is aligned to the gcd of the offset (an
+        // arbitrary number) and the alignment factor (always a non-zero
+        // power of two), i.e. the largest power of two dividing both.
+        const new_align: Alignment = @enumFromInt(@min(
+            @ctz(addend),
+            ptr_info.flags.alignment.toLog2Units(),
+        ));
+        assert(new_align != .none);
+        break :a new_align;
+    };
+    return zcu.ptrTypeSema(.{
+        .child = elem_ty.toIntern(),
+        .flags = .{
+            .alignment = alignment,
+            .is_const = ptr_info.flags.is_const,
+            .is_volatile = ptr_info.flags.is_volatile,
+            .is_allowzero = is_allowzero,
+            .address_space = ptr_info.flags.address_space,
+            .vector_index = vector_info.vector_index,
+        },
+        .packed_offset = .{
+            .host_size = vector_info.host_size,
+            .bit_offset = 0,
+        },
+    });
+}
+
+pub const @"u1": Type = .{ .ip_index = .u1_type };
+pub const @"u8": Type = .{ .ip_index = .u8_type };
+pub const @"u16": Type = .{ .ip_index = .u16_type };
+pub const @"u29": Type = .{ .ip_index = .u29_type };
+pub const @"u32": Type = .{ .ip_index = .u32_type };
+pub const @"u64": Type = .{ .ip_index = .u64_type };
+pub const @"u128": Type = .{ .ip_index = .u128_type };
+
+pub const @"i8": Type = .{ .ip_index = .i8_type };
+pub const @"i16": Type = .{ .ip_index = .i16_type };
+pub const @"i32": Type = .{ .ip_index = .i32_type };
+pub const @"i64": Type = .{ .ip_index = .i64_type };
+pub const @"i128": Type = .{ .ip_index = .i128_type };
+
+pub const @"f16": Type = .{ .ip_index = .f16_type };
+pub const @"f32": Type = .{ .ip_index = .f32_type };
+pub const @"f64": Type = .{ .ip_index = .f64_type };
+pub const @"f80": Type = .{ .ip_index = .f80_type };
+pub const @"f128": Type = .{ .ip_index = .f128_type };
+
+pub const @"bool": Type = .{ .ip_index = .bool_type };
+pub const @"usize": Type = .{ .ip_index = .usize_type };
+pub const @"isize": Type = .{ .ip_index = .isize_type };
+pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type };
+pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type };
+pub const @"void": Type = .{ .ip_index = .void_type };
+pub const @"type": Type = .{ .ip_index = .type_type };
+pub const @"anyerror": Type = .{
.ip_index = .anyerror_type }; +pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type }; +pub const @"anyframe": Type = .{ .ip_index = .anyframe_type }; +pub const @"null": Type = .{ .ip_index = .null_type }; +pub const @"undefined": Type = .{ .ip_index = .undefined_type }; +pub const @"noreturn": Type = .{ .ip_index = .noreturn_type }; + +pub const @"c_char": Type = .{ .ip_index = .c_char_type }; +pub const @"c_short": Type = .{ .ip_index = .c_short_type }; +pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type }; +pub const @"c_int": Type = .{ .ip_index = .c_int_type }; +pub const @"c_uint": Type = .{ .ip_index = .c_uint_type }; +pub const @"c_long": Type = .{ .ip_index = .c_long_type }; +pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type }; +pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type }; +pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; +pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; + +pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; +pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; +pub const single_const_pointer_to_comptime_int: Type = .{ + .ip_index = .single_const_pointer_to_comptime_int_type, +}; +pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; +pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; + +pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; + +pub fn smallestUnsignedBits(max: u64) u16 { + if (max == 0) return 0; + const base = std.math.log2(max); + const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1; + return @as(u16, @intCast(base + @intFromBool(upper < max))); +} + +/// This is only used for comptime asserts. Bump this number when you make a change +/// to packed struct layout to find out all the places in the codebase you need to edit! +pub const packed_struct_layout_version = 2; + +fn cTypeAlign(target: Target, c_type: Target.CType) Alignment { + return Alignment.fromByteUnits(target.c_type_alignment(c_type)); +} diff --git a/src/Value.zig b/src/Value.zig index 5719ed3689..34a0472c16 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -1,6 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; @@ -161,9 +161,11 @@ pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { }; } +pub const ResolveStrat = Type.ResolveStrat; + /// Asserts the value is an integer. pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { - return val.toBigIntAdvanced(space, mod, null) catch unreachable; + return val.toBigIntAdvanced(space, mod, .normal) catch unreachable; } /// Asserts the value is an integer. 
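The `opt_sema: ?*Sema` parameter threaded through these Value helpers is replaced by `ResolveStrat`, re-exported from `Type.zig` just above. A minimal sketch, inferred only from the call sites in this diff (`.normal`, `.sema`, and `toLazy()`); the real definition lives in `Type.zig` and may carry more variants:

```zig
// Sketch only (not the compiler's exact definition): `.normal` asserts that
// everything is already resolved and must not fail, while `.sema` performs
// type resolution on demand (the old `opt_sema != null` path). `toLazy`
// adapts the strategy for the lazy-capable abiSizeAdvanced and
// abiAlignmentAdvanced entry points.
const ResolveStratLazy = enum { eager, sema };

const ResolveStrat = enum {
    normal,
    sema,

    fn toLazy(strat: ResolveStrat) ResolveStratLazy {
        return switch (strat) {
            .normal => .eager,
            .sema => .sema,
        };
    }
};
```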
@@ -171,7 +173,7 @@ pub fn toBigIntAdvanced( val: Value, space: *BigIntSpace, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) Module.CompileError!BigIntConst { return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), @@ -181,7 +183,7 @@ pub fn toBigIntAdvanced( .int => |int| switch (int.storage) { .u64, .i64, .big_int => int.storage.toBigInt(space), .lazy_align, .lazy_size => |ty| { - if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty)); + if (strat == .sema) try Type.fromInterned(ty).resolveLayout(mod); const x = switch (int.storage) { else => unreachable, .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, @@ -190,10 +192,10 @@ pub fn toBigIntAdvanced( return BigIntMutable.init(&space.limbs, x).toConst(); }, }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, opt_sema), + .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, strat), .opt, .ptr => BigIntMutable.init( &space.limbs, - (try val.getUnsignedIntAdvanced(mod, opt_sema)).?, + (try val.getUnsignedIntAdvanced(mod, strat)).?, ).toConst(), else => unreachable, }, @@ -228,12 +230,12 @@ pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { - return getUnsignedIntAdvanced(val, mod, null) catch unreachable; + return getUnsignedIntAdvanced(val, mod, .normal) catch unreachable; } /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. -pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { +pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, strat: ResolveStrat) !?u64 { return switch (val.toIntern()) { .undef => unreachable, .bool_false => 0, @@ -244,28 +246,22 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, .i64 => |x| std.math.cast(u64, x), - .lazy_align => |ty| if (opt_sema) |sema| - (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0 - else - Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, - .lazy_size => |ty| if (opt_sema) |sema| - (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar - else - Type.fromInterned(ty).abiSize(mod), + .lazy_align => |ty| (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, + .lazy_size => |ty| (try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, }, .ptr => |ptr| switch (ptr.base_addr) { .int => ptr.byte_offset, .field => |field| { - const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, strat)) orelse return null; const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod); - if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); + if (strat == .sema) try struct_ty.resolveLayout(mod); return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod) + ptr.byte_offset; }, else => null, }, .opt => |opt| switch (opt.val) { .none => 0, - else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, opt_sema), + else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, strat), }, else 
=> null, }, @@ -273,13 +269,13 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedInt(val: Value, mod: *Module) u64 { - return getUnsignedInt(val, mod).?; +pub fn toUnsignedInt(val: Value, zcu: *Zcu) u64 { + return getUnsignedInt(val, zcu).?; } /// Asserts the value is an integer and it fits in a u64 -pub fn toUnsignedIntAdvanced(val: Value, sema: *Sema) !u64 { - return (try getUnsignedIntAdvanced(val, sema.mod, sema)).?; +pub fn toUnsignedIntSema(val: Value, zcu: *Zcu) !u64 { + return (try getUnsignedIntAdvanced(val, zcu, .sema)).?; } /// Asserts the value is an integer and it fits in a i64 @@ -1028,13 +1024,13 @@ pub fn floatHasFraction(self: Value, mod: *const Module) bool { } pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { - return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; + return orderAgainstZeroAdvanced(lhs, mod, .normal) catch unreachable; } pub fn orderAgainstZeroAdvanced( lhs: Value, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) Module.CompileError!std.math.Order { return switch (lhs.toIntern()) { .bool_false => .eq, @@ -1052,13 +1048,13 @@ pub fn orderAgainstZeroAdvanced( .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced( mod, false, - if (opt_sema) |sema| .{ .sema = sema } else .eager, + strat.toLazy(), ) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }) .gt else .eq, }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, opt_sema), + .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, strat), .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, @@ -1069,14 +1065,13 @@ pub fn orderAgainstZeroAdvanced( /// Asserts the value is comparable. pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { - return orderAdvanced(lhs, rhs, mod, null) catch unreachable; + return orderAdvanced(lhs, rhs, mod, .normal) catch unreachable; } /// Asserts the value is comparable. -/// If opt_sema is null then this function asserts things are resolved and cannot fail. -pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order { - const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); - const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); +pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, strat: ResolveStrat) !std.math.Order { + const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, strat); + const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, strat); switch (lhs_against_zero) { .lt => if (rhs_against_zero != .lt) return .lt, .eq => return rhs_against_zero.invert(), @@ -1096,15 +1091,15 @@ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !st var lhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, strat); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, strat); return lhs_bigint.order(rhs_bigint); } /// Asserts the value is comparable. Does not take a type parameter because it supports /// comparisons between heterogeneous types. 
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { - return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; + return compareHeteroAdvanced(lhs, op, rhs, mod, .normal) catch unreachable; } pub fn compareHeteroAdvanced( @@ -1112,7 +1107,7 @@ pub fn compareHeteroAdvanced( op: std.math.CompareOperator, rhs: Value, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) !bool { if (lhs.pointerDecl(mod)) |lhs_decl| { if (rhs.pointerDecl(mod)) |rhs_decl| { @@ -1135,7 +1130,7 @@ pub fn compareHeteroAdvanced( else => {}, } } - return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op); + return (try orderAdvanced(lhs, rhs, mod, strat)).compare(op); } /// Asserts the values are comparable. Both operands have type `ty`. @@ -1176,22 +1171,22 @@ pub fn compareScalar( /// /// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)` pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool { - return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable; + return compareAllWithZeroAdvancedExtra(lhs, op, mod, .normal) catch unreachable; } -pub fn compareAllWithZeroAdvanced( +pub fn compareAllWithZeroSema( lhs: Value, op: std.math.CompareOperator, - sema: *Sema, + zcu: *Zcu, ) Module.CompileError!bool { - return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema); + return compareAllWithZeroAdvancedExtra(lhs, op, zcu, .sema); } pub fn compareAllWithZeroAdvancedExtra( lhs: Value, op: std.math.CompareOperator, mod: *Module, - opt_sema: ?*Sema, + strat: ResolveStrat, ) Module.CompileError!bool { if (lhs.isInf(mod)) { switch (op) { @@ -1211,14 +1206,14 @@ pub fn compareAllWithZeroAdvancedExtra( if (!std.math.order(byte, 0).compare(op)) break false; } else true, .elems => |elems| for (elems) |elem| { - if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat)) break false; } else true, - .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema), + .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, strat), }, .undef => return false, else => {}, } - return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); + return (try orderAgainstZeroAdvanced(lhs, mod, strat)).compare(op); } pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { @@ -1279,9 +1274,9 @@ pub fn slicePtr(val: Value, mod: *Module) Value { } /// Gets the `len` field of a slice value as a `u64`. -/// Resolves the length using the provided `Sema` if necessary. -pub fn sliceLen(val: Value, sema: *Sema) !u64 { - return Value.fromInterned(sema.mod.intern_pool.sliceLen(val.toIntern())).toUnsignedIntAdvanced(sema); +/// Resolves the length using `Sema` if necessary. +pub fn sliceLen(val: Value, zcu: *Zcu) !u64 { + return Value.fromInterned(zcu.intern_pool.sliceLen(val.toIntern())).toUnsignedIntSema(zcu); } /// Asserts the value is an aggregate, and returns the element value at the given index. 
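The doc comment on `compareAllWithZero` above warns that the predicate is not self-inverting: negating the result for `.eq` is not the same as querying `.neq`. A standalone illustration in plain Zig, using hypothetical values and none of the compiler's internals:

```zig
const std = @import("std");

test "compareAllWithZero is not self-inverting" {
    const v = [2]u8{ 0, 1 };
    var all_eq = true; // corresponds to compareAllWithZero(.eq, v)
    var all_neq = true; // corresponds to compareAllWithZero(.neq, v)
    for (v) |x| {
        if (x != 0) all_eq = false;
        if (x == 0) all_neq = false;
    }
    // For a mixed vector, neither "all elements == 0" nor "all elements != 0"
    // holds, so !all_eq != all_neq.
    try std.testing.expect(!all_eq and !all_neq);
}
```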
@@ -1482,29 +1477,29 @@ pub fn isFloat(self: Value, mod: *const Module) bool { } pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value { - return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) { + return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, .normal) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => unreachable, }; } -pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { +pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).toIntern(); + scalar.* = (try floatFromIntScalar(elem_val, scalar_ty, mod, strat)).toIntern(); } return Value.fromInterned((try mod.intern(.{ .aggregate = .{ .ty = float_ty.toIntern(), .storage = .{ .elems = result_data }, } }))); } - return floatFromIntScalar(val, float_ty, mod, opt_sema); + return floatFromIntScalar(val, float_ty, mod, strat); } -pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { +pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, strat: ResolveStrat) !Value { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => try mod.undefValue(float_ty), .int => |int| switch (int.storage) { @@ -1513,16 +1508,8 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?* return mod.floatValue(float_ty, float); }, inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod), - .lazy_align => |ty| if (opt_sema) |sema| { - return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0, float_ty, mod); - } else { - return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, float_ty, mod); - }, - .lazy_size => |ty| if (opt_sema) |sema| { - return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return floatFromIntInner(Type.fromInterned(ty).abiSize(mod), float_ty, mod); - }, + .lazy_align => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, strat.toLazy())).scalar.toByteUnits() orelse 0, float_ty, mod), + .lazy_size => |ty| return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, strat.toLazy())).scalar, float_ty, mod), }, else => unreachable, }; @@ -3616,17 +3603,15 @@ pub const RuntimeIndex = InternPool.RuntimeIndex; /// `parent_ptr` must be a single-pointer to some optional. /// Returns a pointer to the payload of the optional. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. 
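+/// For example, given a `parent_ptr` of type `*?u32`, the result is a value of
+/// type `*u32` addressing the optional's payload.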
+pub fn ptrOptPayload(parent_ptr: Value, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const opt_ty = parent_ptr_ty.childType(zcu); assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(opt_ty.zigTypeTag(zcu) == .Optional); - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_ty.ptrInfo(zcu); // We can correctly preserve alignment `.none`, since an optional has the same // natural alignment as its child type. @@ -3651,17 +3636,15 @@ pub fn ptrOptPayload(parent_ptr: Value, sema: *Sema) !Value { /// `parent_ptr` must be a single-pointer to some error union. /// Returns a pointer to the payload of the error union. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. +pub fn ptrEuPayload(parent_ptr: Value, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const eu_ty = parent_ptr_ty.childType(zcu); assert(parent_ptr_ty.ptrSize(zcu) == .One); assert(eu_ty.zigTypeTag(zcu) == .ErrorUnion); - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_ty.ptrInfo(zcu); // We can correctly preserve alignment `.none`, since an error union has a // natural alignment greater than or equal to that of its payload type. @@ -3682,10 +3665,8 @@ pub fn ptrEuPayload(parent_ptr: Value, sema: *Sema) !Value { /// `parent_ptr` must be a single-pointer to a struct, union, or slice. /// Returns a pointer to the aggregate field at the specified index. /// For slices, uses `slice_ptr_index` and `slice_len_index`. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. +pub fn ptrField(parent_ptr: Value, field_idx: u32, zcu: *Zcu) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const aggregate_ty = parent_ptr_ty.childType(zcu); @@ -3698,17 +3679,17 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { .Struct => field: { const field_ty = aggregate_ty.structFieldType(field_idx, zcu); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) }, + .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) }, .@"extern" => { // Well-defined layout, so just offset the pointer appropriately. 
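+                    // The field pointer's alignment is the largest power of two
+                    // dividing both the parent pointer's alignment and the
+                    // field's byte offset.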
const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu); const field_align = a: { const parent_align = if (parent_ptr_info.flags.alignment == .none) pa: { - break :pa try sema.typeAbiAlignment(aggregate_ty); + break :pa (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; } else parent_ptr_info.flags.alignment; break :a InternPool.Alignment.fromLog2Units(@min(parent_align.toLog2Units(), @ctz(byte_off))); }; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = field_align; @@ -3723,14 +3704,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { new.packed_offset = packed_offset; new.child = field_ty.toIntern(); if (new.flags.alignment == .none) { - new.flags.alignment = try sema.typeAbiAlignment(aggregate_ty); + new.flags.alignment = (try aggregate_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; } break :info new; }); return zcu.getCoerced(parent_ptr, result_ty); }, .byte_ptr => |ptr_info| { - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.packed_offset = .{ @@ -3749,10 +3730,10 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { const union_obj = zcu.typeToUnion(aggregate_ty).?; const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]); switch (aggregate_ty.containerLayout(zcu)) { - .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, sema) }, + .auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), zcu, .sema) }, .@"extern" => { // Point to the same address. - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); break :info new; @@ -3762,28 +3743,28 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { .@"packed" => { // If the field has an ABI size matching its bit size, then we can continue to use a // non-bit pointer if the parent pointer is also a non-bit pointer. - if (parent_ptr_info.packed_offset.host_size == 0 and try sema.typeAbiSize(field_ty) * 8 == try field_ty.bitSizeAdvanced(zcu, sema)) { + if (parent_ptr_info.packed_offset.host_size == 0 and (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar * 8 == try field_ty.bitSizeAdvanced(zcu, .sema)) { // We must offset the pointer on big-endian targets, since the bits of packed memory don't align nicely. const byte_offset = switch (zcu.getTarget().cpu.arch.endian()) { .little => 0, - .big => try sema.typeAbiSize(aggregate_ty) - try sema.typeAbiSize(field_ty), + .big => (try aggregate_ty.abiSizeAdvanced(zcu, .sema)).scalar - (try field_ty.abiSizeAdvanced(zcu, .sema)).scalar, }; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = InternPool.Alignment.fromLog2Units( - @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, sema)).toByteUnits().?), + @ctz(byte_offset | (try parent_ptr_ty.ptrAlignmentAdvanced(zcu, .sema)).toByteUnits().?), ); break :info new; }); return parent_ptr.getOffsetPtr(byte_offset, result_ty, zcu); } else { // The result must be a bit-pointer if it is not already. 
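+                        // If the parent is not already a bit-pointer, the host
+                        // size is set to span the packed aggregate's full bit
+                        // size, rounded up to whole bytes.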
- const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); if (new.packed_offset.host_size == 0) { - new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, sema)) + 7) / 8); + new.packed_offset.host_size = @intCast(((try aggregate_ty.bitSizeAdvanced(zcu, .sema)) + 7) / 8); assert(new.packed_offset.bit_offset == 0); } break :info new; @@ -3805,14 +3786,14 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { }; const new_align: InternPool.Alignment = if (parent_ptr_info.flags.alignment != .none) a: { - const ty_align = try sema.typeAbiAlignment(field_ty); + const ty_align = (try field_ty.abiAlignmentAdvanced(zcu, .sema)).scalar; const true_field_align = if (field_align == .none) ty_align else field_align; const new_align = true_field_align.min(parent_ptr_info.flags.alignment); if (new_align == ty_align) break :a .none; break :a new_align; } else field_align; - const result_ty = try sema.ptrType(info: { + const result_ty = try zcu.ptrTypeSema(info: { var new = parent_ptr_info; new.child = field_ty.toIntern(); new.flags.alignment = new_align; @@ -3834,10 +3815,8 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, sema: *Sema) !Value { /// `orig_parent_ptr` must be either a single-pointer to an array or vector, or a many-pointer or C-pointer or slice. /// Returns a pointer to the element at the specified index. -/// This takes a `Sema` because it may need to perform type resolution. -pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { - const zcu = sema.mod; - +/// May perform type resolution. +pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, zcu: *Zcu) !Value { const parent_ptr = switch (orig_parent_ptr.typeOf(zcu).ptrSize(zcu)) { .One, .Many, .C => orig_parent_ptr, .Slice => orig_parent_ptr.slicePtr(zcu), @@ -3845,7 +3824,7 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { const parent_ptr_ty = parent_ptr.typeOf(zcu); const elem_ty = parent_ptr_ty.childType(zcu); - const result_ty = try sema.elemPtrType(parent_ptr_ty, @intCast(field_idx)); + const result_ty = try parent_ptr_ty.elemPtrType(@intCast(field_idx), zcu); if (parent_ptr.isUndef(zcu)) return zcu.undefValue(result_ty); @@ -3862,21 +3841,21 @@ pub fn ptrElem(orig_parent_ptr: Value, field_idx: u64, sema: *Sema) !Value { const strat: PtrStrat = switch (parent_ptr_ty.ptrSize(zcu)) { .One => switch (elem_ty.zigTypeTag(zcu)) { - .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, sema), 8) }, + .Vector => .{ .offset = field_idx * @divExact(try elem_ty.childType(zcu).bitSizeAdvanced(zcu, .sema), 8) }, .Array => strat: { const arr_elem_ty = elem_ty.childType(zcu); - if (try sema.typeRequiresComptime(arr_elem_ty)) { + if (try arr_elem_ty.comptimeOnlyAdvanced(zcu, .sema)) { break :strat .{ .elem_ptr = arr_elem_ty }; } - break :strat .{ .offset = field_idx * try sema.typeAbiSize(arr_elem_ty) }; + break :strat .{ .offset = field_idx * (try arr_elem_ty.abiSizeAdvanced(zcu, .sema)).scalar }; }, else => unreachable, }, - .Many, .C => if (try sema.typeRequiresComptime(elem_ty)) + .Many, .C => if (try elem_ty.comptimeOnlyAdvanced(zcu, .sema)) .{ .elem_ptr = elem_ty } else - .{ .offset = field_idx * try sema.typeAbiSize(elem_ty) }, + .{ .offset = field_idx * (try elem_ty.abiSizeAdvanced(zcu, .sema)).scalar }, .Slice => unreachable, }; @@ -4014,11 +3993,7 @@ pub const PointerDeriveStep = union(enum) { pub fn 
pointerDerivation(ptr_val: Value, arena: Allocator, zcu: *Zcu) Allocator.Error!PointerDeriveStep { return ptr_val.pointerDerivationAdvanced(arena, zcu, null) catch |err| switch (err) { error.OutOfMemory => |e| return e, - error.AnalysisFail, - error.GenericPoison, - error.ComptimeReturn, - error.ComptimeBreak, - => unreachable, + error.AnalysisFail => unreachable, }; } @@ -4087,8 +4062,8 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op const base_ptr_ty = base_ptr.typeOf(zcu); const agg_ty = base_ptr_ty.childType(zcu); const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) { - .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) }, - .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, opt_sema) }, + .Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) }, + .Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(@intCast(field.index), zcu, .sema) }, .Pointer => .{ switch (field.index) { Value.slice_ptr_index => agg_ty.slicePtrFieldType(zcu), Value.slice_len_index => Type.usize, @@ -4269,3 +4244,118 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, zcu: *Zcu, op .new_ptr_ty = Type.fromInterned(ptr.ty), } }; } + +pub fn resolveLazy(val: Value, arena: Allocator, zcu: *Zcu) Zcu.SemaError!Value { + switch (zcu.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => return val, + .lazy_align, .lazy_size => return zcu.intValue( + Type.fromInterned(int.ty), + (try val.getUnsignedIntAdvanced(zcu, .sema)).?, + ), + }, + .slice => |slice| { + const ptr = try Value.fromInterned(slice.ptr).resolveLazy(arena, zcu); + const len = try Value.fromInterned(slice.len).resolveLazy(arena, zcu); + if (ptr.toIntern() == slice.ptr and len.toIntern() == slice.len) return val; + return Value.fromInterned(try zcu.intern(.{ .slice = .{ + .ty = slice.ty, + .ptr = ptr.toIntern(), + .len = len.toIntern(), + } })); + }, + .ptr => |ptr| { + switch (ptr.base_addr) { + .decl, .comptime_alloc, .anon_decl, .int => return val, + .comptime_field => |field_val| { + const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_field_val == field_val) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = .{ .comptime_field = resolved_field_val }, + .byte_offset = ptr.byte_offset, + } }))); + }, + .eu_payload, .opt_payload => |base| { + const resolved_base = (try Value.fromInterned(base).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_base == base) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = switch (ptr.base_addr) { + .eu_payload => .{ .eu_payload = resolved_base }, + .opt_payload => .{ .opt_payload = resolved_base }, + else => unreachable, + }, + .byte_offset = ptr.byte_offset, + } }))); + }, + .arr_elem, .field => |base_index| { + const resolved_base = (try Value.fromInterned(base_index.base).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_base == base_index.base) + val + else + Value.fromInterned((try zcu.intern(.{ .ptr = .{ + .ty = ptr.ty, + .base_addr = switch (ptr.base_addr) { + .arr_elem => .{ .arr_elem = .{ + .base = resolved_base, + .index = 
base_index.index, + } }, + .field => .{ .field = .{ + .base = resolved_base, + .index = base_index.index, + } }, + else => unreachable, + }, + .byte_offset = ptr.byte_offset, + } }))); + }, + } + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => return val, + .elems => |elems| { + var resolved_elems: []InternPool.Index = &.{}; + for (elems, 0..) |elem, i| { + const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern(); + if (resolved_elems.len == 0 and resolved_elem != elem) { + resolved_elems = try arena.alloc(InternPool.Index, elems.len); + @memcpy(resolved_elems[0..i], elems[0..i]); + } + if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; + } + return if (resolved_elems.len == 0) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .elems = resolved_elems }, + } }))); + }, + .repeated_elem => |elem| { + const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_elem == elem) val else Value.fromInterned((try zcu.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .repeated_elem = resolved_elem }, + } }))); + }, + }, + .un => |un| { + const resolved_tag = if (un.tag == .none) + .none + else + (try Value.fromInterned(un.tag).resolveLazy(arena, zcu)).toIntern(); + const resolved_val = (try Value.fromInterned(un.val).resolveLazy(arena, zcu)).toIntern(); + return if (resolved_tag == un.tag and resolved_val == un.val) + val + else + Value.fromInterned((try zcu.intern(.{ .un = .{ + .ty = un.ty, + .tag = resolved_tag, + .val = resolved_val, + } }))); + }, + else => return val, + } +} diff --git a/src/Zcu.zig b/src/Zcu.zig index a490990cf3..adfe60e678 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -20,7 +20,7 @@ const Zcu = @This(); const Compilation = @import("Compilation.zig"); const Cache = std.Build.Cache; const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const Air = @import("Air.zig"); @@ -35,6 +35,7 @@ const isUpDir = @import("introspect.zig").isUpDir; const clang = @import("clang.zig"); const InternPool = @import("InternPool.zig"); const Alignment = InternPool.Alignment; +const AnalUnit = InternPool.AnalUnit; const BuiltinFn = std.zig.BuiltinFn; const LlvmObject = @import("codegen/llvm.zig").Object; @@ -71,18 +72,22 @@ codegen_prog_node: std.Progress.Node = undefined, global_zir_cache: Compilation.Directory, /// Used by AstGen worker to load and store ZIR cache. local_zir_cache: Compilation.Directory, -/// It's rare for a decl to be exported, so we save memory by having a sparse -/// map of Decl indexes to details about them being exported. -/// The Export memory is owned by the `export_owners` table; the slice itself -/// is owned by this table. The slice is guaranteed to not be empty. -decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(*Export)) = .{}, -/// Same as `decl_exports` but for exported constant values. -value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, ArrayListUnmanaged(*Export)) = .{}, -/// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl -/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that -/// is performing the export of another Decl. -/// This table owns the Export memory. 
-export_owners: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(*Export)) = .{}, +/// This is where all `Export` values are stored. Not all values here are necessarily valid exports; +/// to enumerate all exports, `single_exports` and `multi_exports` must be consulted. +all_exports: ArrayListUnmanaged(Export) = .{}, +/// This is a list of free indices in `all_exports`. These indices may be reused by exports from +/// future semantic analysis. +free_exports: ArrayListUnmanaged(u32) = .{}, +/// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of +/// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit` +/// whose analysis triggered the export. +single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, +/// Like `single_exports`, but for `AnalUnit`s which perform multiple exports. +/// The exports are `all_exports.items[index..][0..len]`. +multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { + index: u32, + len: u32, +}) = .{}, /// The set of all the Zig source files in the Module. We keep track of this in order /// to iterate over it and check which source files have been modified on the file system when /// an update is requested, as well as to cache `@import` results. @@ -103,15 +108,11 @@ embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{}, /// is not yet implemented. intern_pool: InternPool = .{}, -/// We optimize memory usage for a compilation with no compile errors by storing the -/// error messages and mapping outside of `Decl`. -/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. -/// Note that a Decl can succeed but the Fn it represents can fail. In this case, -/// a Decl can have a failed_decls entry but have analysis status of success. -failed_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, *ErrorMsg) = .{}, -/// Keep track of one `@compileLog` callsite per owner Decl. +/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. +failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .{}, +/// Keep track of one `@compileLog` callsite per `AnalUnit`. /// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`. -compile_log_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, extern struct { +compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { base_node_inst: InternPool.TrackedInst.Index, node_offset: i32, pub fn src(self: @This()) LazySrcLoc { @@ -126,12 +127,11 @@ compile_log_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, extern struct { failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{}, /// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator. failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{}, -/// Using a map here for consistency with the other fields here. -/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator. -failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{}, -/// If a decl failed due to a cimport error, the corresponding Clang errors +/// Key is index into `all_exports`. +failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .{}, +/// If analysis failed due to a cimport error, the corresponding Clang errors /// are stored here. 
-cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, std.zig.ErrorBundle) = .{},
+cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .{},
 
 /// Key is the error name, index is the error tag value. Index 0 has a length-0 string.
 global_error_set: GlobalErrorSet = .{},
@@ -139,26 +139,26 @@ global_error_set: GlobalErrorSet = .{},
 /// Maximum amount of distinct error values, set by --error-limit
 error_limit: ErrorInt,
 
-/// Value is the number of PO or outdated Decls which this AnalSubject depends on.
-potentially_outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, u32) = .{},
-/// Value is the number of PO or outdated Decls which this AnalSubject depends on.
-/// Once this value drops to 0, the AnalSubject is a candidate for re-analysis.
-outdated: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, u32) = .{},
-/// This contains all `AnalSubject`s in `outdated` whose PO dependency count is 0.
-/// Such `AnalSubject`s are ready for immediate re-analysis.
+/// Value is the number of PO or outdated Decls which this AnalUnit depends on.
+potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
+/// Value is the number of PO or outdated Decls which this AnalUnit depends on.
+/// Once this value drops to 0, the AnalUnit is a candidate for re-analysis.
+outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
+/// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0.
+/// Such `AnalUnit`s are ready for immediate re-analysis.
 /// See `findOutdatedToAnalyze` for details.
-outdated_ready: std.AutoArrayHashMapUnmanaged(InternPool.AnalSubject, void) = .{},
+outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
 /// This contains a set of Decls which may not be in `outdated`, but are the
 /// root Decls of files which have updated source and thus must be re-analyzed.
 /// If such a Decl is only in this set, the struct type index may be preserved
 /// (only the namespace might change). If such a Decl is also `outdated`, the
 /// struct type index must be recreated.
 outdated_file_root: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
-/// This contains a list of AnalSubject whose analysis or codegen failed, but the
+/// This contains a list of `AnalUnit`s whose analysis or codegen failed, but the
 /// failure was something like running out of disk space, and trying again may
 /// succeed. On the next update, we will flush this list, marking all members of
 /// it as outdated.
-retryable_failures: std.ArrayListUnmanaged(InternPool.AnalSubject) = .{},
+retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .{},
 
 stage1_flags: packed struct {
     have_winmain: bool = false,
@@ -176,12 +176,18 @@ emit_h: ?*GlobalEmitH,
 
 test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
 
+/// TODO: the key here will be a `Cau.Index`.
 global_assembly: std.AutoArrayHashMapUnmanaged(Decl.Index, []u8) = .{},
 
-reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
-    referencer: Decl.Index,
-    src: LazySrcLoc,
-}) = .{},
+/// Key is the `AnalUnit` *performing* the reference. This representation allows
+/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
+/// Value is the index into `all_references` of the first reference triggered by the unit.
+/// The `next` field on the `Reference` forms a linked list of all references
+/// triggered by the key `AnalUnit`.
+reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, +all_references: std.ArrayListUnmanaged(Reference) = .{}, +/// Freelist of indices in `all_references`. +free_references: std.ArrayListUnmanaged(u32) = .{}, panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len, /// The panic function body. @@ -262,13 +268,25 @@ pub const Exported = union(enum) { decl_index: Decl.Index, /// Constant value being exported. value: InternPool.Index, + + pub fn getValue(exported: Exported, zcu: *Zcu) Value { + return switch (exported) { + .decl_index => |decl_index| zcu.declPtr(decl_index).val, + .value => |value| Value.fromInterned(value), + }; + } + + pub fn getAlign(exported: Exported, zcu: *Zcu) Alignment { + return switch (exported) { + .decl_index => |decl_index| zcu.declPtr(decl_index).alignment, + .value => .none, + }; + } }; pub const Export = struct { opts: Options, src: LazySrcLoc, - /// The Decl that performs the export. Note that this is *not* the Decl being exported. - owner_decl: Decl.Index, exported: Exported, status: enum { in_progress, @@ -285,50 +303,16 @@ pub const Export = struct { section: InternPool.OptionalNullTerminatedString = .none, visibility: std.builtin.SymbolVisibility = .default, }; - - pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc { - return exp.src.upgrade(mod); - } }; -const ValueArena = struct { - state: std.heap.ArenaAllocator.State, - state_acquired: ?*std.heap.ArenaAllocator.State = null, - - /// If this ValueArena replaced an existing one during re-analysis, this is the previous instance - prev: ?*ValueArena = null, - - /// Returns an allocator backed by either promoting `state`, or by the existing ArenaAllocator - /// that has already promoted `state`. `out_arena_allocator` provides storage for the initial promotion, - /// and must live until the matching call to release(). - pub fn acquire(self: *ValueArena, child_allocator: Allocator, out_arena_allocator: *std.heap.ArenaAllocator) Allocator { - if (self.state_acquired) |state_acquired| { - return @as(*std.heap.ArenaAllocator, @fieldParentPtr("state", state_acquired)).allocator(); - } - - out_arena_allocator.* = self.state.promote(child_allocator); - self.state_acquired = &out_arena_allocator.state; - return out_arena_allocator.allocator(); - } - - /// Releases the allocator acquired by `acquire. `arena_allocator` must match the one passed to `acquire`. - pub fn release(self: *ValueArena, arena_allocator: *std.heap.ArenaAllocator) void { - if (@as(*std.heap.ArenaAllocator, @fieldParentPtr("state", self.state_acquired.?)) == arena_allocator) { - self.state = self.state_acquired.?.*; - self.state_acquired = null; - } - } - - pub fn deinit(self: ValueArena, child_allocator: Allocator) void { - assert(self.state_acquired == null); - - const prev = self.prev; - self.state.promote(child_allocator).deinit(); - - if (prev) |p| { - p.deinit(child_allocator); - } - } +pub const Reference = struct { + /// The `AnalUnit` whose semantic analysis was triggered by this reference. + referenced: AnalUnit, + /// Index into `all_references` of the next `Reference` triggered by the same `AnalUnit`. + /// `std.math.maxInt(u32)` is the sentinel. + next: u32, + /// The source location of the reference. + src: LazySrcLoc, }; pub const Decl = struct { @@ -369,9 +353,9 @@ pub const Decl = struct { /// successfully complete semantic analysis. dependency_failure, /// Semantic analysis failure. - /// There will be a corresponding ErrorMsg in Zcu.failed_decls. 
+ /// There will be a corresponding ErrorMsg in Zcu.failed_analysis. sema_failure, - /// There will be a corresponding ErrorMsg in Zcu.failed_decls. + /// There will be a corresponding ErrorMsg in Zcu.failed_analysis. codegen_failure, /// Sematic analysis and constant value codegen of this Decl has /// succeeded. However, the Decl may be outdated due to an in-progress @@ -759,7 +743,7 @@ pub const File = struct { /// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen. multi_pkg: bool = false, /// List of references to this file, used for multi-package errors. - references: std.ArrayListUnmanaged(Reference) = .{}, + references: std.ArrayListUnmanaged(File.Reference) = .{}, /// The hash of the path to this file, used to store `InternPool.TrackedInst`. path_digest: Cache.BinDigest, @@ -772,7 +756,10 @@ pub const File = struct { /// A single reference to a file. pub const Reference = union(enum) { /// The file is imported directly (i.e. not as a package) with @import. - import: SrcLoc, + import: struct { + file: *File, + token: Ast.TokenIndex, + }, /// The file is the root of a module. root: *Package.Module, }; @@ -926,7 +913,7 @@ pub const File = struct { } /// Add a reference to this file during AstGen. - pub fn addReference(file: *File, mod: Module, ref: Reference) !void { + pub fn addReference(file: *File, zcu: Zcu, ref: File.Reference) !void { // Don't add the same module root twice. Note that since we always add module roots at the // front of the references array (see below), this loop is actually O(1) on valid code. if (ref == .root) { @@ -943,17 +930,17 @@ pub const File = struct { // to make multi-module errors more helpful (since "root-of" notes are generally more // informative than "imported-from" notes). This path is hit very rarely, so the speed // of the insert operation doesn't matter too much. - .root => try file.references.insert(mod.gpa, 0, ref), + .root => try file.references.insert(zcu.gpa, 0, ref), // Other references we'll just put at the end. - else => try file.references.append(mod.gpa, ref), + else => try file.references.append(zcu.gpa, ref), } - const pkg = switch (ref) { - .import => |loc| loc.file_scope.mod, - .root => |pkg| pkg, + const mod = switch (ref) { + .import => |import| import.file.mod, + .root => |mod| mod, }; - if (pkg != file.mod) file.multi_pkg = true; + if (mod != file.mod) file.multi_pkg = true; } /// Mark this file and every file referenced by it as multi_pkg and report an @@ -993,36 +980,25 @@ pub const EmbedFile = struct { owner: *Package.Module, stat: Cache.File.Stat, val: InternPool.Index, - src_loc: SrcLoc, + src_loc: LazySrcLoc, }; /// This struct holds data necessary to construct API-facing `AllErrors.Message`. /// Its memory is managed with the general purpose allocator so that they /// can be created and destroyed in response to incremental updates. -/// In some cases, the File could have been inferred from where the ErrorMsg -/// is stored. For example, if it is stored in Module.failed_decls, then the File -/// would be determined by the Decl Scope. However, the data structure contains the field -/// anyway so that `ErrorMsg` can be reused for error notes, which may be in a different -/// file than the parent error message. It also simplifies processing of error messages. 
pub const ErrorMsg = struct { - src_loc: SrcLoc, + src_loc: LazySrcLoc, msg: []const u8, notes: []ErrorMsg = &.{}, - reference_trace: []Trace = &.{}, - hidden_references: u32 = 0, - - pub const Trace = struct { - decl: InternPool.NullTerminatedString, - src_loc: SrcLoc, - }; + reference_trace_root: AnalUnit.Optional = .none, pub fn create( gpa: Allocator, - src_loc: SrcLoc, + src_loc: LazySrcLoc, comptime format: []const u8, args: anytype, ) !*ErrorMsg { - assert(src_loc.lazy != .unneeded); + assert(src_loc.offset != .unneeded); const err_msg = try gpa.create(ErrorMsg); errdefer gpa.destroy(err_msg); err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args); @@ -1038,7 +1014,7 @@ pub const ErrorMsg = struct { pub fn init( gpa: Allocator, - src_loc: SrcLoc, + src_loc: LazySrcLoc, comptime format: []const u8, args: anytype, ) !ErrorMsg { @@ -1054,7 +1030,6 @@ pub const ErrorMsg = struct { } gpa.free(err_msg.notes); gpa.free(err_msg.msg); - gpa.free(err_msg.reference_trace); err_msg.* = undefined; } }; @@ -2027,15 +2002,12 @@ pub const LazySrcLoc = struct { entire_file, /// The source location points to a byte offset within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. byte_abs: u32, /// The source location points to a token within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. token_abs: u32, /// The source location points to an AST node within a source file, /// offset from 0. The source file is determined contextually. - /// Inside a `SrcLoc`, the `file_scope` union field will be active. node_abs: u32, /// The source location points to a byte offset within a source file, /// offset from the byte offset of the base node within the file. @@ -2406,8 +2378,7 @@ pub const LazySrcLoc = struct { } /// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`. - /// TODO: it is incorrect to store a `SrcLoc` anywhere due to incremental compilation. - /// Probably the type should be removed entirely and this resolution performed on-the-fly when needed. + /// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates. 
     pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc {
         const file, const base_node = resolveBaseNode(lazy.base_node_inst, zcu);
         return .{
@@ -2452,8 +2423,6 @@ pub fn deinit(zcu: *Zcu) void {
     for (zcu.import_table.keys()) |key| {
         gpa.free(key);
     }
-    var failed_decls = zcu.failed_decls;
-    zcu.failed_decls = .{};
     for (zcu.import_table.values()) |value| {
         value.destroy(zcu);
     }
@@ -2471,10 +2440,10 @@ pub fn deinit(zcu: *Zcu) void {
     zcu.local_zir_cache.handle.close();
     zcu.global_zir_cache.handle.close();

-    for (failed_decls.values()) |value| {
+    for (zcu.failed_analysis.values()) |value| {
         value.destroy(gpa);
     }
-    failed_decls.deinit(gpa);
+    zcu.failed_analysis.deinit(gpa);

     if (zcu.emit_h) |emit_h| {
         for (emit_h.failed_decls.values()) |value| {
@@ -2505,22 +2474,12 @@
     }
     zcu.cimport_errors.deinit(gpa);

-    zcu.compile_log_decls.deinit(gpa);
+    zcu.compile_log_sources.deinit(gpa);

-    for (zcu.decl_exports.values()) |*export_list| {
-        export_list.deinit(gpa);
-    }
-    zcu.decl_exports.deinit(gpa);
-
-    for (zcu.value_exports.values()) |*export_list| {
-        export_list.deinit(gpa);
-    }
-    zcu.value_exports.deinit(gpa);
-
-    for (zcu.export_owners.values()) |*value| {
-        freeExportList(gpa, value);
-    }
-    zcu.export_owners.deinit(gpa);
+    zcu.all_exports.deinit(gpa);
+    zcu.free_exports.deinit(gpa);
+    zcu.single_exports.deinit(gpa);
+    zcu.multi_exports.deinit(gpa);

     zcu.global_error_set.deinit(gpa);
@@ -2538,6 +2497,8 @@ pub fn deinit(zcu: *Zcu) void {
     zcu.global_assembly.deinit(gpa);

     zcu.reference_table.deinit(gpa);
+    zcu.all_references.deinit(gpa);
+    zcu.free_references.deinit(gpa);

     {
         var it = zcu.intern_pool.allocated_namespaces.iterator(0);
@@ -2590,11 +2551,6 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
     return decl_index == namespace.decl_index;
 }

-fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void {
-    for (export_list.items) |exp| gpa.destroy(exp);
-    export_list.deinit(gpa);
-}
-
 // TODO https://github.com/ziglang/zig/issues/8643
 const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
 const HackDataLayout = extern struct {
@@ -3137,9 +3093,9 @@ fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
     }
 }

-/// Given a AnalSubject which is newly outdated or PO, mark all AnalSubjects which may
-/// in turn be PO, due to a dependency on the original AnalSubject's tyval or IES.
-fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternPool.AnalSubject) !void {
+/// Given an AnalUnit which is newly outdated or PO, mark all AnalUnits which may
+/// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES.
+fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void {
     var it = zcu.intern_pool.dependencyIterator(switch (maybe_outdated.unwrap()) {
         .decl => |decl_index| .{ .decl_val = decl_index }, // TODO: also `decl_ref` deps when introduced
         .func => |func_index| .{ .func_ies = func_index },
@@ -3161,12 +3117,12 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternP
             continue;
         }
         try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
-        // This AnalSubject was not already PO, so we must recursively mark its dependers as also PO.
+        // This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
         try zcu.markTransitiveDependersPotentiallyOutdated(po);
     }
 }

-pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject {
+pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
     if (!zcu.comp.debug_incremental) return null;

     if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) {
@@ -3174,8 +3130,8 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject
         return null;
     }

-    // Our goal is to find an outdated AnalSubject which itself has no outdated or
-    // PO dependencies. Most of the time, such an AnalSubject will exist - we track
+    // Our goal is to find an outdated AnalUnit which itself has no outdated or
+    // PO dependencies. Most of the time, such an AnalUnit will exist - we track
     // them in the `outdated_ready` set for efficiency. However, this is not
     // necessarily the case, since the Decl dependency graph may contain loops
     // via mutually recursive definitions:
@@ -3197,7 +3153,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject
     // `outdated`. This set will be small (number of files changed in this
     // update), so it's alright for us to just iterate here.
     for (zcu.outdated_file_root.keys()) |file_decl| {
-        const decl_depender = InternPool.AnalSubject.wrap(.{ .decl = file_decl });
+        const decl_depender = AnalUnit.wrap(.{ .decl = file_decl });
         if (zcu.outdated.contains(decl_depender)) {
             // Since we didn't hit this in the first loop, this Decl must have
             // pending dependencies, so is ineligible.
@@ -3213,7 +3169,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject
         return decl_depender;
     }

-    // There is no single AnalSubject which is ready for re-analysis. Instead, we
+    // There is no single AnalUnit which is ready for re-analysis. Instead, we
     // must assume that some Decl with PO dependencies is outdated - e.g. in the
     // above example we arbitrarily pick one of A or B. We should select a Decl,
     // since a Decl is definitely responsible for the loop in the dependency
@@ -3221,7 +3177,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject
     // The choice of this Decl could have a big impact on how much total
     // analysis we perform, since if analysis concludes its tyval is unchanged,
-    // then other PO AnalSubject may be resolved as up-to-date. To hopefully avoid
+    // then other PO AnalUnits may be resolved as up-to-date. To hopefully avoid
     // doing too much work, let's find a Decl which the most things depend on -
     // the idea is that this will resolve a lot of loops (but this is only a
     // heuristic).
@@ -3271,7 +3227,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.AnalSubject
         chosen_decl_dependers,
     });

-    return InternPool.AnalSubject.wrap(.{ .decl = chosen_decl_idx.? });
+    return AnalUnit.wrap(.{ .decl = chosen_decl_idx.? });
 }

 /// During an incremental update, before semantic analysis, call this to flush all values from
@@ -3281,12 +3237,12 @@ pub fn flushRetryableFailures(zcu: *Zcu) !void {
     for (zcu.retryable_failures.items) |depender| {
         if (zcu.outdated.contains(depender)) continue;
         if (zcu.potentially_outdated.fetchSwapRemove(depender)) |kv| {
-            // This AnalSubject was already PO, but we now consider it outdated.
+            // This AnalUnit was already PO, but we now consider it outdated.
             // Any transitive dependencies are already marked PO.
             try zcu.outdated.put(gpa, depender, kv.value);
             continue;
         }
-        // This AnalSubject was not marked PO, but is now outdated. Mark it as
+        // This AnalUnit was not marked PO, but is now outdated. Mark it as
         // such, then recursively mark transitive dependencies as PO.
         try zcu.outdated.put(gpa, depender, 0);
         try zcu.markTransitiveDependersPotentiallyOutdated(depender);
@@ -3456,7 +3412,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
     // which tries to limit re-analysis to Decls whose previously listed
     // dependencies are all up-to-date.

-    const decl_as_depender = InternPool.AnalSubject.wrap(.{ .decl = decl_index });
+    const decl_as_depender = AnalUnit.wrap(.{ .decl = decl_index });
     const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or
         mod.potentially_outdated.swapRemove(decl_as_depender);

@@ -3485,7 +3441,8 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
         // The exports this Decl performs will be re-discovered, so we remove them here
         // prior to re-analysis.
         if (build_options.only_c) unreachable;
-        try mod.deleteDeclExports(decl_index);
+        mod.deleteUnitExports(decl_as_depender);
+        mod.deleteUnitReferences(decl_as_depender);
     }

     const sema_result: SemaDeclResult = blk: {
@@ -3521,11 +3478,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
             error.GenericPoison => unreachable,
             else => |e| {
                 decl.analysis = .sema_failure;
-                try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1);
-                try mod.retryable_failures.append(mod.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index }));
-                mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create(
+                try mod.failed_analysis.ensureUnusedCapacity(mod.gpa, 1);
+                try mod.retryable_failures.append(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }));
+                mod.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create(
                     mod.gpa,
-                    decl.navSrcLoc(mod).upgrade(mod),
+                    decl.navSrcLoc(mod),
                     "unable to analyze: {s}",
                     .{@errorName(e)},
                 ));
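[Editor's note: `AnalUnit.wrap(.{ .decl = ... })` and `.{ .func = ... }` now appear at every outdatedness check above. A hedged convenience sketch, grounded in the `outdated` and `potentially_outdated` maps used in this hunk; the helper itself is hypothetical, not part of the diff:]

```zig
// Hypothetical helper: both outdatedness sets are keyed by `AnalUnit`, so a
// Decl and a runtime function body are queried identically once wrapped.
fn unitIsOutdated(zcu: *Zcu, unit: AnalUnit) bool {
    return zcu.outdated.contains(unit) or zcu.potentially_outdated.contains(unit);
}
```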
@@ -3581,7 +3538,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
     // that's the case, we should remove this function from the binary.
     if (decl.val.ip_index != func_index) {
         try zcu.markDependeeOutdated(.{ .func_ies = func_index });
-        ip.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .func = func_index }));
+        ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index }));
         ip.remove(func_index);
         @panic("TODO: remove orphaned function from binary");
     }
@@ -3607,12 +3564,15 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
         .complete => {},
     }

-    const func_as_depender = InternPool.AnalSubject.wrap(.{ .func = func_index });
+    const func_as_depender = AnalUnit.wrap(.{ .func = func_index });
     const was_outdated = zcu.outdated.swapRemove(func_as_depender) or
         zcu.potentially_outdated.swapRemove(func_as_depender);

     if (was_outdated) {
+        if (build_options.only_c) unreachable;
         _ = zcu.outdated_ready.swapRemove(func_as_depender);
+        zcu.deleteUnitExports(func_as_depender);
+        zcu.deleteUnitReferences(func_as_depender);
     }

     switch (func.analysis(ip).state) {
@@ -3647,7 +3607,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
         },
         error.OutOfMemory => return error.OutOfMemory,
     };
-    defer air.deinit(gpa);
+    errdefer air.deinit(gpa);

     const invalidate_ies_deps = i: {
         if (!was_outdated) break :i false;
@@ -3669,13 +3629,36 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
     const dump_llvm_ir = build_options.enable_debug_extensions and (comp.verbose_llvm_ir != null or comp.verbose_llvm_bc != null);

     if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) {
+        air.deinit(gpa);
         return;
     }
+
+    try comp.work_queue.writeItem(.{ .codegen_func = .{
+        .func = func_index,
+        .air = air,
+    } });
+}
+
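[Editor's note: the "takes ownership of `air`, even on error" contract documented just below is implemented with a copy-then-deinit `defer`. A standalone sketch of that idiom, not compiler code, using only std:]

```zig
const std = @import("std");

// The callee frees its by-value argument even when it fails, so the caller
// must not touch the value after the call, matching `linkerUpdateFunc` below.
fn consume(gpa: std.mem.Allocator, list: std.ArrayListUnmanaged(u8)) !void {
    defer {
        var mut = list; // parameters are immutable; copy, then deinit the copy
        mut.deinit(gpa);
    }
    if (list.items.len == 0) return error.Empty; // the defer still frees it
}

test consume {
    const gpa = std.testing.allocator;
    var list: std.ArrayListUnmanaged(u8) = .{};
    try list.append(gpa, 42);
    try std.testing.expectError(error.Empty, consume(gpa, .{}));
    try consume(gpa, list); // ownership moves here; no deinit at this site
}
```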
+/// Takes ownership of `air`, even on error.
+/// If any types referenced by `air` are unresolved, marks the codegen as failed.
+pub fn linkerUpdateFunc(zcu: *Zcu, func_index: InternPool.Index, air: Air) Allocator.Error!void {
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+    const comp = zcu.comp;
+
+    defer {
+        var air_mut = air;
+        air_mut.deinit(gpa);
+    }
+
+    const func = zcu.funcInfo(func_index);
+    const decl_index = func.owner_decl;
+    const decl = zcu.declPtr(decl_index);
+
     var liveness = try Liveness.analyze(gpa, air, ip);
     defer liveness.deinit(gpa);

-    if (dump_air) {
+    if (build_options.enable_debug_extensions and comp.verbose_air) {
         const fqn = try decl.fullyQualifiedName(zcu);
         std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)});
         @import("print_air.zig").dump(zcu, air, liveness);
@@ -3683,7 +3666,7 @@
     }

     if (std.debug.runtime_safety) {
-        var verify = Liveness.Verify{
+        var verify: Liveness.Verify = .{
             .gpa = gpa,
             .air = air,
             .liveness = liveness,
@@ -3694,12 +3677,12 @@
         verify.verify() catch |err| switch (err) {
             error.OutOfMemory => return error.OutOfMemory,
             else => {
-                try zcu.failed_decls.ensureUnusedCapacity(gpa, 1);
-                zcu.failed_decls.putAssumeCapacityNoClobber(
-                    decl_index,
+                try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
+                zcu.failed_analysis.putAssumeCapacityNoClobber(
+                    AnalUnit.wrap(.{ .func = func_index }),
                     try Module.ErrorMsg.create(
                         gpa,
-                        decl.navSrcLoc(zcu).upgrade(zcu),
+                        decl.navSrcLoc(zcu),
                         "invalid liveness: {s}",
                         .{@errorName(err)},
                     ),
@@ -3713,31 +3696,34 @@
     const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0);
     defer codegen_prog_node.end();

-    if (comp.bin_file) |lf| {
+    if (!air.typesFullyResolved(zcu)) {
+        // A type we depend on failed to resolve. This is a transitive failure.
+        // Correcting this failure will involve changing a type this function
+        // depends on, hence triggering re-analysis of this function, so this
+        // interacts correctly with incremental compilation.
+        func.analysis(ip).state = .codegen_failure;
+    } else if (comp.bin_file) |lf| {
         lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) {
             error.OutOfMemory => return error.OutOfMemory,
             error.AnalysisFail => {
                 func.analysis(ip).state = .codegen_failure;
             },
             else => {
-                try zcu.failed_decls.ensureUnusedCapacity(gpa, 1);
-                zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
+                try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
+                zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .func = func_index }), try Module.ErrorMsg.create(
                     gpa,
-                    decl.navSrcLoc(zcu).upgrade(zcu),
+                    decl.navSrcLoc(zcu),
                     "unable to codegen: {s}",
                     .{@errorName(err)},
                 ));
                 func.analysis(ip).state = .codegen_failure;
-                try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalSubject.wrap(.{ .func = func_index }));
+                try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
             },
         };
     } else if (zcu.llvm_object) |llvm_object| {
         if (build_options.only_c) unreachable;
         llvm_object.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) {
             error.OutOfMemory => return error.OutOfMemory,
-            error.AnalysisFail => {
-                func.analysis(ip).state = .codegen_failure;
-            },
         };
     }
 }
@@ -3773,7 +3759,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
     assert(decl.has_tv);

-    const func_as_depender = InternPool.AnalSubject.wrap(.{ .func = func_index });
+    const func_as_depender = AnalUnit.wrap(.{ .func = func_index });
     const is_outdated = mod.outdated.contains(func_as_depender) or
         mod.potentially_outdated.contains(func_as_depender);

@@ -3792,7 +3778,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
             // Decl itself is safely analyzed, and body analysis is not yet queued

-            try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index });
+            try mod.comp.work_queue.writeItem(.{ .analyze_func = func_index });
             if (mod.emit_h != null) {
                 // TODO: we ideally only want to do this if the function's type changed
                 // since the last update
@@ -3857,7 +3843,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
     if (zcu.comp.debug_incremental) {
         try ip.addDependency(
             gpa,
-            InternPool.AnalSubject.wrap(.{ .decl = decl_index }),
+            AnalUnit.wrap(.{ .decl = decl_index }),
             .{ .src_hash = tracked_inst },
         );
     }
@@ -3869,7 +3855,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
     decl.analysis = .complete;

     try zcu.scanNamespace(namespace_index, decls, decl);
-
+    try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index });
     return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
 }

@@ -3906,7 +3892,7 @@ fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool {

     if (type_outdated) {
         // Invalidate the existing type, reusing the decl and namespace.
-        zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = file.root_decl.unwrap().? }));
+        zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = file.root_decl.unwrap().? }));
         zcu.intern_pool.remove(decl.val.toIntern());
         decl.val = undefined;
         _ = try zcu.getFileRootStruct(file.root_decl.unwrap().?, decl.src_namespace, file);
@@ -4097,7 +4083,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
         break :ip_index .none;
     };

-    mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index }));
+    mod.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .decl = decl_index }));

     decl.analysis = .in_progress;

@@ -4160,7 +4146,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
     // Note this resolves the type of the Decl, not the value; if this Decl
     // is a struct, for example, this resolves `type` (which needs no resolution),
     // not the struct itself.
-    try sema.resolveTypeLayout(decl_ty);
+    try decl_ty.resolveLayout(mod);

     if (decl.kind == .@"usingnamespace") {
         if (!decl_ty.eql(Type.type, mod)) {
@@ -4277,7 +4263,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
         if (has_runtime_bits) {
             // Needed for codegen_decl which will call updateDecl and then the
             // codegen backend wants full access to the Decl Type.
-            try sema.resolveTypeFully(decl_ty);
+            try decl_ty.resolveFully(mod);

             try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });

@@ -4293,6 +4279,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
         try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index);
     }

+    try sema.flushExports();
+
     return result;
 }

@@ -4323,7 +4311,7 @@ fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult {
     // with a new Decl.
     //
     // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime.
-    zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index }));
+    zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index }));
     zcu.intern_pool.remove(decl.val.toIntern());
     decl.analysis = .dependency_failure;
     return .{
@@ -4525,7 +4513,7 @@ pub fn embedFile(
     mod: *Module,
     cur_file: *File,
     import_string: []const u8,
-    src_loc: SrcLoc,
+    src_loc: LazySrcLoc,
 ) !InternPool.Index {
     const gpa = mod.gpa;

@@ -4600,7 +4588,7 @@ fn newEmbedFile(
     sub_file_path: []const u8,
     resolved_path: []const u8,
     result: **EmbedFile,
-    src_loc: SrcLoc,
+    src_loc: LazySrcLoc,
 ) !InternPool.Index {
     const gpa = mod.gpa;
     const ip = &mod.intern_pool;
@@ -4949,63 +4937,85 @@ pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!vo
     }
 }

-/// Delete all the Export objects that are caused by this Decl. Re-analysis of
-/// this Decl will cause them to be re-created (or not).
-fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
-    var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
-
-    for (export_owners.items) |exp| {
-        switch (exp.exported) {
-            .decl_index => |exported_decl_index| {
-                if (mod.decl_exports.getPtr(exported_decl_index)) |export_list| {
-                    // Remove exports with owner_decl matching the regenerating decl.
-                    const list = export_list.items;
-                    var i: usize = 0;
-                    var new_len = list.len;
-                    while (i < new_len) {
-                        if (list[i].owner_decl == decl_index) {
-                            mem.copyBackwards(*Export, list[i..], list[i + 1 .. new_len]);
-                            new_len -= 1;
-                        } else {
-                            i += 1;
-                        }
-                    }
-                    export_list.shrinkAndFree(mod.gpa, new_len);
-                    if (new_len == 0) {
-                        assert(mod.decl_exports.swapRemove(exported_decl_index));
-                    }
-                }
-            },
-            .value => |value| {
-                if (mod.value_exports.getPtr(value)) |export_list| {
-                    // Remove exports with owner_decl matching the regenerating decl.
-                    const list = export_list.items;
-                    var i: usize = 0;
-                    var new_len = list.len;
-                    while (i < new_len) {
-                        if (list[i].owner_decl == decl_index) {
-                            mem.copyBackwards(*Export, list[i..], list[i + 1 .. new_len]);
-                            new_len -= 1;
-                        } else {
-                            i += 1;
-                        }
-                    }
-                    export_list.shrinkAndFree(mod.gpa, new_len);
-                    if (new_len == 0) {
-                        assert(mod.value_exports.swapRemove(value));
-                    }
-                }
-            },
-        }
-        if (mod.comp.bin_file) |lf| {
-            try lf.deleteDeclExport(decl_index, exp.opts.name);
-        }
-        if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
-            failed_kv.value.destroy(mod.gpa);
-        }
-        mod.gpa.destroy(exp);
-    }
-    export_owners.deinit(mod.gpa);
+/// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of
+/// this `AnalUnit` will cause them to be re-created (or not).
+pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
+    const gpa = zcu.gpa;
+
+    const exports_base, const exports_len = if (zcu.single_exports.fetchSwapRemove(anal_unit)) |kv|
+        .{ kv.value, 1 }
+    else if (zcu.multi_exports.fetchSwapRemove(anal_unit)) |info|
+        .{ info.value.index, info.value.len }
+    else
+        return;
+
+    const exports = zcu.all_exports.items[exports_base..][0..exports_len];
+
+    // In an only-c build, we're guaranteed to never use incremental compilation, so there are
+    // guaranteed not to be any exports in the output file that need deleting (since we only call
+    // `updateExports` on flush).
+    // This case is needed because in some rare edge cases, `Sema` wants to add and delete exports
+    // within a single update.
+    if (!build_options.only_c) {
+        for (exports, exports_base..) |exp, export_idx| {
+            if (zcu.comp.bin_file) |lf| {
+                lf.deleteExport(exp.exported, exp.opts.name);
+            }
+            if (zcu.failed_exports.fetchSwapRemove(@intCast(export_idx))) |failed_kv| {
+                failed_kv.value.destroy(gpa);
+            }
+        }
+    }
+
+    zcu.free_exports.ensureUnusedCapacity(gpa, exports_len) catch {
+        // This space will be reused eventually, so we need not propagate this error.
+        // Just leak it for now, and let GC reclaim it later on.
+        return;
+    };
+    for (exports_base..exports_base + exports_len) |export_idx| {
+        zcu.free_exports.appendAssumeCapacity(@intCast(export_idx));
+    }
+}
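[Editor's note: `deleteUnitExports` above returns dead `all_exports` slots to the `free_exports` free list. A sketch of the assumed allocation side, mirroring the `popOrNull`-based pattern `addUnitReference` uses below; the real allocation site lives in Sema, so this helper is hypothetical:]

```zig
// Hypothetical slot allocator: reuse a freed index if one is available,
// otherwise grow the central `all_exports` array.
fn newExportIndex(zcu: *Zcu) !u32 {
    const gpa = zcu.gpa;
    if (zcu.free_exports.popOrNull()) |idx| return idx; // reuse a dead slot
    _ = try zcu.all_exports.addOne(gpa); // otherwise grow the array
    return @intCast(zcu.all_exports.items.len - 1);
}
```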
+
+/// Delete all references in `reference_table` which are caused by this `AnalUnit`.
+/// Re-analysis of the `AnalUnit` will cause appropriate references to be recreated.
+fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
+    const gpa = zcu.gpa;
+
+    const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return;
+    var idx = kv.value;
+
+    while (idx != std.math.maxInt(u32)) {
+        zcu.free_references.append(gpa, idx) catch {
+            // This space will be reused eventually, so we need not propagate this error.
+            // Just leak it for now, and let GC reclaim it later on.
+            return;
+        };
+        idx = zcu.all_references.items[idx].next;
+    }
+}
+
+pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit, ref_src: LazySrcLoc) Allocator.Error!void {
+    const gpa = zcu.gpa;
+
+    try zcu.reference_table.ensureUnusedCapacity(gpa, 1);
+
+    const ref_idx = zcu.free_references.popOrNull() orelse idx: {
+        _ = try zcu.all_references.addOne(gpa);
+        break :idx zcu.all_references.items.len - 1;
+    };
+
+    errdefer comptime unreachable;
+
+    const gop = zcu.reference_table.getOrPutAssumeCapacity(src_unit);
+
+    zcu.all_references.items[ref_idx] = .{
+        .referenced = referenced_unit,
+        .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
+        .src = ref_src,
+    };
+
+    gop.value_ptr.* = @intCast(ref_idx);
+}

 pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air {
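[Editor's note: `addUnitReference` above threads each unit's outgoing references into an intrusive singly linked list over `all_references`, with `std.math.maxInt(u32)` as the terminator and `reference_table` holding the head index. A hypothetical walker, to make the representation concrete:]

```zig
// Hypothetical helper: count one unit's outgoing references by walking the
// chain built by `addUnitReference`; `next == maxInt(u32)` ends the list.
fn countReferences(zcu: *Zcu, unit: AnalUnit) u32 {
    var n: u32 = 0;
    var idx = zcu.reference_table.get(unit) orelse return 0;
    while (idx != std.math.maxInt(u32)) : (idx = zcu.all_references.items[idx].next) n += 1;
    return n;
}
```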
@@ -5026,7 +5036,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
     const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0);
     defer decl_prog_node.end();

-    mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalSubject.wrap(.{ .func = func_index }));
+    mod.intern_pool.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index }));

     var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa);
     defer comptime_err_ret_trace.deinit();
@@ -5245,22 +5255,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
         else => |e| return e,
     };

-    // Similarly, resolve any queued up types that were requested to be resolved for
-    // the backends.
-    for (sema.types_to_resolve.keys()) |ty| {
-        sema.resolveTypeFully(Type.fromInterned(ty)) catch |err| switch (err) {
-            error.GenericPoison => unreachable,
-            error.ComptimeReturn => unreachable,
-            error.ComptimeBreak => unreachable,
-            error.AnalysisFail => {
-                // In this case our function depends on a type that had a compile error.
-                // We should not try to lower this function.
-                decl.analysis = .dependency_failure;
-                return error.AnalysisFail;
-            },
-            else => |e| return e,
-        };
-    }
+    try sema.flushExports();

     return .{
         .instructions = sema.air_instructions.toOwnedSlice(),
@@ -5341,17 +5336,13 @@ pub fn initNewAnonDecl(
     new_decl.analysis = .complete;
 }

-pub fn errNoteNonLazy(
+pub fn errNote(
     mod: *Module,
-    src_loc: SrcLoc,
+    src_loc: LazySrcLoc,
     parent: *ErrorMsg,
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
-    if (src_loc.lazy == .unneeded) {
-        assert(parent.src_loc.lazy == .unneeded);
-        return;
-    }
     const msg = try std.fmt.allocPrint(mod.gpa, format, args);
     errdefer mod.gpa.free(msg);

@@ -5392,76 +5383,130 @@ fn lockAndClearFileCompileError(mod: *Module, file: *File) void {
 /// Called from `Compilation.update`, after everything is done, just before
 /// reporting compile errors. In this function we emit exported symbol collision
 /// errors and communicate exported symbols to the linker backend.
-pub fn processExports(mod: *Module) !void {
-    // Map symbol names to `Export` for name collision detection.
-    var symbol_exports: SymbolExports = .{};
-    defer symbol_exports.deinit(mod.gpa);
+pub fn processExports(zcu: *Zcu) !void {
+    const gpa = zcu.gpa;

-    for (mod.decl_exports.keys(), mod.decl_exports.values()) |exported_decl, exports_list| {
-        const exported: Exported = .{ .decl_index = exported_decl };
-        try processExportsInner(mod, &symbol_exports, exported, exports_list.items);
+    // First, construct a mapping of every exported value and Decl to the indices of all its different exports.
+    var decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(u32)) = .{};
+    var value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, ArrayListUnmanaged(u32)) = .{};
+    defer {
+        for (decl_exports.values()) |*exports| {
+            exports.deinit(gpa);
+        }
+        decl_exports.deinit(gpa);
+        for (value_exports.values()) |*exports| {
+            exports.deinit(gpa);
+        }
+        value_exports.deinit(gpa);
     }

-    for (mod.value_exports.keys(), mod.value_exports.values()) |exported_value, exports_list| {
+    // We note as a heuristic:
+    // * It is rare to export a value.
+    // * It is rare for one Decl to be exported multiple times.
+    // So, this ensureTotalCapacity serves as a reasonable (albeit very approximate) optimization.
+    try decl_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count());
+
+    for (zcu.single_exports.values()) |export_idx| {
+        const exp = zcu.all_exports.items[export_idx];
+        const value_ptr, const found_existing = switch (exp.exported) {
+            .decl_index => |i| gop: {
+                const gop = try decl_exports.getOrPut(gpa, i);
+                break :gop .{ gop.value_ptr, gop.found_existing };
+            },
+            .value => |i| gop: {
+                const gop = try value_exports.getOrPut(gpa, i);
+                break :gop .{ gop.value_ptr, gop.found_existing };
+            },
+        };
+        if (!found_existing) value_ptr.* = .{};
+        try value_ptr.append(gpa, export_idx);
+    }
+
+    for (zcu.multi_exports.values()) |info| {
+        for (zcu.all_exports.items[info.index..][0..info.len], info.index..) |exp, export_idx| {
+            const value_ptr, const found_existing = switch (exp.exported) {
+                .decl_index => |i| gop: {
+                    const gop = try decl_exports.getOrPut(gpa, i);
+                    break :gop .{ gop.value_ptr, gop.found_existing };
+                },
+                .value => |i| gop: {
+                    const gop = try value_exports.getOrPut(gpa, i);
+                    break :gop .{ gop.value_ptr, gop.found_existing };
+                },
+            };
+            if (!found_existing) value_ptr.* = .{};
+            try value_ptr.append(gpa, @intCast(export_idx));
+        }
+    }
+
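[Editor's note: the loops above rebuild per-Decl and per-value export lists from the new `single_exports` and `multi_exports` maps. A sketch of the assumed recording side, which the diff itself does not show (the real recording happens in `Sema.flushExports`, called elsewhere in this commit); the helper and its exact signature are illustrative:]

```zig
// Hypothetical: one new export for a unit goes into `single_exports` as a bare
// index; a contiguous batch goes into `multi_exports` as base index plus length.
fn recordExports(zcu: *Zcu, unit: AnalUnit, base: u32, len: u32) !void {
    const gpa = zcu.gpa;
    switch (len) {
        0 => {},
        1 => try zcu.single_exports.put(gpa, unit, base),
        else => try zcu.multi_exports.put(gpa, unit, .{ .index = base, .len = len }),
    }
}
```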
+    // Map symbol names to `Export` for name collision detection.
+    var symbol_exports: SymbolExports = .{};
+    defer symbol_exports.deinit(gpa);
+
+    for (decl_exports.keys(), decl_exports.values()) |exported_decl, exports_list| {
+        const exported: Exported = .{ .decl_index = exported_decl };
+        try processExportsInner(zcu, &symbol_exports, exported, exports_list.items);
+    }
+
+    for (value_exports.keys(), value_exports.values()) |exported_value, exports_list| {
         const exported: Exported = .{ .value = exported_value };
-        try processExportsInner(mod, &symbol_exports, exported, exports_list.items);
+        try processExportsInner(zcu, &symbol_exports, exported, exports_list.items);
     }
 }

-const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, *Export);
+const SymbolExports = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, u32);

 fn processExportsInner(
     zcu: *Zcu,
     symbol_exports: *SymbolExports,
     exported: Exported,
-    exports: []const *Export,
+    export_indices: []const u32,
 ) error{OutOfMemory}!void {
     const gpa = zcu.gpa;

-    for (exports) |new_export| {
+    for (export_indices) |export_idx| {
+        const new_export = &zcu.all_exports.items[export_idx];
         const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name);
         if (gop.found_existing) {
             new_export.status = .failed_retryable;
             try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
-            const src_loc = new_export.getSrcLoc(zcu);
-            const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {}", .{
+            const msg = try ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{
                 new_export.opts.name.fmt(&zcu.intern_pool),
             });
             errdefer msg.destroy(gpa);
-            const other_export = gop.value_ptr.*;
-            const other_src_loc = other_export.getSrcLoc(zcu);
-            try zcu.errNoteNonLazy(other_src_loc, msg, "other symbol here", .{});
-            zcu.failed_exports.putAssumeCapacityNoClobber(new_export, msg);
+            const other_export = zcu.all_exports.items[gop.value_ptr.*];
+            try zcu.errNote(other_export.src, msg, "other symbol here", .{});
+            zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
             new_export.status = .failed;
         } else {
-            gop.value_ptr.* = new_export;
+            gop.value_ptr.* = export_idx;
         }
     }

     if (zcu.comp.bin_file) |lf| {
-        try handleUpdateExports(zcu, exports, lf.updateExports(zcu, exported, exports));
+        try handleUpdateExports(zcu, export_indices, lf.updateExports(zcu, exported, export_indices));
     } else if (zcu.llvm_object) |llvm_object| {
         if (build_options.only_c) unreachable;
-        try handleUpdateExports(zcu, exports, llvm_object.updateExports(zcu, exported, exports));
+        try handleUpdateExports(zcu, export_indices, llvm_object.updateExports(zcu, exported, export_indices));
     }
 }

 fn handleUpdateExports(
     zcu: *Zcu,
-    exports: []const *Export,
+    export_indices: []const u32,
     result: link.File.UpdateExportsError!void,
 ) Allocator.Error!void {
     const gpa = zcu.gpa;
     result catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         error.AnalysisFail => {
-            const new_export = exports[0];
+            const export_idx = export_indices[0];
+            const new_export = &zcu.all_exports.items[export_idx];
             new_export.status = .failed_retryable;
             try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
-            const src_loc = new_export.getSrcLoc(zcu);
-            const msg = try ErrorMsg.create(gpa, src_loc, "unable to export: {s}", .{
+            const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{
                 @errorName(err),
             });
-            zcu.failed_exports.putAssumeCapacityNoClobber(new_export, msg);
+            zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
         },
     };
 }
@@ -5619,24 +5664,21 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void {
         },
         else => {
             const gpa = zcu.gpa;
-            try zcu.failed_decls.ensureUnusedCapacity(gpa, 1);
-            zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create(
+            try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
+            zcu.failed_analysis.putAssumeCapacityNoClobber(AnalUnit.wrap(.{ .decl = decl_index }), try ErrorMsg.create(
                 gpa,
-                decl.navSrcLoc(zcu).upgrade(zcu),
+                decl.navSrcLoc(zcu),
                 "unable to codegen: {s}",
                 .{@errorName(err)},
             ));
             decl.analysis = .codegen_failure;
-            try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalSubject.wrap(.{ .decl = decl_index }));
+            try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index }));
         },
     };
     } else if (zcu.llvm_object) |llvm_object| {
         if (build_options.only_c) unreachable;
         llvm_object.updateDecl(zcu, decl_index) catch |err| switch (err) {
             error.OutOfMemory => return error.OutOfMemory,
-            error.AnalysisFail => {
-                decl.analysis = .codegen_failure;
-            },
         };
     }
 }

@@ -5652,9 +5694,8 @@ fn reportRetryableFileError(
     const err_msg = try ErrorMsg.create(
         mod.gpa,
         .{
-            .file_scope = file,
-            .base_node = 0,
-            .lazy = .entire_file,
+            .base_node_inst = try mod.intern_pool.trackZir(mod.gpa, file, .main_struct_inst),
+            .offset = .entire_file,
         },
         format,
         args,
@@ -5684,14 +5725,6 @@ pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u
     }
 }

-pub fn getDeclExports(mod: Module, decl_index: Decl.Index) []const *Export {
-    if (mod.decl_exports.get(decl_index)) |l| {
-        return l.items;
-    } else {
-        return &[0]*Export{};
-    }
-}
-
 pub const Feature = enum {
     panic_fn,
     panic_unwrap_error,
@@ -5786,6 +5819,16 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
     return Type.fromInterned((try intern(mod, .{ .ptr_type = canon_info })));
 }

+/// Like `ptrType`, but if `info` specifies an `alignment`, first ensures the pointer
+/// child type's alignment is resolved so that an invalid alignment is not used.
+/// In general, prefer this function during semantic analysis.
+pub fn ptrTypeSema(zcu: *Zcu, info: InternPool.Key.PtrType) SemaError!Type {
+    if (info.flags.alignment != .none) {
+        _ = try Type.fromInterned(info.child).abiAlignmentAdvanced(zcu, .sema);
+    }
+    return zcu.ptrType(info);
+}
+
 pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
     return ptrType(mod, .{ .child = child_type.toIntern() });
 }
@@ -6361,15 +6404,21 @@ pub fn unionAbiAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType)
     return max_align;
 }

-/// Returns the field alignment, assuming the union is not packed.
-/// Keep implementation in sync with `Sema.unionFieldAlignment`.
-/// Prefer to call that function instead of this one during Sema.
-pub fn unionFieldNormalAlignment(mod: *Module, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
-    const ip = &mod.intern_pool;
+/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
+pub fn unionFieldNormalAlignment(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32) Alignment {
+    return zcu.unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal) catch unreachable;
+}
+
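[Editor's note: the pair below introduces the `...Advanced` convention: a strat-taking variant that may perform type resolution and can therefore fail, plus a plain wrapper whose callers guarantee resolution already happened, making `catch unreachable` sound. A miniature of the pattern with illustrative names only, not compiler API:]

```zig
const std = @import("std");

const ResolveStrat = enum { normal, sema };

fn alignOf(resolved: ?u64) u64 {
    // Callers of the non-advanced variant guarantee the value was resolved.
    return alignOfAdvanced(resolved, .normal) catch unreachable;
}

fn alignOfAdvanced(resolved: ?u64, strat: ResolveStrat) error{AnalysisFail}!u64 {
    if (resolved) |a| return a;
    return switch (strat) {
        .normal => unreachable, // the caller promised prior resolution
        .sema => 8, // stand-in for doing the resolution work here
    };
}

test alignOf {
    try std.testing.expectEqual(@as(u64, 4), alignOf(4));
    try std.testing.expectEqual(@as(u64, 8), try alignOfAdvanced(null, .sema));
}
```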
+/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn unionFieldNormalAlignmentAdvanced(zcu: *Zcu, loaded_union: InternPool.LoadedUnionType, field_index: u32, strat: Type.ResolveStrat) SemaError!Alignment {
+    const ip = &zcu.intern_pool;
+    assert(loaded_union.flagsPtr(ip).layout != .@"packed");
     const field_align = loaded_union.fieldAlign(ip, field_index);
     if (field_align != .none) return field_align;
     const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
-    return field_ty.abiAlignment(mod);
+    if (field_ty.isNoReturn(zcu)) return .none;
+    return (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
 }

 /// Returns the index of the active field, given the current tag value
@@ -6380,41 +6429,37 @@ pub fn unionTagFieldIndex(mod: *Module, loaded_union: InternPool.LoadedUnionType
     return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
 }

-/// Returns the field alignment of a non-packed struct in byte units.
-/// Keep implementation in sync with `Sema.structFieldAlignment`.
-/// asserts the layout is not packed.
+/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
 pub fn structFieldAlignment(
-    mod: *Module,
+    zcu: *Zcu,
     explicit_alignment: InternPool.Alignment,
     field_ty: Type,
     layout: std.builtin.Type.ContainerLayout,
 ) Alignment {
-    assert(layout != .@"packed");
-    if (explicit_alignment != .none) return explicit_alignment;
-    switch (layout) {
-        .@"packed" => unreachable,
-        .auto => {
-            if (mod.getTarget().ofmt == .c) {
-                return structFieldAlignmentExtern(mod, field_ty);
-            } else {
-                return field_ty.abiAlignment(mod);
-            }
-        },
-        .@"extern" => return structFieldAlignmentExtern(mod, field_ty),
-    }
+    return zcu.structFieldAlignmentAdvanced(explicit_alignment, field_ty, layout, .normal) catch unreachable;
 }

-/// Returns the field alignment of an extern struct in byte units.
-/// This logic is duplicated in Type.abiAlignmentAdvanced.
-pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
-    const ty_abi_align = field_ty.abiAlignment(mod);
-
-    if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
-        // The C ABI requires 128 bit integer fields of structs
-        // to be 16-bytes aligned.
-        return ty_abi_align.max(.@"16");
+/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn structFieldAlignmentAdvanced(
+    zcu: *Zcu,
+    explicit_alignment: InternPool.Alignment,
+    field_ty: Type,
+    layout: std.builtin.Type.ContainerLayout,
+    strat: Type.ResolveStrat,
+) SemaError!Alignment {
+    assert(layout != .@"packed");
+    if (explicit_alignment != .none) return explicit_alignment;
+    const ty_abi_align = (try field_ty.abiAlignmentAdvanced(zcu, strat.toLazy())).scalar;
+    switch (layout) {
+        .@"packed" => unreachable,
+        .auto => if (zcu.getTarget().ofmt != .c) return ty_abi_align,
+        .@"extern" => {},
+    }
+    // extern
+    if (field_ty.isAbiInt(zcu) and field_ty.intInfo(zcu).bits >= 128) {
+        return ty_abi_align.maxStrict(.@"16");
     }
-
     return ty_abi_align;
 }
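[Editor's note: a worked instance of the C ABI rule in `structFieldAlignmentAdvanced` above, as a standalone toy; the function name and signature are illustrative, not compiler API:]

```zig
const std = @import("std");

// For extern layouts (and auto layouts targeting the C object format),
// integer fields of 128 bits or more have their natural alignment raised
// to at least 16 bytes, per the C ABI.
fn externIntFieldAlign(natural_align: u64, bits: u16) u64 {
    return if (bits >= 128) @max(natural_align, 16) else natural_align;
}

test externIntFieldAlign {
    try std.testing.expectEqual(@as(u64, 16), externIntFieldAlign(8, 128)); // e.g. u128
    try std.testing.expectEqual(@as(u64, 4), externIntFieldAlign(4, 32)); // e.g. u32
}
```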

@@ -6440,3 +6485,62 @@ pub fn structPackedFieldBitOffset(
     }
     unreachable; // index out of bounds
 }
+
+pub const ResolvedReference = struct {
+    referencer: AnalUnit,
+    src: LazySrcLoc,
+};
+
+/// Returns a mapping from an `AnalUnit` to where it is referenced.
+/// TODO: in future, this must be adapted to traverse from roots of analysis. That way, we can
+/// use the returned map to determine which units have become unreferenced in an incremental update.
+pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) {
+    const gpa = zcu.gpa;
+
+    var result: std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) = .{};
+    errdefer result.deinit(gpa);
+
+    // This is not a sufficient size, but a lower bound.
+    try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));
+
+    for (zcu.reference_table.keys(), zcu.reference_table.values()) |referencer, first_ref_idx| {
+        assert(first_ref_idx != std.math.maxInt(u32));
+        var ref_idx = first_ref_idx;
+        while (ref_idx != std.math.maxInt(u32)) {
+            const ref = zcu.all_references.items[ref_idx];
+            const gop = try result.getOrPut(gpa, ref.referenced);
+            if (!gop.found_existing) {
+                gop.value_ptr.* = .{ .referencer = referencer, .src = ref.src };
+            }
+            ref_idx = ref.next;
+        }
+    }
+
+    return result;
+}
+
+pub fn getBuiltin(zcu: *Zcu, name: []const u8) Allocator.Error!Air.Inst.Ref {
+    const decl_index = try zcu.getBuiltinDecl(name);
+    zcu.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt");
+    return Air.internedToRef(zcu.declPtr(decl_index).val.toIntern());
+}
+
+pub fn getBuiltinDecl(zcu: *Zcu, name: []const u8) Allocator.Error!InternPool.DeclIndex {
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+    const std_file = (zcu.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig")).file;
+    const std_namespace = zcu.declPtr(std_file.root_decl.unwrap().?).getOwnedInnerNamespace(zcu).?;
+    const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
+    const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'");
+    zcu.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt");
+    const builtin_namespace = zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt");
+    const name_str = try ip.getOrPutString(gpa, name, .no_embedded_nulls);
+    return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt");
+}
+
+pub fn getBuiltinType(zcu: *Zcu, name: []const u8) Allocator.Error!Type {
+    const ty_inst = try zcu.getBuiltin(name);
+    const ty = Type.fromInterned(ty_inst.toInterned() orelse @panic("std.builtin is corrupt"));
+    ty.resolveFully(zcu) catch @panic("std.builtin is corrupt");
+    return ty;
+}
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 48908db51b..14b9cce3a8 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -8,7 +8,7 @@ const Air = @import("../../Air.zig");
 const Mir = @import("Mir.zig");
 const Emit = @import("Emit.zig");
 const Liveness = @import("../../Liveness.zig");
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Value = @import("../../Value.zig");
 const link = @import("../../link.zig");
 const Zcu = @import("../../Zcu.zig");
@@ -59,7 +59,7 @@ args: []MCValue,
 ret_mcv: MCValue,
 fn_type: Type,
 arg_index: u32,
-src_loc: Module.SrcLoc,
+src_loc: Module.LazySrcLoc,
 stack_align: u32,

 /// MIR Instructions
@@ -331,7 +331,7 @@ const Self = @This();

 pub fn generate(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     func_index: InternPool.Index,
     air: Air,
     liveness: Liveness,
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index a783137a54..2588db6adc 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -22,7 +22,7 @@ bin_file: *link.File,
 debug_output: DebugInfoOutput,
 target: *const std.Target,
 err_msg: ?*ErrorMsg = null,
-src_loc: Module.SrcLoc,
+src_loc: Module.LazySrcLoc,
 code: *std.ArrayList(u8),

 prev_di_line: u32,
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 2a25dbf1be..5eeeee0fa2 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -3,7 +3,7 @@ const builtin = @import("builtin");
 const bits = @import("bits.zig");
 const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Zcu = @import("../../Zcu.zig");
 /// Deprecated.
 const Module = Zcu;
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 60453cebe2..0423b63d23 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -8,7 +8,7 @@ const Air = @import("../../Air.zig");
 const Mir = @import("Mir.zig");
 const Emit = @import("Emit.zig");
 const Liveness = @import("../../Liveness.zig");
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Value = @import("../../Value.zig");
 const link = @import("../../link.zig");
 const Zcu = @import("../../Zcu.zig");
@@ -59,7 +59,7 @@ args: []MCValue,
 ret_mcv: MCValue,
 fn_type: Type,
 arg_index: u32,
-src_loc: Module.SrcLoc,
+src_loc: Module.LazySrcLoc,
 stack_align: u32,

 /// MIR Instructions
@@ -338,7 +338,7 @@ const Self = @This();

 pub fn generate(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     func_index: InternPool.Index,
     air: Air,
     liveness: Liveness,
diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig
index 3a9bfcf4b6..da19760d8b 100644
--- a/src/arch/arm/Emit.zig
+++ b/src/arch/arm/Emit.zig
@@ -11,7 +11,7 @@ const link = @import("../../link.zig");
 const Zcu = @import("../../Zcu.zig");
 /// Deprecated.
 const Module = Zcu;
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const ErrorMsg = Module.ErrorMsg;
 const Target = std.Target;
 const assert = std.debug.assert;
@@ -26,7 +26,7 @@ bin_file: *link.File,
 debug_output: DebugInfoOutput,
 target: *const std.Target,
 err_msg: ?*ErrorMsg = null,
-src_loc: Module.SrcLoc,
+src_loc: Module.LazySrcLoc,
 code: *std.ArrayList(u8),

 prev_di_line: u32,
diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig
index 1a434b3b8c..f88218bc57 100644
--- a/src/arch/arm/abi.zig
+++ b/src/arch/arm/abi.zig
@@ -3,7 +3,7 @@ const assert = std.debug.assert;
 const bits = @import("bits.zig");
 const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Zcu = @import("../../Zcu.zig");
 /// Deprecated.
 const Module = Zcu;
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index a5cdf8621b..3f01b74733 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -7,7 +7,7 @@ const Air = @import("../../Air.zig");
 const Mir = @import("Mir.zig");
 const Emit = @import("Emit.zig");
 const Liveness = @import("../../Liveness.zig");
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Value = @import("../../Value.zig");
 const link = @import("../../link.zig");
 const Zcu = @import("../../Zcu.zig");
@@ -59,7 +59,7 @@ args: []MCValue,
 ret_mcv: InstTracking,
 fn_type: Type,
 arg_index: usize,
-src_loc: Zcu.SrcLoc,
+src_loc: Zcu.LazySrcLoc,

 /// MIR Instructions
 mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
@@ -696,7 +696,7 @@ const CallView = enum(u1) {

 pub fn generate(
     bin_file: *link.File,
-    src_loc: Zcu.SrcLoc,
+    src_loc: Zcu.LazySrcLoc,
     func_index: InternPool.Index,
     air: Air,
     liveness: Liveness,
diff --git a/src/arch/riscv64/Lower.zig b/src/arch/riscv64/Lower.zig
index dda3f3cf2a..3d3dc8513f 100644
--- a/src/arch/riscv64/Lower.zig
+++ b/src/arch/riscv64/Lower.zig
@@ -8,7 +8,7 @@ allocator: Allocator,
 mir: Mir,
 cc: std.builtin.CallingConvention,
 err_msg: ?*ErrorMsg = null,
-src_loc: Zcu.SrcLoc,
+src_loc: Zcu.LazySrcLoc,
 result_insts_len: u8 = undefined,
 result_relocs_len: u8 = undefined,
 result_insts: [
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 0753b142b1..80a533d880 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -431,7 +431,7 @@ pub const RegisterList = struct {
 const Mir = @This();
 const std = @import("std");
 const builtin = @import("builtin");
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const assert = std.debug.assert;
diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig
index dd35fc41e5..042af564f6 100644
--- a/src/arch/riscv64/abi.zig
+++ b/src/arch/riscv64/abi.zig
@@ -2,7 +2,7 @@ const std = @import("std");
 const bits = @import("bits.zig");
 const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const InternPool = @import("../../InternPool.zig");
 const Zcu = @import("../../Zcu.zig");
 const assert = std.debug.assert;
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 14500ed329..2416eb9176 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -21,7 +21,7 @@ const Air = @import("../../Air.zig");
 const Mir = @import("Mir.zig");
 const Emit = @import("Emit.zig");
 const Liveness = @import("../../Liveness.zig");
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const CodeGenError = codegen.CodeGenError;
 const Result = @import("../../codegen.zig").Result;
 const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
@@ -64,7 +64,7 @@ args: []MCValue,
 ret_mcv: MCValue,
 fn_type: Type,
 arg_index: usize,
-src_loc: Module.SrcLoc,
+src_loc: Module.LazySrcLoc,
 stack_align: Alignment,

 /// MIR Instructions
@@ -263,7 +263,7 @@ const BigTomb = struct {

 pub fn generate(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     func_index: InternPool.Index,
     air: Air,
     liveness: Liveness,
diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig
index acd605eebc..b509bb7c79 100644
--- a/src/arch/sparc64/Emit.zig
+++ b/src/arch/sparc64/Emit.zig
@@ -24,7 +24,7 @@ bin_file: *link.File,
 debug_output: DebugInfoOutput,
 target: *const std.Target,
 err_msg: ?*ErrorMsg = null,
-src_loc: Module.SrcLoc,
+src_loc: Module.LazySrcLoc,
 code: *std.ArrayList(u8),

 prev_di_line: u32,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index b1ebf9126d..2ecface64e 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -13,7 +13,7 @@ const codegen = @import("../../codegen.zig");
 const Zcu = @import("../../Zcu.zig");
 const InternPool = @import("../../InternPool.zig");
 const Decl = Zcu.Decl;
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Value = @import("../../Value.zig");
 const Compilation = @import("../../Compilation.zig");
 const link = @import("../../link.zig");
@@ -765,7 +765,7 @@ pub fn deinit(func: *CodeGen) void {
 /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig
 fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError {
     const mod = func.bin_file.base.comp.module.?;
-    const src_loc = func.decl.navSrcLoc(mod).upgrade(mod);
+    const src_loc = func.decl.navSrcLoc(mod);
     func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args);
     return error.CodegenFail;
 }
@@ -1202,7 +1202,7 @@ fn genFunctype(

 pub fn generate(
     bin_file: *link.File,
-    src_loc: Zcu.SrcLoc,
+    src_loc: Zcu.LazySrcLoc,
     func_index: InternPool.Index,
     air: Air,
     liveness: Liveness,
@@ -3162,7 +3162,7 @@ fn lowerAnonDeclRef(
     }

     const decl_align = mod.intern_pool.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment;
-    const res = try func.bin_file.lowerAnonDecl(decl_val, decl_align, func.decl.navSrcLoc(mod).upgrade(mod));
+    const res = try func.bin_file.lowerAnonDecl(decl_val, decl_align, func.decl.navSrcLoc(mod));
     switch (res) {
         .ok => {},
         .fail => |em| {
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index c41ea9ec55..73ef723345 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -257,7 +257,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
     const comp = emit.bin_file.base.comp;
     const zcu = comp.module.?;
     const gpa = comp.gpa;
-    emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu).upgrade(zcu), format, args);
+    emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu), format, args);
     return error.EmitFail;
 }
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index 03c68daa85..23097990ac 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -8,7 +8,7 @@ const std = @import("std");
 const Target = std.Target;
 const assert = std.debug.assert;

-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Zcu = @import("../../Zcu.zig");

 /// Defines how to pass a type as part of a function signature,
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index ea6f0f8a4e..def0edcac9 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -32,7 +32,7 @@ const Module = Zcu;
 const InternPool = @import("../../InternPool.zig");
 const Alignment = InternPool.Alignment;
 const Target = std.Target;
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Value = @import("../../Value.zig");
 const Instruction = @import("encoder.zig").Instruction;

@@ -74,7 +74,7 @@ va_info: union {
 ret_mcv: InstTracking,
 fn_type: Type,
 arg_index: u32,
-src_loc: Module.SrcLoc,
+src_loc: Module.LazySrcLoc,

 eflags_inst: ?Air.Inst.Index = null,
@@ -795,7 +795,7 @@ const Self = @This();

 pub fn generate(
     bin_file: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     func_index: InternPool.Index,
     air: Air,
     liveness: Liveness,
@@ -971,7 +971,7 @@ pub fn generate(

 pub fn generateLazy(
     bin_file: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     lazy_sym: link.File.LazySymbol,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig
index 058a0550d9..852d19132d 100644
--- a/src/arch/x86_64/Lower.zig
+++ b/src/arch/x86_64/Lower.zig
@@ -8,7 +8,7 @@ allocator: Allocator,
 mir: Mir,
 cc: std.builtin.CallingConvention,
 err_msg: ?*ErrorMsg = null,
-src_loc: Module.SrcLoc,
+src_loc: Module.LazySrcLoc,
 result_insts_len: u8 = undefined,
 result_relocs_len: u8 = undefined,
 result_insts: [
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index f1117f16c1..05c0c9626c 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -537,6 +537,6 @@ const testing = std.testing;
 const InternPool = @import("../../InternPool.zig");
 const Register = @import("bits.zig").Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Value = @import("../../Value.zig");
 const Zcu = @import("../../Zcu.zig");
diff --git a/src/codegen.zig b/src/codegen.zig
index b8662ed15b..5e25359d44 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -20,7 +20,7 @@ const Zcu = @import("Zcu.zig");
 /// Deprecated.
 const Module = Zcu;
 const Target = std.Target;
-const Type = @import("type.zig").Type;
+const Type = @import("Type.zig");
 const Value = @import("Value.zig");
 const Zir = std.zig.Zir;
 const Alignment = InternPool.Alignment;
@@ -47,7 +47,7 @@ pub const DebugInfoOutput = union(enum) {

 pub fn generateFunction(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     func_index: InternPool.Index,
     air: Air,
     liveness: Liveness,
@@ -79,7 +79,7 @@ pub fn generateFunction(

 pub fn generateLazyFunction(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     lazy_sym: link.File.LazySymbol,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
@@ -105,7 +105,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian

 pub fn generateLazySymbol(
     bin_file: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     lazy_sym: link.File.LazySymbol,
     // TODO don't use an "out" parameter like this; put it in the result instead
     alignment: *Alignment,
@@ -171,7 +171,7 @@ pub fn generateLazySymbol(

 pub fn generateSymbol(
     bin_file: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     val: Value,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
@@ -618,7 +618,7 @@ pub fn generateSymbol(

 fn lowerPtr(
     bin_file: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     ptr_val: InternPool.Index,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
@@ -683,7 +683,7 @@ const RelocInfo = struct {

 fn lowerAnonDeclRef(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
@@ -730,7 +730,7 @@ fn lowerAnonDeclRef(

 fn lowerDeclRef(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     decl_index: InternPool.DeclIndex,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
@@ -814,7 +814,7 @@ pub const GenResult = union(enum) {

     fn fail(
         gpa: Allocator,
-        src_loc: Module.SrcLoc,
+        src_loc: Module.LazySrcLoc,
         comptime format: []const u8,
         args: anytype,
     ) Allocator.Error!GenResult {
@@ -825,7 +825,7 @@ pub const GenResult = union(enum) {

 fn genDeclRef(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     val: Value,
     ptr_decl_index: InternPool.DeclIndex,
 ) CodeGenError!GenResult {
@@ -931,7 +931,7 @@ fn genDeclRef(

 fn genUnnamedConst(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     val: Value,
     owner_decl_index: InternPool.DeclIndex,
 ) CodeGenError!GenResult {
@@ -970,7 +970,7 @@ fn genUnnamedConst(

 pub fn genTypedValue(
     lf: *link.File,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
     val: Value,
     owner_decl_index: InternPool.DeclIndex,
 ) CodeGenError!GenResult {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 94f8faa441..92e9edb433 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -9,7 +9,7 @@ const Zcu = @import("../Zcu.zig");
 const Module = @import("../Package/Module.zig");
 const Compilation = @import("../Compilation.zig");
 const Value = @import("../Value.zig");
-const Type = @import("../type.zig").Type;
+const Type = @import("../Type.zig");
 const C = link.File.C;
 const Decl = Zcu.Decl;
 const trace = @import("../tracy.zig").trace;
@@ -637,7 +637,7 @@ pub const DeclGen = struct {
         const zcu = dg.zcu;
         const decl_index = dg.pass.decl;
         const decl = zcu.declPtr(decl_index);
-        const src_loc = decl.navSrcLoc(zcu).upgrade(zcu);
+        const src_loc = decl.navSrcLoc(zcu);
         dg.error_msg = try Zcu.ErrorMsg.create(dg.gpa, src_loc, format, args);
         return error.AnalysisFail;
     }
@@ -731,8 +731,6 @@ pub const DeclGen = struct {
         if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index)
             return dg.renderDeclValue(writer, extern_func.decl, location);

-        if (decl.val.getVariable(zcu)) |variable| try dg.renderFwdDecl(decl_index, variable, .tentative);
-
         // We shouldn't cast C function pointers as this is UB (when you call
         // them). The analysis until now should ensure that the C function
         // pointers are compatible. If they are not, then there is a bug
@@ -748,7 +746,7 @@ pub const DeclGen = struct {
             try writer.writeByte(')');
         }
         try writer.writeByte('&');
-        try dg.renderDeclName(writer, decl_index, 0);
+        try dg.renderDeclName(writer, decl_index);
         if (need_cast) try writer.writeByte(')');
     }

@@ -1765,19 +1763,22 @@ pub const DeclGen = struct {
     fn renderFunctionSignature(
         dg: *DeclGen,
         w: anytype,
-        fn_decl_index: InternPool.DeclIndex,
+        fn_val: Value,
+        fn_align: InternPool.Alignment,
         kind: CType.Kind,
         name: union(enum) {
-            export_index: u32,
-            ident: []const u8,
+            decl: InternPool.DeclIndex,
             fmt_ctype_pool_string: std.fmt.Formatter(formatCTypePoolString),
+            @"export": struct {
+                main_name: InternPool.NullTerminatedString,
+                extern_name: InternPool.NullTerminatedString,
+            },
         },
     ) !void {
         const zcu = dg.zcu;
         const ip = &zcu.intern_pool;
-        const fn_decl = zcu.declPtr(fn_decl_index);
-        const fn_ty = fn_decl.typeOf(zcu);
+        const fn_ty = fn_val.typeOf(zcu);
         const fn_ctype = try dg.ctypeFromType(fn_ty, kind);

         const fn_info = zcu.typeToFunc(fn_ty).?;
@@ -1788,7 +1789,7 @@ pub const DeclGen = struct {
                 else => unreachable,
             }
         }
-        if (fn_decl.val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold)
+        if (fn_val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold)
             try w.writeAll("zig_cold ");
         if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");

@@ -1799,22 +1800,11 @@ pub const DeclGen = struct {
             trailing = .maybe_space;
         }

-        switch (kind) {
-            .forward => {},
-            .complete => if (fn_decl.alignment.toByteUnits()) |a| {
-                try w.print("{}zig_align_fn({})", .{ trailing, a });
-                trailing = .maybe_space;
-            },
-            else => unreachable,
-        }
-
+        try w.print("{}", .{trailing});
         switch (name) {
-            .export_index => |export_index| {
-                try w.print("{}", .{trailing});
-                try dg.renderDeclName(w, fn_decl_index, export_index);
-            },
-            .ident => |ident| try w.print("{}{ }", .{ trailing, fmtIdent(ident) }),
-            .fmt_ctype_pool_string => |fmt| try w.print("{}{ }", .{ trailing, fmt }),
+            .decl => |decl_index| try dg.renderDeclName(w, decl_index),
+            .fmt_ctype_pool_string => |fmt| try w.print("{ }", .{fmt}),
+            .@"export" => |@"export"| try w.print("{ }", .{fmtIdent(@"export".extern_name.toSlice(ip))}),
         }

         try renderTypeSuffix(
@@ -1833,44 +1823,30 @@ pub const DeclGen = struct {
         switch (kind) {
             .forward => {
-                if (fn_decl.alignment.toByteUnits()) |a| {
-                    try w.print(" zig_align_fn({})", .{a});
-                }
+                if (fn_align.toByteUnits()) |a| try w.print(" zig_align_fn({})", .{a});
                 switch (name) {
-                    .export_index => |export_index| mangled: {
-                        const maybe_exports = zcu.decl_exports.get(fn_decl_index);
-                        const external_name = (if (maybe_exports) |exports|
-                            exports.items[export_index].opts.name
-                        else if (fn_decl.isExtern(zcu))
-                            fn_decl.name
-                        else
-                            break :mangled).toSlice(ip);
-                        const is_mangled = isMangledIdent(external_name, true);
-                        const is_export = export_index > 0;
+                    .decl, .fmt_ctype_pool_string => {},
+                    .@"export" => |@"export"| {
+                        const extern_name = @"export".extern_name.toSlice(ip);
+                        const is_mangled = isMangledIdent(extern_name, true);
+                        const is_export = @"export".extern_name != @"export".main_name;
                         if (is_mangled and is_export) {
                             try w.print(" zig_mangled_export({ }, {s}, {s})", .{
-                                fmtIdent(external_name),
-                                fmtStringLiteral(external_name, null),
-                                fmtStringLiteral(
-                                    maybe_exports.?.items[0].opts.name.toSlice(ip),
-                                    null,
-                                ),
+                                fmtIdent(extern_name),
+                                fmtStringLiteral(extern_name, null),
+                                fmtStringLiteral(@"export".main_name.toSlice(ip), null),
                             });
                         } else if (is_mangled) {
-                            try w.print(" zig_mangled_final({
}, {s})", .{ - fmtIdent(external_name), fmtStringLiteral(external_name, null), + try w.print(" zig_mangled({ }, {s})", .{ + fmtIdent(extern_name), fmtStringLiteral(extern_name, null), }); } else if (is_export) { try w.print(" zig_export({s}, {s})", .{ - fmtStringLiteral( - maybe_exports.?.items[0].opts.name.toSlice(ip), - null, - ), - fmtStringLiteral(external_name, null), + fmtStringLiteral(@"export".main_name.toSlice(ip), null), + fmtStringLiteral(extern_name, null), }); } }, - .ident, .fmt_ctype_pool_string => {}, } }, .complete => {}, @@ -2085,21 +2061,11 @@ pub const DeclGen = struct { try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); } - fn declIsGlobal(dg: *DeclGen, val: Value) bool { - const zcu = dg.zcu; - return switch (zcu.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| zcu.decl_exports.contains(variable.decl), - .extern_func => true, - .func => |func| zcu.decl_exports.contains(func.owner_decl), - else => unreachable, - }; - } - fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .new_local, .local => |i| try w.print("t{d}", .{i}), .constant => |val| try renderAnonDeclName(w, val), - .decl => |decl| try dg.renderDeclName(w, decl, 0), + .decl => |decl| try dg.renderDeclName(w, decl), .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}), else => unreachable, } @@ -2111,10 +2077,10 @@ pub const DeclGen = struct { .constant => |val| try renderAnonDeclName(w, val), .arg, .arg_array => unreachable, .field => |i| try w.print("f{d}", .{i}), - .decl => |decl| try dg.renderDeclName(w, decl, 0), + .decl => |decl| try dg.renderDeclName(w, decl), .decl_ref => |decl| { try w.writeByte('&'); - try dg.renderDeclName(w, decl, 0); + try dg.renderDeclName(w, decl); }, .undef => |ty| try dg.renderUndefValue(w, ty, .Other), .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}), @@ -2142,10 +2108,10 @@ pub const DeclGen = struct { .field => |i| try w.print("f{d}", .{i}), .decl => |decl| { try w.writeAll("(*"); - try dg.renderDeclName(w, decl, 0); + try dg.renderDeclName(w, decl); try w.writeByte(')'); }, - .decl_ref => |decl| try dg.renderDeclName(w, decl, 0), + .decl_ref => |decl| try dg.renderDeclName(w, decl), .undef => unreachable, .identifier => |ident| try w.print("(*{ })", .{fmtIdent(ident)}), .payload_identifier => |ident| try w.print("(*{ }.{ })", .{ @@ -2195,19 +2161,12 @@ pub const DeclGen = struct { dg: *DeclGen, decl_index: InternPool.DeclIndex, variable: InternPool.Key.Variable, - fwd_kind: enum { tentative, final }, ) !void { const zcu = dg.zcu; const decl = zcu.declPtr(decl_index); const fwd = dg.fwdDeclWriter(); - const is_global = variable.is_extern or dg.declIsGlobal(decl.val); - try fwd.writeAll(if (is_global) "zig_extern " else "static "); - const maybe_exports = zcu.decl_exports.get(decl_index); - const export_weak_linkage = if (maybe_exports) |exports| - exports.items[0].opts.linkage == .weak - else - false; - if (variable.is_weak_linkage or export_weak_linkage) try fwd.writeAll("zig_weak_linkage "); + try fwd.writeAll(if (variable.is_extern) "zig_extern " else "static "); + if (variable.is_weak_linkage) try fwd.writeAll("zig_weak_linkage "); if (variable.is_threadlocal and !dg.mod.single_threaded) try fwd.writeAll("zig_threadlocal "); try dg.renderTypeAndName( fwd, @@ -2217,38 +2176,17 @@ pub const DeclGen = struct { decl.alignment, .complete, ); - mangled: { - const external_name = (if (maybe_exports) |exports| - exports.items[0].opts.name - else if (variable.is_extern) - 
-                decl.name
-            else
-                break :mangled).toSlice(&zcu.intern_pool);
-            if (isMangledIdent(external_name, true)) {
-                try fwd.print(" zig_mangled_{s}({ }, {s})", .{
-                    @tagName(fwd_kind),
-                    fmtIdent(external_name),
-                    fmtStringLiteral(external_name, null),
-                });
-            }
-        }
         try fwd.writeAll(";\n");
     }

-    fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex, export_index: u32) !void {
+    fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex) !void {
         const zcu = dg.zcu;
         const ip = &zcu.intern_pool;
         const decl = zcu.declPtr(decl_index);
-        if (zcu.decl_exports.get(decl_index)) |exports| {
-            try writer.print("{ }", .{
-                fmtIdent(exports.items[export_index].opts.name.toSlice(ip)),
-            });
-        } else if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| {
-            try writer.print("{ }", .{
-                fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)),
-            });
-        } else {
+        if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| try writer.print("{ }", .{
+            fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)),
+        }) else {
             // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case),
             // expand to 3x the length of its input, but let's cut it off at a much shorter limit.
             var name: [100]u8 = undefined;
@@ -2761,69 +2699,6 @@ pub fn genErrDecls(o: *Object) !void {
     try writer.writeAll("};\n");
 }

-fn genExports(o: *Object) !void {
-    const tracy = trace(@src());
-    defer tracy.end();
-
-    const zcu = o.dg.zcu;
-    const ip = &zcu.intern_pool;
-    const decl_index = switch (o.dg.pass) {
-        .decl => |decl| decl,
-        .anon, .flush => return,
-    };
-    const decl = zcu.declPtr(decl_index);
-    const fwd = o.dg.fwdDeclWriter();
-
-    const exports = zcu.decl_exports.get(decl_index) orelse return;
-    if (exports.items.len < 2) return;
-
-    const is_variable_const = switch (ip.indexToKey(decl.val.toIntern())) {
-        .func => return for (exports.items[1..], 1..) |@"export", i| {
-            try fwd.writeAll("zig_extern ");
-            if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn ");
-            try o.dg.renderFunctionSignature(
-                fwd,
-                decl_index,
-                .forward,
-                .{ .export_index = @intCast(i) },
-            );
-            try fwd.writeAll(";\n");
-        },
-        .extern_func => {
-            // TODO: when sema allows re-exporting extern decls
-            unreachable;
-        },
-        .variable => |variable| variable.is_const,
-        else => true,
-    };
-    for (exports.items[1..]) |@"export"| {
-        try fwd.writeAll("zig_extern ");
-        if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage ");
-        const export_name = @"export".opts.name.toSlice(ip);
-        try o.dg.renderTypeAndName(
-            fwd,
-            decl.typeOf(zcu),
-            .{ .identifier = export_name },
-            CQualifiers.init(.{ .@"const" = is_variable_const }),
-            decl.alignment,
-            .complete,
-        );
-        if (isMangledIdent(export_name, true)) {
-            try fwd.print(" zig_mangled_export({ }, {s}, {s})", .{
-                fmtIdent(export_name),
-                fmtStringLiteral(export_name, null),
-                fmtStringLiteral(exports.items[0].opts.name.toSlice(ip), null),
-            });
-        } else {
-            try fwd.print(" zig_export({s}, {s})", .{
-                fmtStringLiteral(exports.items[0].opts.name.toSlice(ip), null),
-                fmtStringLiteral(export_name, null),
-            });
-        }
-        try fwd.writeAll(";\n");
-    }
-}
-
 pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) !void {
     const zcu = o.dg.zcu;
     const ip = &zcu.intern_pool;
@@ -2885,19 +2760,19 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
     const fn_info = fn_ctype.info(ctype_pool).function;
     const fn_name = fmtCTypePoolString(val.fn_name, lazy_ctype_pool);

-    const fwd_decl_writer = o.dg.fwdDeclWriter();
-    try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)});
-    try o.dg.renderFunctionSignature(fwd_decl_writer, fn_decl_index, .forward, .{
+    const fwd = o.dg.fwdDeclWriter();
+    try fwd.print("static zig_{s} ", .{@tagName(key)});
+    try o.dg.renderFunctionSignature(fwd, fn_decl.val, fn_decl.alignment, .forward, .{
         .fmt_ctype_pool_string = fn_name,
     });
-    try fwd_decl_writer.writeAll(";\n");
+    try fwd.writeAll(";\n");

-    try w.print("static zig_{s} ", .{@tagName(key)});
-    try o.dg.renderFunctionSignature(w, fn_decl_index, .complete, .{
+    try w.print("zig_{s} ", .{@tagName(key)});
+    try o.dg.renderFunctionSignature(w, fn_decl.val, .none, .complete, .{
         .fmt_ctype_pool_string = fn_name,
     });
     try w.writeAll(" {\n return ");
-    try o.dg.renderDeclName(w, fn_decl_index, 0);
+    try o.dg.renderDeclName(w, fn_decl_index);
     try w.writeByte('(');
     for (0..fn_info.param_ctypes.len) |arg| {
         if (arg > 0) try w.writeAll(", ");
@@ -2921,21 +2796,26 @@ pub fn genFunc(f: *Function) !void {
     o.code_header = std.ArrayList(u8).init(gpa);
     defer o.code_header.deinit();

-    const is_global = o.dg.declIsGlobal(decl.val);
-    const fwd_decl_writer = o.dg.fwdDeclWriter();
-    try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
+    const fwd = o.dg.fwdDeclWriter();
+    try fwd.writeAll("static ");
+    try o.dg.renderFunctionSignature(
+        fwd,
+        decl.val,
+        decl.alignment,
+        .forward,
+        .{ .decl = decl_index },
+    );
+    try fwd.writeAll(";\n");

-    if (zcu.decl_exports.get(decl_index)) |exports|
-        if (exports.items[0].opts.linkage == .weak) try fwd_decl_writer.writeAll("zig_weak_linkage_fn ");
-    try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 });
-    try fwd_decl_writer.writeAll(";\n");
-    try genExports(o);
-
-    try o.indent_writer.insertNewline();
-    if (!is_global) try o.writer().writeAll("static ");
     if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s|
         try o.writer().print("zig_linksection_fn({s}) ", .{fmtStringLiteral(s, null)});
-    try o.dg.renderFunctionSignature(o.writer(), decl_index, .complete, .{ .export_index = 0 });
+    try o.dg.renderFunctionSignature(
+        o.writer(),
+        decl.val,
+        .none,
+        .complete,
+        .{ .decl = decl_index },
+    );
     try o.writer().writeByte(' ');

     // In case we need to use the header, populate it with a copy of the function
@@ -2949,7 +2829,6 @@ pub fn genFunc(f: *Function) !void {
     const main_body = f.air.getMainBody();
     try genBodyResolveState(f, undefined, &.{}, main_body, false);
-
     try o.indent_writer.insertNewline();

     // Take advantage of the free_locals map to bucket locals per type. All
@@ -3007,20 +2886,25 @@ pub fn genDecl(o: *Object) !void {
     if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return;

     if (decl.val.getExternFunc(zcu)) |_| {
-        const fwd_decl_writer = o.dg.fwdDeclWriter();
-        try fwd_decl_writer.writeAll("zig_extern ");
-        try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 });
-        try fwd_decl_writer.writeAll(";\n");
-        try genExports(o);
+        const fwd = o.dg.fwdDeclWriter();
+        try fwd.writeAll("zig_extern ");
+        try o.dg.renderFunctionSignature(
+            fwd,
+            decl.val,
+            decl.alignment,
+            .forward,
+            .{ .@"export" = .{
+                .main_name = decl.name,
+                .extern_name = decl.name,
+            } },
+        );
+        try fwd.writeAll(";\n");
     } else if (decl.val.getVariable(zcu)) |variable| {
-        try o.dg.renderFwdDecl(decl_index, variable, .final);
-        try genExports(o);
+        try o.dg.renderFwdDecl(decl_index, variable);

         if (variable.is_extern) return;

-        const is_global = variable.is_extern or o.dg.declIsGlobal(decl.val);
         const w = o.writer();
-        if (!is_global) try w.writeAll("static ");
         if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
         if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal ");
         if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s|
@@ -3032,46 +2916,27 @@ pub fn genDecl(o: *Object) !void {
         try w.writeByte(';');
         try o.indent_writer.insertNewline();
     } else {
-        const is_global = o.dg.zcu.decl_exports.contains(decl_index);
         const decl_c_value = .{ .decl = decl_index };
-        try genDeclValue(o, decl.val, is_global, decl_c_value, decl.alignment, decl.@"linksection");
+        try genDeclValue(o, decl.val, decl_c_value, decl.alignment, decl.@"linksection");
     }
 }

 pub fn genDeclValue(
     o: *Object,
     val: Value,
-    is_global: bool,
     decl_c_value: CValue,
     alignment: Alignment,
     @"linksection": InternPool.OptionalNullTerminatedString,
 ) !void {
     const zcu = o.dg.zcu;
-    const fwd_decl_writer = o.dg.fwdDeclWriter();
-
     const ty = val.typeOf(zcu);
-    try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
-    try o.dg.renderTypeAndName(fwd_decl_writer, ty, decl_c_value, Const, alignment, .complete);
-    switch (o.dg.pass) {
-        .decl => |decl_index| {
-            if (zcu.decl_exports.get(decl_index)) |exports| {
-                const export_name = exports.items[0].opts.name.toSlice(&zcu.intern_pool);
-                if (isMangledIdent(export_name, true)) {
-                    try fwd_decl_writer.print(" zig_mangled_final({ }, {s})", .{
-                        fmtIdent(export_name), fmtStringLiteral(export_name, null),
-                    });
-                }
-            }
-        },
-        .anon => {},
-        .flush => unreachable,
-    }
-    try fwd_decl_writer.writeAll(";\n");
-    try genExports(o);
+    const fwd = o.dg.fwdDeclWriter();
+    try fwd.writeAll("static ");
+    try o.dg.renderTypeAndName(fwd, ty, decl_c_value, Const, alignment, .complete);
+    try fwd.writeAll(";\n");

     const w = o.writer();
-    if (!is_global) try w.writeAll("static ");
     if (@"linksection".toSlice(&zcu.intern_pool)) |s|
         try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)});
     try o.dg.renderTypeAndName(w, ty, decl_c_value, Const, alignment, .complete);
@@ -3080,22 +2945,73 @@ pub fn genDeclValue(
     try w.writeAll(";\n");
 }

-pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
-    const tracy = trace(@src());
-    defer tracy.end();
-
+pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const u32) !void {
     const zcu = dg.zcu;
-    const decl_index = dg.pass.decl;
-    const decl = zcu.declPtr(decl_index);
-    const writer = dg.fwdDeclWriter();
+    const ip = &zcu.intern_pool;
+    const fwd = dg.fwdDeclWriter();

-    switch (decl.typeOf(zcu).zigTypeTag(zcu)) {
-        .Fn => if (dg.declIsGlobal(decl.val)) {
-            try writer.writeAll("zig_extern ");
-            try dg.renderFunctionSignature(writer, dg.pass.decl, .complete, .{ .export_index = 0 });
-            try dg.fwd_decl.appendSlice(";\n");
+    const main_name = zcu.all_exports.items[export_indices[0]].opts.name;
+    try fwd.writeAll("#define ");
+    switch (exported) {
+        .decl_index => |decl_index| try dg.renderDeclName(fwd, decl_index),
+        .value => |value| try DeclGen.renderAnonDeclName(fwd, Value.fromInterned(value)),
+    }
+    try fwd.writeByte(' ');
+    try fwd.print("{ }", .{fmtIdent(main_name.toSlice(ip))});
+    try fwd.writeByte('\n');
+
+    const is_const = switch (ip.indexToKey(exported.getValue(zcu).toIntern())) {
+        .func, .extern_func => return for (export_indices) |export_index| {
+            const @"export" = &zcu.all_exports.items[export_index];
+            try fwd.writeAll("zig_extern ");
+            if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn ");
+            try dg.renderFunctionSignature(
+                fwd,
+                exported.getValue(zcu),
+                exported.getAlign(zcu),
+                .forward,
+                .{ .@"export" = .{
+                    .main_name = main_name,
+                    .extern_name = @"export".opts.name,
+                } },
+            );
+            try fwd.writeAll(";\n");
         },
-        else => {},
+        .variable => |variable| variable.is_const,
+        else => true,
+    };
+    for (export_indices) |export_index| {
+        const @"export" = &zcu.all_exports.items[export_index];
+        try fwd.writeAll("zig_extern ");
+        if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage ");
+        const extern_name = @"export".opts.name.toSlice(ip);
+        const is_mangled = isMangledIdent(extern_name, true);
+        const is_export = @"export".opts.name != main_name;
+        try dg.renderTypeAndName(
+            fwd,
+            exported.getValue(zcu).typeOf(zcu),
+            .{ .identifier = extern_name },
+            CQualifiers.init(.{ .@"const" = is_const }),
+            exported.getAlign(zcu),
+            .complete,
+        );
+        if (is_mangled and is_export) {
+            try fwd.print(" zig_mangled_export({ }, {s}, {s})", .{
+                fmtIdent(extern_name),
+                fmtStringLiteral(extern_name, null),
+                fmtStringLiteral(main_name.toSlice(ip), null),
+            });
+        } else if (is_mangled) {
+            try fwd.print(" zig_mangled({ }, {s})", .{
+                fmtIdent(extern_name), fmtStringLiteral(extern_name, null),
+            });
+        } else if (is_export) {
+            try fwd.print(" zig_export({s}, {s})", .{
+                fmtStringLiteral(main_name.toSlice(ip), null),
+                fmtStringLiteral(extern_name, null),
+            });
+        }
+        try fwd.writeAll(";\n");
     }
 }
@@ -4552,7 +4468,7 @@ fn airCall(
         };
     };
     switch (modifier) {
-        .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0),
+        .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl),
         inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName(
             @unionInit(LazyFnKey, @tagName(m), fn_decl),
             @unionInit(LazyFnValue.Data, @tagName(m), {}),
diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig
index e316d7a154..0a0d84f061 100644
--- a/src/codegen/c/Type.zig
+++ b/src/codegen/c/Type.zig
@@ -2583,6 +2583,6 @@ const assert = std.debug.assert;
 const CType = @This();
 const Module = @import("../../Package/Module.zig");
 const std = @import("std");
-const Type = @import("../../type.zig").Type;
+const Type = @import("../../Type.zig");
 const Zcu = @import("../../Zcu.zig");
 const DeclIndex = @import("../../InternPool.zig").DeclIndex;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 00cfd4404a..02933929c8 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -22,7 +22,7 @@ const Package = @import("../Package.zig");
 const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
 const Value = @import("../Value.zig");
-const Type = @import("../type.zig").Type;
+const Type = @import("../Type.zig");
 const x86_64_abi = @import("../arch/x86_64/abi.zig");
 const wasm_c_abi = @import("../arch/wasm/abi.zig");
 const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
@@ -848,10 +848,6 @@ pub const Object = struct {
     /// Note that the values are not added until `emit`, when all errors in
     /// the compilation are known.
     error_name_table: Builder.Variable.Index,
-    /// This map is usually very close to empty. It tracks only the cases when a
-    /// second extern Decl could not be emitted with the correct name due to a
-    /// name collision.
-    extern_collisions: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, void),
     /// Memoizes a null `?usize` value.
     null_opt_usize: Builder.Constant,
@@ -1011,7 +1007,6 @@ pub const Object = struct {
             .named_enum_map = .{},
             .type_map = .{},
             .error_name_table = .none,
-            .extern_collisions = .{},
             .null_opt_usize = .no_init,
             .struct_field_map = .{},
         };
@@ -1029,7 +1024,6 @@ pub const Object = struct {
         self.anon_decl_map.deinit(gpa);
         self.named_enum_map.deinit(gpa);
         self.type_map.deinit(gpa);
-        self.extern_collisions.deinit(gpa);
         self.builder.deinit();
         self.struct_field_map.deinit(gpa);
         self.* = undefined;
@@ -1121,61 +1115,6 @@ pub const Object = struct {
         try object.builder.finishModuleAsm();
     }

-    fn resolveExportExternCollisions(object: *Object) !void {
-        const mod = object.module;
-
-        // This map has externs with incorrect symbol names.
-        for (object.extern_collisions.keys()) |decl_index| {
-            const global = object.decl_map.get(decl_index) orelse continue;
-            // Same logic as below but for externs instead of exports.
-            const decl_name = object.builder.strtabStringIfExists(mod.declPtr(decl_index).name.toSlice(&mod.intern_pool)) orelse continue;
-            const other_global = object.builder.getGlobal(decl_name) orelse continue;
-            if (other_global.toConst().getBase(&object.builder) ==
-                global.toConst().getBase(&object.builder)) continue;
-
-            try global.replace(other_global, &object.builder);
-        }
-        object.extern_collisions.clearRetainingCapacity();
-
-        for (mod.decl_exports.keys(), mod.decl_exports.values()) |decl_index, export_list| {
-            const global = object.decl_map.get(decl_index) orelse continue;
-            try resolveGlobalCollisions(object, global, export_list.items);
-        }
-
-        for (mod.value_exports.keys(), mod.value_exports.values()) |val, export_list| {
-            const global = object.anon_decl_map.get(val) orelse continue;
-            try resolveGlobalCollisions(object, global, export_list.items);
-        }
-    }
-
-    fn resolveGlobalCollisions(
-        object: *Object,
-        global: Builder.Global.Index,
-        export_list: []const *Module.Export,
-    ) !void {
-        const mod = object.module;
-        const global_base = global.toConst().getBase(&object.builder);
-        for (export_list) |exp| {
-            // Detect if the LLVM global has already been created as an extern. In such
-            // case, we need to replace all uses of it with this exported global.
-            const exp_name = object.builder.strtabStringIfExists(exp.opts.name.toSlice(&mod.intern_pool)) orelse continue;
-
-            const other_global = object.builder.getGlobal(exp_name) orelse continue;
-            if (other_global.toConst().getBase(&object.builder) == global_base) continue;
-
-            try global.takeName(other_global, &object.builder);
-            try other_global.replace(global, &object.builder);
-            // Problem: now we need to replace in the decl_map that
-            // the extern decl index points to this new global. However we don't
-            // know the decl index.
-            // Even if we did, a future incremental update to the extern would then
-            // treat the LLVM global as an extern rather than an export, so it would
-            // need a way to check that.
-            // This is a TODO that needs to be solved when making
-            // the LLVM backend support incremental compilation.
-        }
-    }
-
     pub const EmitOptions = struct {
         pre_ir_path: ?[]const u8,
         pre_bc_path: ?[]const u8,
@@ -1193,7 +1132,6 @@ pub const Object = struct {
     pub fn emit(self: *Object, options: EmitOptions) !void {
         {
-            try self.resolveExportExternCollisions();
             try self.genErrorNameTable();
             try self.genCmpLtErrorsLenFunction();
             try self.genModuleLevelAssembly();
@@ -1698,8 +1636,7 @@ pub const Object = struct {
         const file = try o.getDebugFile(namespace.file_scope);

         const line_number = decl.navSrcLine(zcu) + 1;
-        const is_internal_linkage = decl.val.getExternFunc(zcu) == null and
-            !zcu.decl_exports.contains(decl_index);
+        const is_internal_linkage = decl.val.getExternFunc(zcu) == null;
         const debug_decl_type = try o.lowerDebugType(decl.typeOf(zcu));

         const subprogram = try o.builder.debugSubprogram(
@@ -1752,7 +1689,7 @@ pub const Object = struct {
         fg.genBody(air.getMainBody()) catch |err| switch (err) {
             error.CodegenFail => {
                 decl.analysis = .codegen_failure;
-                try zcu.failed_decls.put(zcu.gpa, decl_index, dg.err_msg.?);
+                try zcu.failed_analysis.put(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?);
                 dg.err_msg = null;
                 return;
             },
@@ -1760,8 +1697,6 @@ pub const Object = struct {
         };

         try fg.wip.finish();
-
-        try o.updateExports(zcu, .{ .decl_index = decl_index }, zcu.getDeclExports(decl_index));
     }

     pub fn updateDecl(self: *Object, module: *Module, decl_index: InternPool.DeclIndex) !void {
@@ -1775,72 +1710,31 @@ pub const Object = struct {
         dg.genDecl() catch |err| switch (err) {
             error.CodegenFail => {
                 decl.analysis = .codegen_failure;
-                try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?);
+                try module.failed_analysis.put(module.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?);
                 dg.err_msg = null;
                 return;
             },
             else => |e| return e,
         };
-
-        try self.updateExports(module, .{ .decl_index = decl_index }, module.getDeclExports(decl_index));
     }

     pub fn updateExports(
         self: *Object,
         mod: *Module,
         exported: Module.Exported,
-        exports: []const *Module.Export,
+        export_indices: []const u32,
     ) link.File.UpdateExportsError!void {
         const decl_index = switch (exported) {
             .decl_index => |i| i,
-            .value => |val| return updateExportedValue(self, mod, val, exports),
+            .value => |val| return updateExportedValue(self, mod, val, export_indices),
         };
-        const gpa = mod.gpa;
         const ip = &mod.intern_pool;
-        // If the module does not already have the function, we ignore this function call
-        // because we call `updateExports` at the end of `updateFunc` and `updateDecl`.
-        const global_index = self.decl_map.get(decl_index) orelse return;
+        const global_index = self.decl_map.get(decl_index).?;
         const decl = mod.declPtr(decl_index);
         const comp = mod.comp;
-        if (decl.isExtern(mod)) {
-            const decl_name = decl_name: {
-                if (mod.getTarget().isWasm() and decl.val.typeOf(mod).zigTypeTag(mod) == .Fn) {
-                    if (decl.getOwnedExternFunc(mod).?.lib_name.toSlice(ip)) |lib_name| {
-                        if (!std.mem.eql(u8, lib_name, "c")) {
-                            break :decl_name try self.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name });
-                        }
-                    }
-                }
-                break :decl_name try self.builder.strtabString(decl.name.toSlice(ip));
-            };
-            if (self.builder.getGlobal(decl_name)) |other_global| {
-                if (other_global != global_index) {
-                    try self.extern_collisions.put(gpa, decl_index, {});
-                }
-            }
-
-            try global_index.rename(decl_name, &self.builder);
-            global_index.setLinkage(.external, &self.builder);
-            global_index.setUnnamedAddr(.default, &self.builder);
-            if (comp.config.dll_export_fns)
-                global_index.setDllStorageClass(.default, &self.builder);
-
-            if (decl.val.getVariable(mod)) |decl_var| {
-                global_index.ptrConst(&self.builder).kind.variable.setThreadLocal(
-                    if (decl_var.is_threadlocal) .generaldynamic else .default,
-                    &self.builder,
-                );
-                if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &self.builder);
-            }
-        } else if (exports.len != 0) {
-            const main_exp_name = try self.builder.strtabString(exports[0].opts.name.toSlice(ip));
-            try global_index.rename(main_exp_name, &self.builder);
-
-            if (decl.val.getVariable(mod)) |decl_var| if (decl_var.is_threadlocal)
-                global_index.ptrConst(&self.builder).kind
-                    .variable.setThreadLocal(.generaldynamic, &self.builder);
-
-            return updateExportedGlobal(self, mod, global_index, exports);
+        if (export_indices.len != 0) {
+            return updateExportedGlobal(self, mod, global_index, export_indices);
         } else {
             const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(mod)).toSlice(ip));
             try global_index.rename(fqn, &self.builder);
@@ -1848,17 +1742,6 @@ pub const Object = struct {
             if (comp.config.dll_export_fns)
                 global_index.setDllStorageClass(.default, &self.builder);
             global_index.setUnnamedAddr(.unnamed_addr, &self.builder);
-            if (decl.val.getVariable(mod)) |decl_var| {
-                const decl_namespace = mod.namespacePtr(decl.src_namespace);
-                const single_threaded = decl_namespace.file_scope.mod.single_threaded;
-                global_index.ptrConst(&self.builder).kind.variable.setThreadLocal(
-                    if (decl_var.is_threadlocal and !single_threaded)
-                        .generaldynamic
-                    else
-                        .default,
-                    &self.builder,
-                );
-            }
         }
     }

@@ -1866,11 +1749,11 @@ pub const Object = struct {
     fn updateExportedValue(
         o: *Object,
         mod: *Module,
         exported_value: InternPool.Index,
-        exports: []const *Module.Export,
+        export_indices: []const u32,
     ) link.File.UpdateExportsError!void {
         const gpa = mod.gpa;
         const ip = &mod.intern_pool;
-        const main_exp_name = try o.builder.strtabString(exports[0].opts.name.toSlice(ip));
+        const main_exp_name = try o.builder.strtabString(mod.all_exports.items[export_indices[0]].opts.name.toSlice(ip));
         const global_index = i: {
             const gop = try o.anon_decl_map.getOrPut(gpa, exported_value);
             if (gop.found_existing) {
@@ -1894,32 +1777,57 @@ pub const Object = struct {
             try variable_index.setInitializer(init_val, &o.builder);
             break :i global_index;
         };
-        return updateExportedGlobal(o, mod, global_index, exports);
+        return updateExportedGlobal(o, mod, global_index, export_indices);
     }

     fn updateExportedGlobal(
         o: *Object,
         mod: *Module,
         global_index: Builder.Global.Index,
-        exports: []const *Module.Export,
+        export_indices: []const u32,
     ) link.File.UpdateExportsError!void {
         const comp = mod.comp;
         const ip = &mod.intern_pool;
+        const first_export = mod.all_exports.items[export_indices[0]];
+
+        // We will rename this global to have a name matching `first_export`.
+        // Successive exports become aliases.
+        // If the first export name already exists, then there is a corresponding
+        // extern global - we replace it with this global.
+        const first_exp_name = try o.builder.strtabString(first_export.opts.name.toSlice(ip));
+        if (o.builder.getGlobal(first_exp_name)) |other_global| replace: {
+            if (other_global.toConst().getBase(&o.builder) == global_index.toConst().getBase(&o.builder)) {
+                break :replace; // this global already has the name we want
+            }
+            try global_index.takeName(other_global, &o.builder);
+            try other_global.replace(global_index, &o.builder);
+            // Problem: now we need to replace in the decl_map that
+            // the extern decl index points to this new global. However we don't
+            // know the decl index.
+            // Even if we did, a future incremental update to the extern would then
+            // treat the LLVM global as an extern rather than an export, so it would
+            // need a way to check that.
+            // This is a TODO that needs to be solved when making
+            // the LLVM backend support incremental compilation.
+        } else {
+            try global_index.rename(first_exp_name, &o.builder);
+        }
+
         global_index.setUnnamedAddr(.default, &o.builder);
         if (comp.config.dll_export_fns)
             global_index.setDllStorageClass(.dllexport, &o.builder);
-        global_index.setLinkage(switch (exports[0].opts.linkage) {
+        global_index.setLinkage(switch (first_export.opts.linkage) {
             .internal => unreachable,
             .strong => .external,
             .weak => .weak_odr,
             .link_once => .linkonce_odr,
         }, &o.builder);
-        global_index.setVisibility(switch (exports[0].opts.visibility) {
+        global_index.setVisibility(switch (first_export.opts.visibility) {
             .default => .default,
             .hidden => .hidden,
             .protected => .protected,
         }, &o.builder);
-        if (exports[0].opts.section.toSlice(ip)) |section|
+        if (first_export.opts.section.toSlice(ip)) |section|
             switch (global_index.ptrConst(&o.builder).kind) {
                 .variable => |impl_index| impl_index.setSection(
                     try o.builder.string(section),
@@ -1936,7 +1844,8 @@ pub const Object = struct {
         // The planned solution to this is https://github.com/ziglang/zig/issues/13265
         // Until then we iterate over existing aliases and make them point
         // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
-        for (exports[1..]) |exp| {
+        for (export_indices[1..]) |export_idx| {
+            const exp = mod.all_exports.items[export_idx];
             const exp_name = try o.builder.strtabString(exp.opts.name.toSlice(ip));
             if (o.builder.getGlobal(exp_name)) |global| {
                 switch (global.ptrConst(&o.builder).kind) {
@@ -1944,7 +1853,13 @@ pub const Object = struct {
                         alias.setAliasee(global_index.toConst(), &o.builder);
                         continue;
                     },
-                    .variable, .function => {},
+                    .variable, .function => {
+                        // This existing global is an `extern` corresponding to this export.
+                        // Replace it with the global being exported.
+                        // This existing global must be replaced with the alias.
+                        try global.rename(.empty, &o.builder);
+                        try global.replace(global_index, &o.builder);
+                    },
                     .replaced => unreachable,
                 }
             }
@@ -2688,7 +2603,10 @@ pub const Object = struct {
             if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;

             const field_size = Type.fromInterned(field_ty).abiSize(mod);
-            const field_align = mod.unionFieldNormalAlignment(union_type, @intCast(field_index));
+            const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) {
+                .@"packed" => .none,
+                .auto, .@"extern" => mod.unionFieldNormalAlignment(union_type, @intCast(field_index)),
+            };
             const field_name = tag_type.names.get(ip)[field_index];

             fields.appendAssumeCapacity(try o.builder.debugMemberType(
@@ -4729,7 +4647,7 @@ pub const DeclGen = struct {
         const o = dg.object;
         const gpa = o.gpa;
         const mod = o.module;
-        const src_loc = dg.decl.navSrcLoc(mod).upgrade(mod);
+        const src_loc = dg.decl.navSrcLoc(mod);
         dg.err_msg = try Module.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args);
         return error.CodegenFail;
     }
@@ -4762,36 +4680,77 @@ pub const DeclGen = struct {
             else => try o.lowerValue(init_val),
         }, &o.builder);

+        if (decl.val.getVariable(zcu)) |decl_var| {
+            const decl_namespace = zcu.namespacePtr(decl.src_namespace);
+            const single_threaded = decl_namespace.file_scope.mod.single_threaded;
+            variable_index.setThreadLocal(
+                if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default,
+                &o.builder,
+            );
+        }
+
         const line_number = decl.navSrcLine(zcu) + 1;
-        const is_internal_linkage = !o.module.decl_exports.contains(decl_index);
         const namespace = zcu.namespacePtr(decl.src_namespace);
         const owner_mod = namespace.file_scope.mod;
-        if (owner_mod.strip) return;
+        if (!owner_mod.strip) {
+            const debug_file = try o.getDebugFile(namespace.file_scope);

-        const debug_file = try o.getDebugFile(namespace.file_scope);
+            const debug_global_var = try o.builder.debugGlobalVar(
+                try o.builder.metadataString(decl.name.toSlice(ip)), // Name
+                try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name
+                debug_file, // File
+                debug_file, // Scope
+                line_number,
+                try o.lowerDebugType(decl.typeOf(zcu)),
+                variable_index,
+                .{ .local = !decl.isExtern(zcu) },
+            );

-        const debug_global_var = try o.builder.debugGlobalVar(
-            try o.builder.metadataString(decl.name.toSlice(ip)), // Name
-            try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name
-            debug_file, // File
-            debug_file, // Scope
-            line_number,
-            try o.lowerDebugType(decl.typeOf(zcu)),
-            variable_index,
-            .{ .local = is_internal_linkage },
-        );
+            const debug_expression = try o.builder.debugExpression(&.{});

-        const debug_expression = try o.builder.debugExpression(&.{});
+            const debug_global_var_expression = try o.builder.debugGlobalVarExpression(
+                debug_global_var,
+                debug_expression,
+            );

-        const debug_global_var_expression = try o.builder.debugGlobalVarExpression(
-            debug_global_var,
-            debug_expression,
-        );
+            variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder);
+            try o.debug_globals.append(o.gpa, debug_global_var_expression);
+        }

-        variable_index.setGlobalVariableExpression(debug_global_var_expression, &o.builder);
-        try o.debug_globals.append(o.gpa, debug_global_var_expression);
+        if (decl.isExtern(zcu)) {
+            const global_index = o.decl_map.get(decl_index).?;
+
+            const decl_name = decl_name: {
+                if (zcu.getTarget().isWasm() and decl.typeOf(zcu).zigTypeTag(zcu) == .Fn) {
+                    if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| {
+                        if (!std.mem.eql(u8, lib_name, "c")) {
+                            break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name });
+                        }
+                    }
+                }
+                break :decl_name try o.builder.strtabString(decl.name.toSlice(ip));
+            };
+
+            if (o.builder.getGlobal(decl_name)) |other_global| {
+                if (other_global != global_index) {
+                    // Another global already has this name; just use it in place of this global.
+                    try global_index.replace(other_global, &o.builder);
+                    return;
+                }
+            }
+
+            try global_index.rename(decl_name, &o.builder);
+            global_index.setLinkage(.external, &o.builder);
+            global_index.setUnnamedAddr(.default, &o.builder);
+            if (zcu.comp.config.dll_export_fns)
+                global_index.setDllStorageClass(.default, &o.builder);
+
+            if (decl.val.getVariable(zcu)) |decl_var| {
+                if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder);
+            }
+        }
     }
 };
@@ -5193,7 +5152,6 @@ pub const FuncGen = struct {
     const fqn = try decl.fullyQualifiedName(zcu);

-    const is_internal_linkage = !zcu.decl_exports.contains(decl_index);
     const fn_ty = try zcu.funcType(.{
         .param_types = &.{},
         .return_type = .void_type,
@@ -5211,7 +5169,7 @@ pub const FuncGen = struct {
         .sp_flags = .{
             .Optimized = owner_mod.optimize_mode != .Debug,
             .Definition = true,
-            .LocalToUnit = is_internal_linkage,
+            .LocalToUnit = true, // TODO: we can't know this at this point, since the function could be exported later!
         },
     },
     o.debug_compile_unit,
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index ee163c3154..c56a5a799e 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -9,7 +9,7 @@ const Zcu = @import("../Zcu.zig");
 /// Deprecated.
 const Module = Zcu;
 const Decl = Module.Decl;
-const Type = @import("../type.zig").Type;
+const Type = @import("../Type.zig");
 const Value = @import("../Value.zig");
 const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
@@ -218,7 +218,7 @@ pub const Object = struct {
         decl_gen.genDecl() catch |err| switch (err) {
             error.CodegenFail => {
-                try mod.failed_decls.put(mod.gpa, decl_index, decl_gen.error_msg.?);
+                try mod.failed_analysis.put(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), decl_gen.error_msg.?);
             },
             else => |other| {
                 // There might be an error that happened *after* self.error_msg
@@ -415,7 +415,7 @@ const DeclGen = struct {
     pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
         @setCold(true);
         const mod = self.module;
-        const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod).upgrade(mod);
+        const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod);
         assert(self.error_msg == null);
         self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
         return error.CodegenFail;
@@ -6439,7 +6439,7 @@ const DeclGen = struct {
         // TODO: Translate proper error locations.
         assert(as.errors.items.len != 0);
         assert(self.error_msg == null);
-        const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod).upgrade(mod);
+        const src_loc = self.module.declPtr(self.decl_index).navSrcLoc(mod);
         self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
         const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
diff --git a/src/link.zig b/src/link.zig
index 75a9723f1c..298d81d80c 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -18,7 +18,7 @@ const Zcu = @import("Zcu.zig");
 /// Deprecated.
 const Module = Zcu;
 const InternPool = @import("InternPool.zig");
-const Type = @import("type.zig").Type;
+const Type = @import("Type.zig");
 const Value = @import("Value.zig");
 const LlvmObject = @import("codegen/llvm.zig").Object;
 const lldMain = @import("main.zig").lldMain;
@@ -606,12 +606,12 @@ pub const File = struct {
         base: *File,
         module: *Module,
         exported: Module.Exported,
-        exports: []const *Module.Export,
+        export_indices: []const u32,
     ) UpdateExportsError!void {
         switch (base.tag) {
             inline else => |tag| {
                 if (tag != .c and build_options.only_c) unreachable;
-                return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(module, exported, exports);
+                return @as(*tag.Type(), @fieldParentPtr("base", base)).updateExports(module, exported, export_indices);
             },
         }
     }
@@ -646,7 +646,7 @@ pub const File = struct {
         base: *File,
         decl_val: InternPool.Index,
         decl_align: InternPool.Alignment,
-        src_loc: Module.SrcLoc,
+        src_loc: Module.LazySrcLoc,
     ) !LowerResult {
         if (build_options.only_c) @compileError("unreachable");
         switch (base.tag) {
@@ -671,21 +671,20 @@ pub const File = struct {
         }
     }

-    pub fn deleteDeclExport(
+    pub fn deleteExport(
         base: *File,
-        decl_index: InternPool.DeclIndex,
+        exported: Zcu.Exported,
         name: InternPool.NullTerminatedString,
-    ) !void {
+    ) void {
         if (build_options.only_c) @compileError("unreachable");
         switch (base.tag) {
             .plan9,
-            .c,
             .spirv,
             .nvptx,
             => {},

             inline else => |tag| {
-                return @as(*tag.Type(), @fieldParentPtr("base", base)).deleteDeclExport(decl_index, name);
+                return @as(*tag.Type(), @fieldParentPtr("base", base)).deleteExport(exported, name);
             },
         }
     }
diff --git a/src/link/C.zig b/src/link/C.zig
index e6830eac8c..21245c1e30 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -14,7 +14,7 @@ const Compilation = @import("../Compilation.zig");
 const codegen = @import("../codegen/c.zig");
 const link = @import("../link.zig");
 const trace = @import("../tracy.zig").trace;
-const Type = @import("../type.zig").Type;
+const Type = @import("../Type.zig");
 const Value = @import("../Value.zig");
 const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
@@ -39,6 +39,9 @@ anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, DeclBlock) = .{},
 /// the keys of `anon_decls`.
 aligned_anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .{},

+exported_decls: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, ExportedBlock) = .{},
+exported_values: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .{},
+
 /// Optimization, `updateDecl` reuses this buffer rather than creating a new
 /// one with every call.
 fwd_decl_buf: std.ArrayListUnmanaged(u8) = .{},
@@ -80,6 +83,11 @@ pub const DeclBlock = struct {
     }
 };

+/// Per-exported-symbol data.
+pub const ExportedBlock = struct {
+    fwd_decl: String = String.empty,
+};
+
 pub fn getString(this: C, s: String) []const u8 {
     return this.string_bytes.items[s.start..][0..s.len];
 }
@@ -238,9 +246,13 @@ pub fn updateFunc(
         function.deinit();
     }

+    try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
     codegen.genFunc(&function) catch |err| switch (err) {
         error.AnalysisFail => {
-            try zcu.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?);
+            zcu.failed_analysis.putAssumeCapacityNoClobber(
+                InternPool.AnalUnit.wrap(.{ .decl = decl_index }),
+                function.object.dg.error_msg.?,
+            );
             return;
         },
         else => |e| return e,
@@ -288,7 +300,7 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
     const c_value: codegen.CValue = .{ .constant = Value.fromInterned(anon_decl) };
     const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none;
-    codegen.genDeclValue(&object, c_value.constant, false, c_value, alignment, .none) catch |err| switch (err) {
+    codegen.genDeclValue(&object, c_value.constant, c_value, alignment, .none) catch |err| switch (err) {
         error.AnalysisFail => {
             @panic("TODO: C backend AnalysisFail on anonymous decl");
             //try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
@@ -351,9 +363,13 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
         code.* = object.code.moveToUnmanaged();
     }

+    try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
     codegen.genDecl(&object) catch |err| switch (err) {
         error.AnalysisFail => {
-            try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
+            zcu.failed_analysis.putAssumeCapacityNoClobber(
+                InternPool.AnalUnit.wrap(.{ .decl = decl_index }),
+                object.dg.error_msg.?,
+            );
             return;
         },
         else => |e| return e,
@@ -451,20 +467,40 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
     {
         var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
         defer export_names.deinit(gpa);
-        try export_names.ensureTotalCapacity(gpa, @intCast(zcu.decl_exports.entries.len));
-        for (zcu.decl_exports.values()) |exports| for (exports.items) |@"export"|
-            try export_names.put(gpa, @"export".opts.name, {});
-
-        for (self.anon_decls.values()) |*decl_block| {
-            try self.flushDeclBlock(zcu, zcu.root_mod, &f, decl_block, export_names, .none);
+        try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count()));
+        for (zcu.single_exports.values()) |export_index| {
+            export_names.putAssumeCapacity(zcu.all_exports.items[export_index].opts.name, {});
         }
+        for (zcu.multi_exports.values()) |info| {
+            try export_names.ensureUnusedCapacity(gpa, info.len);
+            for (zcu.all_exports.items[info.index..][0..info.len]) |@"export"| {
+                export_names.putAssumeCapacity(@"export".opts.name, {});
+            }
+        }
+
+        for (self.anon_decls.keys(), self.anon_decls.values()) |value, *decl_block| try self.flushDeclBlock(
+            zcu,
+            zcu.root_mod,
+            &f,
+            decl_block,
+            self.exported_values.getPtr(value),
+            export_names,
+            .none,
+        );

         for (self.decl_table.keys(), self.decl_table.values()) |decl_index, *decl_block| {
             const decl = zcu.declPtr(decl_index);
-            assert(decl.has_tv);
-            const extern_symbol_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none;
+            const extern_name = if (decl.isExtern(zcu)) decl.name.toOptional() else .none;
             const mod = zcu.namespacePtr(decl.src_namespace).file_scope.mod;
-            try self.flushDeclBlock(zcu, mod, &f, decl_block, export_names, extern_symbol_name);
+            try self.flushDeclBlock(
+                zcu,
+                mod,
+                &f,
+                decl_block,
+                self.exported_decls.getPtr(decl_index),
+                export_names,
+                extern_name,
+            );
         }
     }
@@ -497,12 +533,27 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !vo
     f.file_size += lazy_fwd_decl_len;

     // Now the code.
-    const anon_decl_values = self.anon_decls.values();
-    const decl_values = self.decl_table.values();
-    try f.all_buffers.ensureUnusedCapacity(gpa, 1 + anon_decl_values.len + decl_values.len);
+    try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.anon_decls.count() + self.decl_table.count()) * 2);
     f.appendBufAssumeCapacity(self.lazy_code_buf.items);
-    for (anon_decl_values) |db| f.appendBufAssumeCapacity(self.getString(db.code));
-    for (decl_values) |db| f.appendBufAssumeCapacity(self.getString(db.code));
+    for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| f.appendCodeAssumeCapacity(
+        if (self.exported_values.contains(anon_decl))
+            .default
+        else switch (zcu.intern_pool.indexToKey(anon_decl)) {
+            .extern_func => .zig_extern,
+            .variable => |variable| if (variable.is_extern) .zig_extern else .static,
+            else => .static,
+        },
+        self.getString(decl_block.code),
+    );
+    for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| f.appendCodeAssumeCapacity(
+        if (self.exported_decls.contains(decl_index))
+            .default
+        else if (zcu.declPtr(decl_index).isExtern(zcu))
+            .zig_extern
+        else
+            .static,
+        self.getString(decl_block.code),
+    );

     const file = self.base.file.?;
     try file.setEndPos(f.file_size);
@@ -532,6 +583,16 @@ const Flush = struct {
         f.file_size += buf.len;
     }

+    fn appendCodeAssumeCapacity(f: *Flush, storage: enum { default, zig_extern, static }, code: []const u8) void {
+        if (code.len == 0) return;
+        f.appendBufAssumeCapacity(switch (storage) {
+            .default => "\n",
+            .zig_extern => "\nzig_extern ",
+            .static => "\nstatic ",
+        });
+        f.appendBufAssumeCapacity(code);
+    }
+
     fn deinit(f: *Flush, gpa: Allocator) void {
         f.all_buffers.deinit(gpa);
         f.asm_buf.deinit(gpa);
@@ -719,19 +780,20 @@ fn flushDeclBlock(
     zcu: *Zcu,
     mod: *Module,
     f: *Flush,
-    decl_block: *DeclBlock,
+    decl_block: *const DeclBlock,
+    exported_block: ?*const ExportedBlock,
     export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
-    extern_symbol_name: InternPool.OptionalNullTerminatedString,
+    extern_name: InternPool.OptionalNullTerminatedString,
 ) FlushDeclError!void {
     const gpa = self.base.comp.gpa;
     try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns);
     try f.all_buffers.ensureUnusedCapacity(gpa, 1);
-    fwd_decl: {
-        if (extern_symbol_name.unwrap()) |name| {
-            if (export_names.contains(name)) break :fwd_decl;
-        }
-        f.appendBufAssumeCapacity(self.getString(decl_block.fwd_decl));
-    }
+    // avoid emitting extern decls that are already exported
+    if (extern_name.unwrap()) |name| if (export_names.contains(name)) return;
+    f.appendBufAssumeCapacity(self.getString(if (exported_block) |exported|
+        exported.fwd_decl
+    else
+        decl_block.fwd_decl));
 }

 pub fn flushEmitH(zcu: *Zcu) !void {
@@ -781,10 +843,58 @@ pub fn updateExports(
     self: *C,
     zcu: *Zcu,
     exported: Zcu.Exported,
-    exports: []const *Zcu.Export,
+    export_indices: []const u32,
 ) !void {
-    _ = exports;
-    _ = exported;
-    _ = zcu;
-    _ = self;
+    const gpa = self.base.comp.gpa;
+    const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) {
+        .decl_index => |decl_index| .{
+            zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).file_scope.mod,
+            .{ .decl = decl_index },
+            self.decl_table.getPtr(decl_index).?,
+            (try self.exported_decls.getOrPut(gpa, decl_index)).value_ptr,
+        },
+        .value => |value| .{
+            zcu.root_mod,
+            .{ .anon = value },
+            self.anon_decls.getPtr(value).?,
+            (try self.exported_values.getOrPut(gpa, value)).value_ptr,
+        },
+    };
+    const ctype_pool = &decl_block.ctype_pool;
+    const fwd_decl = &self.fwd_decl_buf;
+    fwd_decl.clearRetainingCapacity();
+    var dg: codegen.DeclGen = .{
+        .gpa = gpa,
+        .zcu = zcu,
+        .mod = mod,
+        .error_msg = null,
+        .pass = pass,
+        .is_naked_fn = false,
+        .fwd_decl = fwd_decl.toManaged(gpa),
+        .ctype_pool = decl_block.ctype_pool,
+        .scratch = .{},
+        .anon_decl_deps = .{},
+        .aligned_anon_decls = .{},
+    };
+    defer {
+        assert(dg.anon_decl_deps.count() == 0);
+        assert(dg.aligned_anon_decls.count() == 0);
+        fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
+        ctype_pool.* = dg.ctype_pool.move();
+        ctype_pool.freeUnusedCapacity(gpa);
+        dg.scratch.deinit(gpa);
+    }
+    try codegen.genExports(&dg, exported, export_indices);
+    exported_block.* = .{ .fwd_decl = try self.addString(dg.fwd_decl.items) };
+}
+
+pub fn deleteExport(
+    self: *C,
+    exported: Zcu.Exported,
+    _: InternPool.NullTerminatedString,
+) void {
+    switch (exported) {
+        .decl_index => |decl_index| _ = self.exported_decls.swapRemove(decl_index),
+        .value => |value| _ = self.exported_values.swapRemove(value),
+    }
 }
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 4524441f3b..55028fc8ad 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1144,7 +1144,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air:

     const res = try codegen.generateFunction(
         &self.base,
-        decl.navSrcLoc(mod).upgrade(mod),
+        decl.navSrcLoc(mod),
         func_index,
         air,
         liveness,
@@ -1155,16 +1155,14 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air:
         .ok => code_buffer.items,
         .fail => |em| {
             func.analysis(&mod.intern_pool).state = .codegen_failure;
-            try mod.failed_decls.put(mod.gpa, decl_index, em);
+            try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
     };

     try self.updateDeclCode(decl_index, code, .FUNCTION);

-    // Since we updated the vaddr and the size, each corresponding export
-    // symbol also needs to be updated.
-    return self.updateExports(mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index));
+    // Exports will be updated by `Zcu.processExports` after the update.
 }

 pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclIndex) !u32 {
@@ -1181,11 +1179,11 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd
     const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index });
     defer gpa.free(sym_name);
     const ty = val.typeOf(mod);
-    const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod).upgrade(mod))) {
+    const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.navSrcLoc(mod))) {
         .ok => |atom_index| atom_index,
         .fail => |em| {
             decl.analysis = .codegen_failure;
-            try mod.failed_decls.put(mod.gpa, decl_index, em);
+            try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             log.err("{s}", .{em.msg});
             return error.CodegenFail;
         },
@@ -1199,7 +1197,7 @@ const LowerConstResult = union(enum) {
     fail: *Module.ErrorMsg,
 };

-fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.SrcLoc) !LowerConstResult {
+fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: InternPool.Alignment, sect_id: u16, src_loc: Module.LazySrcLoc) !LowerConstResult {
     const gpa = self.base.comp.gpa;

     var code_buffer = std.ArrayList(u8).init(gpa);
@@ -1272,23 +1270,21 @@ pub fn updateDecl(
     defer code_buffer.deinit();

     const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
-    const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .none, .{
+    const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{
         .parent_atom_index = atom.getSymbolIndex().?,
     });
     const code = switch (res) {
         .ok => code_buffer.items,
         .fail => |em| {
             decl.analysis = .codegen_failure;
-            try mod.failed_decls.put(mod.gpa, decl_index, em);
+            try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
     };

     try self.updateDeclCode(decl_index, code, .NULL);

-    // Since we updated the vaddr and the size, each corresponding export
-    // symbol also needs to be updated.
-    return self.updateExports(mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index));
+    // Exports will be updated by `Zcu.processExports` after the update.
 }

 fn updateLazySymbolAtom(
@@ -1313,14 +1309,7 @@ fn updateLazySymbolAtom(
     const atom = self.getAtomPtr(atom_index);
     const local_sym_index = atom.getSymbolIndex().?;

-    const src = if (sym.ty.srcLocOrNull(mod)) |src|
-        src.upgrade(mod)
-    else
-        Module.SrcLoc{
-            .file_scope = undefined,
-            .base_node = undefined,
-            .lazy = .unneeded,
-        };
+    const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded;
     const res = try codegen.generateLazySymbol(
         &self.base,
         src,
@@ -1509,7 +1498,7 @@ pub fn updateExports(
     self: *Coff,
     mod: *Module,
     exported: Module.Exported,
-    exports: []const *Module.Export,
+    export_indices: []const u32,
 ) link.File.UpdateExportsError!void {
     if (build_options.skip_non_native and builtin.object_format != .coff) {
         @panic("Attempted to compile for object format that was disabled by build configuration");
@@ -1522,7 +1511,8 @@ pub fn updateExports(
     if (comp.config.use_llvm) {
         // Even in the case of LLVM, we need to notice certain exported symbols in order to
        // detect the default subsystem.
-        for (exports) |exp| {
+        for (export_indices) |export_idx| {
+            const exp = mod.all_exports.items[export_idx];
             const exported_decl_index = switch (exp.exported) {
                 .decl_index => |i| i,
                 .value => continue,
@@ -1552,7 +1542,7 @@ pub fn updateExports(
         }
     }

-    if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports);
+    if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);

     const gpa = comp.gpa;
@@ -1562,15 +1552,15 @@ pub fn updateExports(
             break :blk self.decls.getPtr(decl_index).?;
         },
         .value => |value| self.anon_decls.getPtr(value) orelse blk: {
-            const first_exp = exports[0];
-            const res = try self.lowerAnonDecl(value, .none, first_exp.getSrcLoc(mod));
+            const first_exp = mod.all_exports.items[export_indices[0]];
+            const res = try self.lowerAnonDecl(value, .none, first_exp.src);
             switch (res) {
                 .ok => {},
                 .fail => |em| {
                     // TODO maybe it's enough to return an error here and let Module.processExportsInner
                     // handle the error?
                     try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
-                    mod.failed_exports.putAssumeCapacityNoClobber(first_exp, em);
+                    mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em);
                     return;
                 },
             }
@@ -1580,14 +1570,15 @@ pub fn updateExports(
     const atom_index = metadata.atom;
     const atom = self.getAtom(atom_index);

-    for (exports) |exp| {
+    for (export_indices) |export_idx| {
+        const exp = mod.all_exports.items[export_idx];
         log.debug("adding new export '{}'", .{exp.opts.name.fmt(&mod.intern_pool)});

         if (exp.opts.section.toSlice(&mod.intern_pool)) |section_name| {
             if (!mem.eql(u8, section_name, ".text")) {
-                try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
+                try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create(
                     gpa,
-                    exp.getSrcLoc(mod),
+                    exp.src,
                     "Unimplemented: ExportOptions.section",
                     .{},
                 ));
@@ -1596,9 +1587,9 @@ pub fn updateExports(
         }

         if (exp.opts.linkage == .link_once) {
-            try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
+            try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create(
                 gpa,
-                exp.getSrcLoc(mod),
+                exp.src,
                 "Unimplemented: GlobalLinkage.link_once",
                 .{},
             ));
@@ -1641,13 +1632,16 @@ pub fn updateExports(
     }
 }

-pub fn deleteDeclExport(
+pub fn deleteExport(
     self: *Coff,
-    decl_index: InternPool.DeclIndex,
+    exported: Zcu.Exported,
     name: InternPool.NullTerminatedString,
 ) void {
     if (self.llvm_object) |_| return;
-    const metadata = self.decls.getPtr(decl_index) orelse return;
+    const metadata = switch (exported) {
+        .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return,
+        .value => |value| self.anon_decls.getPtr(value) orelse return,
+    };
     const mod = self.base.comp.module.?;
     const name_slice = name.toSlice(&mod.intern_pool);
     const sym_index = metadata.getExportPtr(self, name_slice) orelse return;
@@ -1866,7 +1860,7 @@ pub fn lowerAnonDecl(
     self: *Coff,
     decl_val: InternPool.Index,
     explicit_alignment: InternPool.Alignment,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
 ) !codegen.Result {
     const gpa = self.base.comp.gpa;
     const mod = self.base.comp.module.?;
@@ -2748,8 +2742,9 @@ const Object = @import("Coff/Object.zig");
 const Relocation = @import("Coff/Relocation.zig");
 const TableSection = @import("table_section.zig").TableSection;
 const StringTable = @import("StringTable.zig");
-const Type = @import("../type.zig").Type;
+const Type = @import("../Type.zig");
 const Value = @import("../Value.zig");
+const AnalUnit = InternPool.AnalUnit;

 pub const base_tag: link.File.Tag = .coff;
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 7d576abbb4..2bb0a4c0a0 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -2969,5 +2969,5 @@ const Zcu = @import("../Zcu.zig");
 const Module = Zcu;
 const InternPool = @import("../InternPool.zig");
 const StringTable = @import("StringTable.zig");
-const Type = @import("../type.zig").Type;
+const Type = @import("../Type.zig");
 const Value = @import("../Value.zig");
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index b1048dfe9d..7510dd4956 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -552,7 +552,7 @@ pub fn lowerAnonDecl(
     self: *Elf,
     decl_val: InternPool.Index,
     explicit_alignment: InternPool.Alignment,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
 ) !codegen.Result {
     return self.zigObjectPtr().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc);
 }
@@ -3011,13 +3011,13 @@ pub fn updateExports(
     self: *Elf,
     mod: *Module,
     exported: Module.Exported,
-    exports: []const *Module.Export,
+    export_indices: []const u32,
 ) link.File.UpdateExportsError!void {
     if (build_options.skip_non_native and builtin.object_format != .elf) {
         @panic("Attempted to compile for object format that was disabled by build configuration");
     }
-    if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports);
-    return self.zigObjectPtr().?.updateExports(self, mod, exported, exports);
+    if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices);
+    return self.zigObjectPtr().?.updateExports(self, mod, exported, export_indices);
 }

 pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.DeclIndex) !void {
@@ -3025,13 +3025,13 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: InternPool.Dec
     return self.zigObjectPtr().?.updateDeclLineNumber(mod, decl_index);
 }

-pub fn deleteDeclExport(
+pub fn deleteExport(
     self: *Elf,
-    decl_index: InternPool.DeclIndex,
+    exported: Zcu.Exported,
     name: InternPool.NullTerminatedString,
 ) void {
     if (self.llvm_object) |_| return;
-    return self.zigObjectPtr().?.deleteDeclExport(self, decl_index, name);
+    return self.zigObjectPtr().?.deleteExport(self, exported, name);
 }

 fn addLinkerDefinedSymbols(self: *Elf) !void {
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index c2c5e879cb..56311dd64b 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -686,7 +686,7 @@ pub fn lowerAnonDecl(
     elf_file: *Elf,
     decl_val: InternPool.Index,
     explicit_alignment: InternPool.Alignment,
-    src_loc: Module.SrcLoc,
+    src_loc: Module.LazySrcLoc,
 ) !codegen.Result {
     const gpa = elf_file.base.comp.gpa;
     const mod = elf_file.base.comp.module.?;
@@ -1074,7 +1074,7 @@ pub fn updateFunc(
     const res = if (decl_state) |*ds|
         try codegen.generateFunction(
             &elf_file.base,
-            decl.navSrcLoc(mod).upgrade(mod),
+            decl.navSrcLoc(mod),
             func_index,
             air,
             liveness,
@@ -1084,7 +1084,7 @@ pub fn updateFunc(
     else
         try codegen.generateFunction(
             &elf_file.base,
-            decl.navSrcLoc(mod).upgrade(mod),
+            decl.navSrcLoc(mod),
             func_index,
             air,
             liveness,
@@ -1096,7 +1096,7 @@ pub fn updateFunc(
         .ok => code_buffer.items,
         .fail => |em| {
             func.analysis(&mod.intern_pool).state = .codegen_failure;
-            try mod.failed_decls.put(mod.gpa, decl_index, em);
+            try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
     };
@@ -1115,9 +1115,7 @@ pub fn updateFunc(
         );
     }

-    // Since we updated the vaddr and the size, each corresponding export
-    // symbol also needs to be
updated. - return self.updateExports(elf_file, mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index)); + // Exports will be updated by `Zcu.processExports` after the update. } pub fn updateDecl( @@ -1158,13 +1156,13 @@ pub fn updateDecl( // TODO implement .debug_info for global variables const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .{ + try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .dwarf = ds, }, .{ .parent_atom_index = sym_index, }) else - try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .none, .{ + try codegen.generateSymbol(&elf_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ .parent_atom_index = sym_index, }); @@ -1172,7 +1170,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -1194,9 +1192,7 @@ pub fn updateDecl( ); } - // Since we updated the vaddr and the size, each corresponding export - // symbol also needs to be updated. - return self.updateExports(elf_file, mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index)); + // Exports will be updated by `Zcu.processExports` after the update. } fn updateLazySymbol( @@ -1221,14 +1217,7 @@ fn updateLazySymbol( break :blk try self.strtab.insert(gpa, name); }; - const src = if (sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &elf_file.base, src, @@ -1306,12 +1295,12 @@ pub fn lowerUnnamedConst( val, ty.abiAlignment(mod), elf_file.zig_data_rel_ro_section_index.?, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), )) { .ok => |sym_index| sym_index, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -1333,7 +1322,7 @@ fn lowerConst( val: Value, required_alignment: InternPool.Alignment, output_section_index: u32, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !LowerConstResult { const gpa = elf_file.base.comp.gpa; @@ -1386,7 +1375,7 @@ pub fn updateExports( elf_file: *Elf, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1398,15 +1387,15 @@ pub fn updateExports( break :blk self.decls.getPtr(decl_index).?; }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { - const first_exp = exports[0]; - const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.getSrcLoc(mod)); + const first_exp = mod.all_exports.items[export_indices[0]]; + const res = try self.lowerAnonDecl(elf_file, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { // TODO maybe it's enough to return an error here and let Module.processExportsInner // handle the error? 
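+ // (`failed_exports` is now keyed by the export's u32 index rather than by
+ // `*Export`, presumably so entries survive reallocation of `mod.all_exports`.)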
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(first_exp, em); + mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em); return; }, } @@ -1418,13 +1407,14 @@ pub fn updateExports( const esym = self.local_esyms.items(.elf_sym)[esym_index]; const esym_shndx = self.local_esyms.items(.shndx)[esym_index]; - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice(".text", &mod.intern_pool)) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: ExportOptions.section", .{}, )); @@ -1437,9 +1427,9 @@ pub fn updateExports( .weak => elf.STB_WEAK, .link_once => { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: GlobalLinkage.LinkOnce", .{}, )); @@ -1487,13 +1477,16 @@ pub fn updateDeclLineNumber( } } -pub fn deleteDeclExport( +pub fn deleteExport( self: *ZigObject, elf_file: *Elf, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { - const metadata = self.decls.getPtr(decl_index) orelse return; + const metadata = switch (exported) { + .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return, + .value => |value| self.anon_decls.getPtr(value) orelse return, + }; const mod = elf_file.base.comp.module.?; const exp_name = name.toSlice(&mod.intern_pool); const esym_index = metadata.@"export"(self, exp_name) orelse return; @@ -1654,6 +1647,7 @@ const Module = Zcu; const Object = @import("Object.zig"); const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 4dcc11ef53..3dd3d07e6b 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -3207,22 +3207,22 @@ pub fn updateExports( self: *MachO, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports); - return self.getZigObject().?.updateExports(self, mod, exported, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); + return self.getZigObject().?.updateExports(self, mod, exported, export_indices); } -pub fn deleteDeclExport( +pub fn deleteExport( self: *MachO, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, -) Allocator.Error!void { +) void { if (self.llvm_object) |_| return; - return self.getZigObject().?.deleteDeclExport(self, decl_index, name); + return self.getZigObject().?.deleteExport(self, 
exported, name); } pub fn freeDecl(self: *MachO, decl_index: InternPool.DeclIndex) void { @@ -3239,7 +3239,7 @@ pub fn lowerAnonDecl( self: *MachO, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { return self.getZigObject().?.lowerAnonDecl(self, decl_val, explicit_alignment, src_loc); } diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index 3f0e84d6a2..c022a30664 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -459,4 +459,4 @@ const trace = @import("../../tracy.zig").trace; const Allocator = mem.Allocator; const MachO = @import("../MachO.zig"); const StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index e2202d11fc..bb5ded654d 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -572,7 +572,7 @@ pub fn lowerAnonDecl( macho_file: *MachO, decl_val: InternPool.Index, explicit_alignment: Atom.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = macho_file.base.comp.gpa; const mod = macho_file.base.comp.module.?; @@ -682,7 +682,7 @@ pub fn updateFunc( const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; const res = try codegen.generateFunction( &macho_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -694,7 +694,7 @@ pub fn updateFunc( .ok => code_buffer.items, .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -713,9 +713,7 @@ pub fn updateFunc( ); } - // Since we updated the vaddr and the size, each corresponding export - // symbol also needs to be updated. - return self.updateExports(macho_file, mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index)); + // Exports will be updated by `Zcu.processExports` after the update. } pub fn updateDecl( @@ -756,7 +754,7 @@ pub fn updateDecl( const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; - const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, dio, .{ + const res = try codegen.generateSymbol(&macho_file.base, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{ .parent_atom_index = sym_index, }); @@ -764,7 +762,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -790,9 +788,7 @@ pub fn updateDecl( ); } - // Since we updated the vaddr and the size, each corresponding export symbol also - // needs to be updated. - try self.updateExports(macho_file, mod, .{ .decl_index = decl_index }, mod.getDeclExports(decl_index)); + // Exports will be updated by `Zcu.processExports` after the update. 
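+ // (Same deferral as in `updateFunc` above: codegen paths no longer call
+ // back into `updateExports` themselves.)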
} fn updateDeclCode( @@ -1104,12 +1100,12 @@ pub fn lowerUnnamedConst( val, val.typeOf(mod).abiAlignment(mod), macho_file.zig_const_sect_index.?, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), )) { .ok => |sym_index| sym_index, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -1131,7 +1127,7 @@ fn lowerConst( val: Value, required_alignment: Atom.Alignment, output_section_index: u8, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !LowerConstResult { const gpa = macho_file.base.comp.gpa; @@ -1187,7 +1183,7 @@ pub fn updateExports( macho_file: *MachO, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1199,15 +1195,15 @@ pub fn updateExports( break :blk self.decls.getPtr(decl_index).?; }, .value => |value| self.anon_decls.getPtr(value) orelse blk: { - const first_exp = exports[0]; - const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.getSrcLoc(mod)); + const first_exp = mod.all_exports.items[export_indices[0]]; + const res = try self.lowerAnonDecl(macho_file, value, .none, first_exp.src); switch (res) { .ok => {}, .fail => |em| { // TODO maybe it's enough to return an error here and let Module.processExportsInner // handle the error? try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(first_exp, em); + mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em); return; }, } @@ -1218,13 +1214,14 @@ pub fn updateExports( const nlist_idx = macho_file.getSymbol(sym_index).nlist_idx; const nlist = self.symtab.items(.nlist)[nlist_idx]; - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice("__text", &mod.intern_pool)) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: ExportOptions.section", .{}, )); @@ -1232,9 +1229,9 @@ pub fn updateExports( } } if (exp.opts.linkage == .link_once) { - try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(mod.gpa, export_idx, try Module.ErrorMsg.create( gpa, - exp.getSrcLoc(mod), + exp.src, "Unimplemented: GlobalLinkage.link_once", .{}, )); @@ -1294,14 +1291,7 @@ fn updateLazySymbol( break :blk try self.strtab.insert(gpa, name); }; - const src = if (lazy_sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = lazy_sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &macho_file.base, src, @@ -1364,15 +1354,18 @@ pub fn updateDeclLineNumber(self: *ZigObject, mod: *Module, decl_index: InternPo } } -pub fn deleteDeclExport( +pub fn deleteExport( self: *ZigObject, macho_file: *MachO, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { const mod = macho_file.base.comp.module.?; - const metadata = 
self.decls.getPtr(decl_index) orelse return; + const metadata = switch (exported) { + .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return, + .value => |value| self.anon_decls.getPtr(value) orelse return, + }; const nlist_index = metadata.@"export"(self, name.toSlice(&mod.intern_pool)) orelse return; log.debug("deleting export '{}'", .{name.fmt(&mod.intern_pool)}); @@ -1594,6 +1587,7 @@ const Object = @import("Object.zig"); const Relocation = @import("Relocation.zig"); const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 3d059acbb5..aa9ea1b5cd 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -96,12 +96,12 @@ pub fn updateExports( self: *NvPtx, module: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { if (build_options.skip_non_native and builtin.object_format != .nvptx) @panic("Attempted to compile for object format that was disabled by build configuration"); - return self.llvm_object.updateExports(module, exported, exports); + return self.llvm_object.updateExports(module, exported, export_indices); } pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index b15ec87815..96fbaf42c7 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -15,8 +15,9 @@ const File = link.File; const build_options = @import("build_options"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); +const AnalUnit = InternPool.AnalUnit; const std = @import("std"); const builtin = @import("builtin"); @@ -60,6 +61,9 @@ fn_decl_table: std.AutoArrayHashMapUnmanaged( ) = .{}, /// the code is modified when relocated, so that is why it is mutable data_decl_table: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, []u8) = .{}, +/// When `updateExports` is called, we store the export indices here, to be used +/// during flush. +decl_exports: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, []u32) = .{}, /// Table of unnamed constants associated with a parent `Decl`. /// We store them here so that we can free the constants whenever the `Decl` @@ -435,7 +439,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: const res = try codegen.generateFunction( &self.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -446,7 +450,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: .ok => try code_buffer.toOwnedSlice(), .fail => |em| { func.analysis(&mod.intern_pool).state = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -501,7 +505,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn }; self.syms.items[info.sym_index.?] 
= sym; - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), val, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), val, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = new_atom_idx, @@ -510,7 +514,7 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, @@ -540,14 +544,14 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex) defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod).upgrade(mod), decl_val, &code_buffer, .{ .none = {} }, .{ + const res = try codegen.generateSymbol(&self.base, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)), }); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -770,8 +774,8 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian()); } self.syms.items[atom.sym_index.?].value = off; - if (mod.decl_exports.get(decl_index)) |exports| { - try self.addDeclExports(mod, decl_index, exports.items); + if (self.decl_exports.get(decl_index)) |export_indices| { + try self.addDeclExports(mod, decl_index, export_indices); } } } @@ -836,8 +840,8 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) mem.writeInt(u64, got_table[atom.got_index.? 
* 8 ..][0..8], off, target.cpu.arch.endian()); } self.syms.items[atom.sym_index.?].value = off; - if (mod.decl_exports.get(decl_index)) |exports| { - try self.addDeclExports(mod, decl_index, exports.items); + if (self.decl_exports.get(decl_index)) |export_indices| { + try self.addDeclExports(mod, decl_index, export_indices); } } // write the unnamed constants after the other data decls @@ -1007,22 +1011,23 @@ fn addDeclExports( self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { const gpa = self.base.comp.gpa; const metadata = self.decls.getPtr(decl_index).?; const atom = self.getAtom(metadata.index); - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; const exp_name = exp.opts.name.toSlice(&mod.intern_pool); // plan9 does not support custom sections if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice(".text", &mod.intern_pool) and !section_name.eqlSlice(".data", &mod.intern_pool)) { - try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.put(mod.gpa, export_idx, try Module.ErrorMsg.create( gpa, - mod.declPtr(decl_index).navSrcLoc(mod).upgrade(mod), + mod.declPtr(decl_index).navSrcLoc(mod), "plan9 does not support extra sections", .{}, )); @@ -1152,15 +1157,23 @@ pub fn updateExports( self: *Plan9, module: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { + const gpa = self.base.comp.gpa; switch (exported) { .value => @panic("TODO: plan9 updateExports handling values"), - .decl_index => |decl_index| _ = try self.seeDecl(decl_index), + .decl_index => |decl_index| { + _ = try self.seeDecl(decl_index); + if (self.decl_exports.fetchSwapRemove(decl_index)) |kv| { + gpa.free(kv.value); + } + try self.decl_exports.ensureUnusedCapacity(gpa, 1); + const duped_indices = try gpa.dupe(u32, export_indices); + self.decl_exports.putAssumeCapacityNoClobber(decl_index, duped_indices); + }, } - // we do all the things in flush + // all the real work happens in flush _ = module; - _ = exports; } pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.Index { @@ -1212,14 +1225,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind self.syms.items[self.getAtomPtr(atom_index).sym_index.?]
= symbol; // generate the code - const src = if (sym.ty.srcLocOrNull(mod)) |src| - src.upgrade(mod) - else - Module.SrcLoc{ - .file_scope = undefined, - .base_node = undefined, - .lazy = .unneeded, - }; + const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, src, @@ -1290,6 +1296,10 @@ pub fn deinit(self: *Plan9) void { gpa.free(self.syms.items[sym_index].name); } self.data_decl_table.deinit(gpa); + for (self.decl_exports.values()) |export_indices| { + gpa.free(export_indices); + } + self.decl_exports.deinit(gpa); self.syms.deinit(gpa); self.got_index_free_list.deinit(gpa); self.syms_index_free_list.deinit(gpa); @@ -1395,10 +1405,13 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const atom = self.getAtom(decl_metadata.index); const sym = self.syms.items[atom.sym_index.?]; try self.writeSym(writer, sym); - if (self.base.comp.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.opts.name.toSlice(ip))) |exp_i| { - try self.writeSym(writer, self.syms.items[exp_i]); - }; + if (self.decl_exports.get(decl_index)) |export_indices| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; + if (decl_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { + try self.writeSym(writer, self.syms.items[exp_i]); + } + } } } } @@ -1442,13 +1455,16 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const atom = self.getAtom(decl_metadata.index); const sym = self.syms.items[atom.sym_index.?]; try self.writeSym(writer, sym); - if (self.base.comp.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.opts.name.toSlice(ip))) |exp_i| { - const s = self.syms.items[exp_i]; - if (mem.eql(u8, s.name, "_start")) - self.entry_val = s.value; - try self.writeSym(writer, s); - }; + if (self.decl_exports.get(decl_index)) |export_indices| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; + if (decl_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { + const s = self.syms.items[exp_i]; + if (mem.eql(u8, s.name, "_start")) + self.entry_val = s.value; + try self.writeSym(writer, s); + } + } } } } @@ -1530,7 +1546,7 @@ pub fn lowerAnonDecl( self: *Plan9, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { _ = explicit_alignment; // This is basically the same as lowerUnnamedConst. 
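The Plan9 backend above is the odd one out: since all of its symbol writing happens at flush time, it must cache the export indices itself, and the new code makes the linker own copies. `updateExports` dupes the `[]const u32` it is handed, frees any slice it replaces, and `deinit` frees the rest. A minimal sketch of that ownership pattern, assuming nothing about the real linker state (`ExportTable` and its members are illustrative names, not the actual Plan9 API):

    const std = @import("std");

    const ExportTable = struct {
        /// Maps a decl index to the export indices last recorded for it.
        map: std.AutoArrayHashMapUnmanaged(u32, []u32) = .{},

        /// Store a fresh copy of `indices`, freeing whatever was stored for
        /// `decl` before. The caller keeps ownership of `indices`.
        fn update(t: *ExportTable, gpa: std.mem.Allocator, decl: u32, indices: []const u32) !void {
            if (t.map.fetchSwapRemove(decl)) |kv| gpa.free(kv.value);
            try t.map.ensureUnusedCapacity(gpa, 1);
            const duped = try gpa.dupe(u32, indices);
            t.map.putAssumeCapacityNoClobber(decl, duped);
        }

        fn deinit(t: *ExportTable, gpa: std.mem.Allocator) void {
            for (t.map.values()) |indices| gpa.free(indices);
            t.map.deinit(gpa);
        }
    };

    test "export indices are duped, not aliased" {
        const gpa = std.testing.allocator;
        var table: ExportTable = .{};
        defer table.deinit(gpa);

        var scratch = [_]u32{ 1, 2, 3 };
        try table.update(gpa, 0, &scratch);
        scratch[0] = 99; // mutating the caller's buffer must not affect the table
        try std.testing.expectEqual(@as(u32, 1), table.map.get(0).?[0]);
    }

The dupe is the load-bearing detail: the slice passed to `updateExports` is only known to stay valid for the duration of the call, which is presumably why the real code copies it rather than aliasing it.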
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 099d58bfa0..d1a8ff96c6 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -152,7 +152,7 @@ pub fn updateExports( self: *SpirV, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { const decl_index = switch (exported) { .decl_index => |i| i, @@ -177,7 +177,8 @@ pub fn updateExports( if ((!is_vulkan and execution_model == .Kernel) or (is_vulkan and (execution_model == .Fragment or execution_model == .Vertex))) { - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; try self.object.spv.declareEntryPoint( spv_decl_index, exp.opts.name.toSlice(&mod.intern_pool), diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 6476784a01..d14061fe78 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -33,7 +33,7 @@ const Zcu = @import("../Zcu.zig"); const Module = Zcu; const Object = @import("Wasm/Object.zig"); const Symbol = @import("Wasm/Symbol.zig"); -const Type = @import("../type.zig").Type; +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const ZigObject = @import("Wasm/ZigObject.zig"); @@ -1533,7 +1533,7 @@ pub fn lowerAnonDecl( wasm: *Wasm, decl_val: InternPool.Index, explicit_alignment: Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, decl_val, explicit_alignment, src_loc); } @@ -1542,26 +1542,26 @@ pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: lin return wasm.zigObjectPtr().?.getAnonDeclVAddr(wasm, decl_val, reloc_info); } -pub fn deleteDeclExport( +pub fn deleteExport( wasm: *Wasm, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { if (wasm.llvm_object) |_| return; - return wasm.zigObjectPtr().?.deleteDeclExport(wasm, decl_index, name); + return wasm.zigObjectPtr().?.deleteExport(wasm, exported, name); } pub fn updateExports( wasm: *Wasm, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, exports); - return wasm.zigObjectPtr().?.updateExports(wasm, mod, exported, exports); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateExports(mod, exported, export_indices); + return wasm.zigObjectPtr().?.updateExports(wasm, mod, exported, export_indices); } pub fn freeDecl(wasm: *Wasm, decl_index: InternPool.DeclIndex) void { diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index 1accf81c02..24fc66367a 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -269,7 +269,7 @@ pub fn updateDecl( const res = try codegen.generateSymbol( &wasm_file.base, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), val, &code_writer, .none, @@ -280,7 +280,7 @@ pub fn updateDecl( .ok => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -308,7 +308,7 @@ pub fn updateFunc( defer code_writer.deinit(); const result = try codegen.generateFunction( &wasm_file.base, - 
decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), func_index, air, liveness, @@ -320,7 +320,7 @@ pub fn updateFunc( .ok => code_writer.items, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return; }, }; @@ -439,7 +439,7 @@ pub fn lowerAnonDecl( wasm_file: *Wasm, decl_val: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.SrcLoc, + src_loc: Module.LazySrcLoc, ) !codegen.Result { const gpa = wasm_file.base.comp.gpa; const gop = try zig_object.anon_decls.getOrPut(gpa, decl_val); @@ -494,14 +494,14 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d else decl.navSrcLoc(mod); - switch (try zig_object.lowerConst(wasm_file, name, val, decl_src.upgrade(mod))) { + switch (try zig_object.lowerConst(wasm_file, name, val, decl_src)) { .ok => |atom_index| { try wasm_file.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index); return @intFromEnum(wasm_file.getAtom(atom_index).sym_index); }, .fail => |em| { decl.analysis = .codegen_failure; - try mod.failed_decls.put(mod.gpa, decl_index, em); + try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); return error.CodegenFail; }, } @@ -512,7 +512,7 @@ const LowerConstResult = union(enum) { fail: *Module.ErrorMsg, }; -fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.SrcLoc) !LowerConstResult { +fn lowerConst(zig_object: *ZigObject, wasm_file: *Wasm, name: []const u8, val: Value, src_loc: Module.LazySrcLoc) !LowerConstResult { const gpa = wasm_file.base.comp.gpa; const mod = wasm_file.base.comp.module.?; @@ -833,13 +833,17 @@ pub fn getAnonDeclVAddr( return target_symbol_index; } -pub fn deleteDeclExport( +pub fn deleteExport( zig_object: *ZigObject, wasm_file: *Wasm, - decl_index: InternPool.DeclIndex, + exported: Zcu.Exported, name: InternPool.NullTerminatedString, ) void { const mod = wasm_file.base.comp.module.?; + const decl_index = switch (exported) { + .decl_index => |decl_index| decl_index, + .value => @panic("TODO: implement Wasm linker code for exporting a constant value"), + }; const decl_info = zig_object.decls_map.getPtr(decl_index) orelse return; if (decl_info.@"export"(zig_object, name.toSlice(&mod.intern_pool))) |sym_index| { const sym = zig_object.symbol(sym_index); @@ -856,7 +860,7 @@ pub fn updateExports( wasm_file: *Wasm, mod: *Module, exported: Module.Exported, - exports: []const *Module.Export, + export_indices: []const u32, ) !void { const decl_index = switch (exported) { .decl_index => |i| i, @@ -873,11 +877,12 @@ pub fn updateExports( const gpa = mod.gpa; log.debug("Updating exports for decl '{}'", .{decl.name.fmt(&mod.intern_pool)}); - for (exports) |exp| { + for (export_indices) |export_idx| { + const exp = mod.all_exports.items[export_idx]; if (exp.opts.section.toSlice(&mod.intern_pool)) |section| { - try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( gpa, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", .{section}, )); @@ -908,9 +913,9 @@ pub fn updateExports( }, .strong => {}, // symbols are strong by default .link_once => { - try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(gpa, export_idx, try Module.ErrorMsg.create( 
gpa, - decl.navSrcLoc(mod).upgrade(mod), + decl.navSrcLoc(mod), "Unimplemented: LinkOnce", .{}, )); @@ -1247,7 +1252,8 @@ const Zcu = @import("../../Zcu.zig"); const Module = Zcu; const StringTable = @import("../StringTable.zig"); const Symbol = @import("Symbol.zig"); -const Type = @import("../../type.zig").Type; +const Type = @import("../../Type.zig"); const Value = @import("../../Value.zig"); const Wasm = @import("../Wasm.zig"); +const AnalUnit = InternPool.AnalUnit; const ZigObject = @This(); diff --git a/src/mutable_value.zig b/src/mutable_value.zig index 77c0827691..1806e6ba19 100644 --- a/src/mutable_value.zig +++ b/src/mutable_value.zig @@ -3,7 +3,7 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Zcu = @import("Zcu.zig"); const InternPool = @import("InternPool.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); /// We use a tagged union here because while it wastes a few bytes for some tags, having a fixed diff --git a/src/print_air.zig b/src/print_air.zig index 2dbaf3069f..85fbe87ec9 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -4,7 +4,7 @@ const fmtIntSizeBin = std.fmt.fmtIntSizeBin; const Zcu = @import("Zcu.zig"); const Value = @import("Value.zig"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); const InternPool = @import("InternPool.zig"); diff --git a/src/print_value.zig b/src/print_value.zig index 7f75b05606..394f021049 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -2,7 +2,7 @@ //! It is a thin wrapper around a `Value` which also, redundantly, stores its `Type`. const std = @import("std"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. @@ -81,12 +81,12 @@ pub fn print( }), .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}), - .lazy_align => |ty| if (opt_sema) |sema| { - const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; + .lazy_align => |ty| if (opt_sema != null) { + const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .sema)).scalar; try writer.print("{}", .{a.toByteUnits() orelse 0}); } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}), - .lazy_size => |ty| if (opt_sema) |sema| { - const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar; + .lazy_size => |ty| if (opt_sema != null) { + const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .sema)).scalar; try writer.print("{}", .{s}); } else try writer.print("@sizeOf({})", .{Type.fromInterned(ty).fmt(mod)}), }, diff --git a/src/register_manager.zig b/src/register_manager.zig index e1bc4d52fa..fb9afbbc01 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -5,7 +5,7 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Air = @import("Air.zig"); const StaticBitSet = std.bit_set.StaticBitSet; -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const Zcu = @import("Zcu.zig"); /// Deprecated. 
const Module = Zcu; diff --git a/src/target.zig b/src/target.zig index 08ccfbaaca..a253c1fa0b 100644 --- a/src/target.zig +++ b/src/target.zig @@ -1,5 +1,5 @@ const std = @import("std"); -const Type = @import("type.zig").Type; +const Type = @import("Type.zig"); const AddressSpace = std.builtin.AddressSpace; const Alignment = @import("InternPool.zig").Alignment; const Feature = @import("Zcu.zig").Feature; diff --git a/src/type.zig b/src/type.zig deleted file mode 100644 index df93822273..0000000000 --- a/src/type.zig +++ /dev/null @@ -1,3617 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); -const Value = @import("Value.zig"); -const assert = std.debug.assert; -const Target = std.Target; -const Zcu = @import("Zcu.zig"); -/// Deprecated. -const Module = Zcu; -const log = std.log.scoped(.Type); -const target_util = @import("target.zig"); -const Sema = @import("Sema.zig"); -const InternPool = @import("InternPool.zig"); -const Alignment = InternPool.Alignment; -const Zir = std.zig.Zir; - -/// Both types and values are canonically represented by a single 32-bit integer -/// which is an index into an `InternPool` data structure. -/// This struct abstracts around this storage by providing methods only -/// applicable to types rather than values in general. -pub const Type = struct { - ip_index: InternPool.Index, - - pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { - return ty.zigTypeTagOrPoison(mod) catch unreachable; - } - - pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { - return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern()); - } - - pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { - return switch (self.zigTypeTag(mod)) { - .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), - .Optional => { - return self.optionalChild(mod).baseZigTypeTag(mod); - }, - else => |t| t, - }; - } - - pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { - return switch (ty.zigTypeTag(mod)) { - .Int, - .Float, - .ComptimeFloat, - .ComptimeInt, - => true, - - .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), - - .Bool, - .Type, - .Void, - .ErrorSet, - .Fn, - .Opaque, - .AnyFrame, - .Enum, - .EnumLiteral, - => is_equality_cmp, - - .NoReturn, - .Array, - .Struct, - .Undefined, - .Null, - .ErrorUnion, - .Union, - .Frame, - => false, - - .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), - .Optional => { - if (!is_equality_cmp) return false; - return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); - }, - }; - } - - /// If it is a function pointer, returns the function type. Otherwise returns null. - pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { - if (ty.zigTypeTag(mod) != .Pointer) return null; - const elem_ty = ty.childType(mod); - if (elem_ty.zigTypeTag(mod) != .Fn) return null; - return elem_ty; - } - - /// Asserts the type is a pointer. 
- pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { - return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const; - } - - pub const ArrayInfo = struct { - elem_type: Type, - sentinel: ?Value = null, - len: u64, - }; - - pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { - return .{ - .len = self.arrayLen(mod), - .sentinel = self.sentinel(mod), - .elem_type = self.childType(mod), - }; - } - - pub fn ptrInfo(ty: Type, mod: *const Module) InternPool.Key.PtrType { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |p| p, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| p, - else => unreachable, - }, - else => unreachable, - }; - } - - pub fn eql(a: Type, b: Type, mod: *const Module) bool { - _ = mod; // TODO: remove this parameter - // The InternPool data structure hashes based on Key to make interned objects - // unique. An Index can be treated simply as u32 value for the - // purpose of Type/Value hashing and equality. - return a.toIntern() == b.toIntern(); - } - - pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - _ = ty; - _ = unused_fmt_string; - _ = options; - _ = writer; - @compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()"); - } - - pub const Formatter = std.fmt.Formatter(format2); - - pub fn fmt(ty: Type, module: *Module) Formatter { - return .{ .data = .{ - .ty = ty, - .module = module, - } }; - } - - const FormatContext = struct { - ty: Type, - module: *Module, - }; - - fn format2( - ctx: FormatContext, - comptime unused_format_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) !void { - comptime assert(unused_format_string.len == 0); - _ = options; - return print(ctx.ty, writer, ctx.module); - } - - pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) { - return .{ .data = ty }; - } - - /// This is a debug function. In order to print types in a meaningful way - /// we also need access to the module. - pub fn dump( - start_type: Type, - comptime unused_format_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) @TypeOf(writer).Error!void { - _ = options; - comptime assert(unused_format_string.len == 0); - return writer.print("{any}", .{start_type.ip_index}); - } - - /// Prints a name suitable for `@typeName`. - /// TODO: take an `opt_sema` to pass to `fmtValue` when printing sentinels. 
- pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - const sign_char: u8 = switch (int_type.signedness) { - .signed => 'i', - .unsigned => 'u', - }; - return writer.print("{c}{d}", .{ sign_char, int_type.bits }); - }, - .ptr_type => { - const info = ty.ptrInfo(mod); - - if (info.sentinel != .none) switch (info.flags.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), - .Slice => try writer.print("[:{}]", .{Value.fromInterned(info.sentinel).fmtValue(mod, null)}), - } else switch (info.flags.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (info.flags.alignment != .none or - info.packed_offset.host_size != 0 or - info.flags.vector_index != .none) - { - const alignment = if (info.flags.alignment != .none) - info.flags.alignment - else - Type.fromInterned(info.child).abiAlignment(mod); - try writer.print("align({d}", .{alignment.toByteUnits() orelse 0}); - - if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) { - try writer.print(":{d}:{d}", .{ - info.packed_offset.bit_offset, info.packed_offset.host_size, - }); - } - if (info.flags.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (info.flags.vector_index != .none) { - try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)}); - } - try writer.writeAll(") "); - } - if (info.flags.address_space != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(info.flags.address_space)}); - } - if (info.flags.is_const) try writer.writeAll("const "); - if (info.flags.is_volatile) try writer.writeAll("volatile "); - if (info.flags.is_allowzero and info.flags.size != .C) try writer.writeAll("allowzero "); - - try print(Type.fromInterned(info.child), writer, mod); - return; - }, - .array_type => |array_type| { - if (array_type.sentinel == .none) { - try writer.print("[{d}]", .{array_type.len}); - try print(Type.fromInterned(array_type.child), writer, mod); - } else { - try writer.print("[{d}:{}]", .{ - array_type.len, - Value.fromInterned(array_type.sentinel).fmtValue(mod, null), - }); - try print(Type.fromInterned(array_type.child), writer, mod); - } - return; - }, - .vector_type => |vector_type| { - try writer.print("@Vector({d}, ", .{vector_type.len}); - try print(Type.fromInterned(vector_type.child), writer, mod); - try writer.writeAll(")"); - return; - }, - .opt_type => |child| { - try writer.writeByte('?'); - return print(Type.fromInterned(child), writer, mod); - }, - .error_union_type => |error_union_type| { - try print(Type.fromInterned(error_union_type.error_set_type), writer, mod); - try writer.writeByte('!'); - if (error_union_type.payload_type == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(error_union_type.payload_type), writer, mod); - } - return; - }, - .inferred_error_set_type => |func_index| { - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.funcOwnerDeclPtr(func_index); - try owner_decl.renderFullyQualifiedName(mod, writer); - try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); - }, - .error_set_type => |error_set_type| { - const names = error_set_type.names; - try writer.writeAll("error{"); - for (names.get(ip), 0..) 
|name, i| { - if (i != 0) try writer.writeByte(','); - try writer.print("{}", .{name.fmt(ip)}); - } - try writer.writeAll("}"); - }, - .simple_type => |s| switch (s) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .comptime_int, - .comptime_float, - .noreturn, - .adhoc_inferred_error_set, - => return writer.writeAll(@tagName(s)), - - .null, - .undefined, - => try writer.print("@TypeOf({s})", .{@tagName(s)}), - - .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), - .atomic_order => try writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op => try writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention => try writer.writeAll("std.builtin.CallingConvention"), - .address_space => try writer.writeAll("std.builtin.AddressSpace"), - .float_mode => try writer.writeAll("std.builtin.FloatMode"), - .reduce_op => try writer.writeAll("std.builtin.ReduceOp"), - .call_modifier => try writer.writeAll("std.builtin.CallModifier"), - .prefetch_options => try writer.writeAll("std.builtin.PrefetchOptions"), - .export_options => try writer.writeAll("std.builtin.ExportOptions"), - .extern_options => try writer.writeAll("std.builtin.ExternOptions"), - .type_info => try writer.writeAll("std.builtin.Type"), - - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.decl.unwrap()) |decl_index| { - const decl = mod.declPtr(decl_index); - try decl.renderFullyQualifiedName(mod, writer); - } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { - const namespace = mod.namespacePtr(namespace_index); - try namespace.renderFullyQualifiedName(mod, .empty, writer); - } else { - try writer.writeAll("@TypeOf(.{})"); - } - }, - .anon_struct_type => |anon_struct| { - if (anon_struct.types.len == 0) { - return writer.writeAll("@TypeOf(.{})"); - } - try writer.writeAll("struct{"); - for (anon_struct.types.get(ip), anon_struct.values.get(ip), 0..) |field_ty, val, i| { - if (i != 0) try writer.writeAll(", "); - if (val != .none) { - try writer.writeAll("comptime "); - } - if (anon_struct.names.len != 0) { - try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)}); - } - - try print(Type.fromInterned(field_ty), writer, mod); - - if (val != .none) { - try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(mod, null)}); - } - } - try writer.writeAll("}"); - }, - - .union_type => { - const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .opaque_type => { - const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_type => { - const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .func_type => |fn_info| { - if (fn_info.is_noinline) { - try writer.writeAll("noinline "); - } - try writer.writeAll("fn ("); - const param_types = fn_info.param_types.get(&mod.intern_pool); - for (param_types, 0..) 
|param_ty, i| { - if (i != 0) try writer.writeAll(", "); - if (std.math.cast(u5, i)) |index| { - if (fn_info.paramIsComptime(index)) { - try writer.writeAll("comptime "); - } - if (fn_info.paramIsNoalias(index)) { - try writer.writeAll("noalias "); - } - } - if (param_ty == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(param_ty), writer, mod); - } - } - if (fn_info.is_var_args) { - if (param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (fn_info.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(fn_info.cc)); - try writer.writeAll(") "); - } - if (fn_info.return_type == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(Type.fromInterned(fn_info.return_type), writer, mod); - } - }, - .anyframe_type => |child| { - if (child == .none) return writer.writeAll("anyframe"); - try writer.writeAll("anyframe->"); - return print(Type.fromInterned(child), writer, mod); - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - } - } - - pub fn fromInterned(i: InternPool.Index) Type { - assert(i != .none); - return .{ .ip_index = i }; - } - - pub fn toIntern(ty: Type) InternPool.Index { - assert(ty.ip_index != .none); - return ty.ip_index; - } - - pub fn toValue(self: Type) Value { - return Value.fromInterned(self.toIntern()); - } - - const RuntimeBitsError = Module.CompileError || error{NeedLazy}; - - /// true if and only if the type takes up space in memory at runtime. - /// There are two reasons a type will return false: - /// * the type is a comptime-only type. For example, the type `type` itself. - /// - note, however, that a struct can have mixed fields and only the non-comptime-only - /// fields will count towards the ABI size. For example, `struct {T: type, x: i32}` - /// hasRuntimeBits()=true and abiSize()=4 - /// * the type has only one possible value, making its ABI size 0. - /// - an enum with an explicit tag type has the ABI size of the integer tag type, - /// making it one-possible-value only if the integer tag type has 0 bits. - /// When `ignore_comptime_only` is true, then types that are comptime-only - /// may return false positives. - pub fn hasRuntimeBitsAdvanced( - ty: Type, - mod: *Module, - ignore_comptime_only: bool, - strat: AbiAlignmentAdvancedStrat, - ) RuntimeBitsError!bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - // False because it is a comptime-only type. - .empty_struct_type => false, - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| int_type.bits != 0, - .ptr_type => { - // Pointers to zero-bit types still have a runtime address; however, pointers - // to comptime-only types do not, with the exception of function pointers. 
- if (ignore_comptime_only) return true; - return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(ty)), - .eager => !comptimeOnly(ty, mod), - .lazy => error.NeedLazy, - }; - }, - .anyframe_type => true, - .array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and - try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .vector_type => |vector_type| return vector_type.len > 0 and - try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .opt_type => |child| { - const child_ty = Type.fromInterned(child); - if (child_ty.isNoReturn(mod)) { - // Then the optional is comptime-known to be null. - return false; - } - if (ignore_comptime_only) return true; - return switch (strat) { - .sema => |sema| !(try sema.typeRequiresComptime(child_ty)), - .eager => !comptimeOnly(child_ty, mod), - .lazy => error.NeedLazy, - }; - }, - .error_union_type, - .error_set_type, - .inferred_error_set_type, - => true, - - // These are function *bodies*, not pointers. - // They return false here because they are comptime-only types. - // Special exceptions have to be made when emitting functions due to - // this returning false. - .func_type => false, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .anyerror, - .adhoc_inferred_error_set, - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - => true, - - // These are false because they are comptime-only types. - .void, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - => false, - - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(struct_type.haveFieldTypes(ip)), - .lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy, - } - for (0..struct_type.field_types.len) |i| { - if (struct_type.comptime_bits.getBit(ip, i)) continue; - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - .anon_struct_type => |tuple| { - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { - if (val != .none) continue; // comptime field - if (try Type.fromInterned(field_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; - } - return false; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).runtime_tag) { - .none => { - if (union_type.flagsPtr(ip).status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. 
- union_type.flagsPtr(ip).assumed_runtime_bits = true; - return true; - } - }, - .safety, .tagged => { - const tag_ty = union_type.tagTypePtr(ip).*; - // tag_ty will be `none` if this union's tag type is not resolved yet, - // in which case we want control flow to continue down below. - if (tag_ty != .none and - try Type.fromInterned(tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - { - return true; - } - }, - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), - .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) - return error.NeedLazy, - } - for (0..union_type.field_types.len) |field_index| { - const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]); - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - - .opaque_type => true, - .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - }; - } - - /// true if and only if the type has a well-defined memory layout - /// readFrom/writeToMemory are supported only for types with a well- - /// defined memory layout - pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .int_type, - .vector_type, - => true, - - .error_union_type, - .error_set_type, - .inferred_error_set_type, - .anon_struct_type, - .opaque_type, - .anyframe_type, - // These are function bodies, not function pointers. - .func_type, - => false, - - .array_type => |array_type| Type.fromInterned(array_type.child).hasWellDefinedLayout(mod), - .opt_type => ty.isPtrLikeOptional(mod), - .ptr_type => |ptr_type| ptr_type.flags.size != .Slice, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .void, - => true, - - .anyerror, - .adhoc_inferred_error_set, - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - .generic_poison, - => false, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - // Struct with no fields have a well-defined layout of no bits. 
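`hasWellDefinedLayout` is what gates `readFromMemory`/`writeToMemory`; the same property is what makes `@bitCast` legal in user code, since packed and extern layouts are specified bit-for-bit while `auto` layouts are not. An illustrative test (mine), assuming only the documented packed-struct rule that the first field occupies the least significant bits:

```zig
const std = @import("std");

test "packed structs have a well-defined layout" {
    const Flags = packed struct { lo: u4, hi: u4 };
    const f = Flags{ .lo = 0x2, .hi = 0x1 };
    // hi(0x1) << 4 | lo(0x2) == 0x12 in the u8 backing integer.
    try std.testing.expectEqual(@as(u8, 0x12), @as(u8, @bitCast(f)));
}
```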
- return struct_type.layout != .auto or struct_type.field_types.len == 0; - }, - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - return switch (union_type.flagsPtr(ip).runtime_tag) { - .none, .safety => union_type.flagsPtr(ip).layout != .auto, - .tagged => false, - }; - }, - .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) { - .auto => false, - .explicit, .nonexhaustive => true, - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }; - } - - pub fn hasRuntimeBits(ty: Type, mod: *Module) bool { - return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; - } - - pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { - return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; - } - - pub fn fnHasRuntimeBits(ty: Type, mod: *Module) bool { - return ty.fnHasRuntimeBitsAdvanced(mod, null) catch unreachable; - } - - /// Determines whether a function type has runtime bits, i.e. whether a - /// function with this type can exist at runtime. - /// Asserts that `ty` is a function type. - /// If `opt_sema` is not provided, asserts that the return type is sufficiently resolved. - pub fn fnHasRuntimeBitsAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { - const fn_info = mod.typeToFunc(ty).?; - if (fn_info.is_generic) return false; - if (fn_info.is_var_args) return true; - if (fn_info.cc == .Inline) return false; - return !try Type.fromInterned(fn_info.return_type).comptimeOnlyAdvanced(mod, opt_sema); - } - - pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { - switch (ty.zigTypeTag(mod)) { - .Fn => return ty.fnHasRuntimeBits(mod), - else => return ty.hasRuntimeBits(mod), - } - } - - /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. - pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Fn => true, - else => return ty.hasRuntimeBitsIgnoreComptime(mod), - }; - } - - pub fn isNoReturn(ty: Type, mod: *Module) bool { - return mod.intern_pool.isNoReturn(ty.toIntern()); - } - - /// Returns `none` if the pointer is naturally aligned and the element type is 0-bit. - pub fn ptrAlignment(ty: Type, mod: *Module) Alignment { - return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; - } - - pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| { - if (ptr_type.flags.alignment != .none) - return ptr_type.flags.alignment; - - if (opt_sema) |sema| { - const res = try Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } - - return (Type.fromInterned(ptr_type.child).abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - }, - .opt_type => |child| Type.fromInterned(child).ptrAlignmentAdvanced(mod, opt_sema), - else => unreachable, - }; - } - - pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.address_space, - .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space, - else => unreachable, - }; - } - - /// Never returns `none`. 
- /// Asserts that all necessary type resolution is already done. - pub fn abiAlignment(ty: Type, mod: *Module) Alignment { - return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } - - /// May capture a reference to `ty`. - /// Returned value has type `comptime_int`. - pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { - switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { - .val => |val| return val, - .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0), - } - } - - pub const AbiAlignmentAdvanced = union(enum) { - scalar: Alignment, - val: Value, - }; - - pub const AbiAlignmentAdvancedStrat = union(enum) { - eager, - lazy, - sema: *Sema, - }; - - /// If you pass `eager` you will get back `scalar` and assert the type is resolved. - /// In this case there will be no error, guaranteed. - /// If you pass `lazy` you may get back `scalar` or `val`. - /// If `val` is returned, a reference to `ty` has been captured. - /// If you pass `sema` you will get back `scalar` and resolve the type if - /// necessary, possibly returning a CompileError. - pub fn abiAlignmentAdvanced( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiAlignmentAdvanced { - const target = mod.getTarget(); - const use_llvm = mod.comp.config.use_llvm; - const ip = &mod.intern_pool; - - const opt_sema = switch (strat) { - .sema => |sema| sema, - else => null, - }; - - switch (ty.toIntern()) { - .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" }, - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(int_type.bits, target, use_llvm) }; - }, - .ptr_type, .anyframe_type => { - return .{ .scalar = ptrAbiAlignment(target) }; - }, - .array_type => |array_type| { - return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat); - }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return .{ .scalar = .@"1" }; - switch (mod.comp.getZigBackend()) { - else => { - const elem_bits: u32 = @intCast(try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema)); - if (elem_bits == 0) return .{ .scalar = .@"1" }; - const bytes = ((elem_bits * vector_type.len) + 7) / 8; - const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); - return .{ .scalar = Alignment.fromByteUnits(alignment) }; - }, - .stage2_c => { - return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat); - }, - .stage2_x86_64 => { - if (vector_type.child == .bool_type) { - if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; - if (vector_type.len > 128 and std.Target.x86.featureSetHas(target.cpu.features, .avx2)) return .{ .scalar = .@"32" }; - if (vector_type.len > 64) return .{ .scalar = .@"16" }; - const bytes = std.math.divCeil(u32, vector_type.len, 8) catch unreachable; - const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); - return .{ .scalar = Alignment.fromByteUnits(alignment) }; - } - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - if (elem_bytes == 0) return .{ .scalar = .@"1" }; - const bytes = elem_bytes * vector_type.len; - if (bytes > 32 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" }; - if (bytes > 16 and std.Target.x86.featureSetHas(target.cpu.features, .avx)) return .{ .scalar = .@"32" };
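For the generic `else` backend branch above, the vector alignment rule is simply: total byte size, rounded up to the next power of two. Restated as a dependency-free sketch of the same arithmetic (hypothetical helper, not part of the compiler):

```zig
const std = @import("std");

/// Mirrors the generic-backend branch: alignment of a @Vector(len, Elem)
/// is its total byte size rounded up to the next power of two.
fn genericVectorAlignBytes(elem_bits: u32, len: u32) u32 {
    const bytes = ((elem_bits * len) + 7) / 8; // round bits up to bytes
    return std.math.ceilPowerOfTwoAssert(u32, bytes);
}

test "vector alignment arithmetic" {
    try std.testing.expectEqual(@as(u32, 4), genericVectorAlignBytes(8, 3)); // 3 bytes -> 4
    try std.testing.expectEqual(@as(u32, 16), genericVectorAlignBytes(32, 4)); // 16 bytes -> 16
}
```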
- return .{ .scalar = .@"16" }; - }, - } - }, - - .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), - .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, Type.fromInterned(info.payload_type)), - - .error_set_type, .inferred_error_set_type => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; - }, - - // represents machine code; not a pointer - .func_type => return .{ .scalar = target_util.defaultFunctionAlignment(target) }, - - .simple_type => |t| switch (t) { - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .anyopaque, - => return .{ .scalar = .@"1" }, - - .usize, - .isize, - => return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target, use_llvm) }, - - .export_options, - .extern_options, - .type_info, - => return .{ .scalar = ptrAbiAlignment(target) }, - - .c_char => return .{ .scalar = cTypeAlign(target, .char) }, - .c_short => return .{ .scalar = cTypeAlign(target, .short) }, - .c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) }, - .c_int => return .{ .scalar = cTypeAlign(target, .int) }, - .c_uint => return .{ .scalar = cTypeAlign(target, .uint) }, - .c_long => return .{ .scalar = cTypeAlign(target, .long) }, - .c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) }, - .c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) }, - .c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) }, - .c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) }, - - .f16 => return .{ .scalar = .@"2" }, - .f32 => return .{ .scalar = cTypeAlign(target, .float) }, - .f64 => switch (target.c_type_bit_size(.double)) { - 64 => return .{ .scalar = cTypeAlign(target, .double) }, - else => return .{ .scalar = .@"8" }, - }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return .{ .scalar = cTypeAlign(target, .longdouble) }, - else => { - const u80_ty: Type = .{ .ip_index = .u80_type }; - return .{ .scalar = abiAlignment(u80_ty, mod) }; - }, - }, - .f128 => switch (target.c_type_bit_size(.longdouble)) { - 128 => return .{ .scalar = cTypeAlign(target, .longdouble) }, - else => return .{ .scalar = .@"16" }, - }, - - .anyerror, .adhoc_inferred_error_set => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" }; - return .{ .scalar = intAbiAlignment(bits, target, use_llvm) }; - }, - - .void, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - => return .{ .scalar = .@"1" }, - - .noreturn => unreachable, - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))), - }, - .eager => {}, - } - return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(mod) }; - } - - const flags = struct_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - - return switch (strat) { - .eager => unreachable, // struct alignment not resolved - .sema => |sema| .{ - .scalar = try 
- sema.resolveStructAlignment(ty.toIntern(), struct_type), - }, - .lazy => .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }; - }, - .anon_struct_type => |tuple| { - var big_align: Alignment = .@"1"; - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { - if (val != .none) continue; // comptime field - switch (try Type.fromInterned(field_ty).abiAlignmentAdvanced(mod, strat)) { - .scalar => |field_align| big_align = big_align.max(field_align), - .val => switch (strat) { - .eager => unreachable, // field type alignment not resolved - .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }, - } - } - return .{ .scalar = big_align }; - }, - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - const flags = union_type.flagsPtr(ip).*; - if (flags.alignment != .none) return .{ .scalar = flags.alignment }; - - if (!union_type.haveLayout(ip)) switch (strat) { - .eager => unreachable, // union layout not resolved - .sema => |sema| return .{ .scalar = try sema.resolveUnionAlignment(ty, union_type) }, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }; - - return .{ .scalar = union_type.flagsPtr(ip).alignment }; - }, - .opaque_type => return .{ .scalar = .@"1" }, - .enum_type => return .{ - .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod), - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - } - } - - fn abiAlignmentAdvancedErrorUnion( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - payload_ty: Type, - ) Module.CompileError!AbiAlignmentAdvanced { - // This code needs to be kept in sync with the equivalent switch prong - // in abiSizeAdvanced.
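`abiAlignmentAdvancedErrorUnion`, whose body continues below, computes max(error-set alignment, payload alignment), falling back to the error set alone when the payload has no runtime bits. Both cases are visible from user code; a hedged check that should hold on common targets:

```zig
const std = @import("std");

test "error union alignment is the max of its two halves" {
    try std.testing.expectEqual(
        @max(@alignOf(anyerror), @alignOf(u64)),
        @alignOf(anyerror!u64),
    );
    // Zero-bit payload: only the error code contributes.
    try std.testing.expectEqual(@alignOf(anyerror), @alignOf(anyerror!void));
}
```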
- const code_align = abiAlignment(Type.anyerror, mod); - switch (strat) { - .eager, .sema => { - if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - return .{ .scalar = code_align }; - } - return .{ .scalar = code_align.max( - (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, - ) }; - }, - .lazy => { - switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) }, - .val => {}, - } - return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }; - }, - } - } - - fn abiAlignmentAdvancedOptional( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiAlignmentAdvanced { - const target = mod.getTarget(); - const child_type = ty.optionalChild(mod); - - switch (child_type.zigTypeTag(mod)) { - .Pointer => return .{ .scalar = ptrAbiAlignment(target) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), - .NoReturn => return .{ .scalar = .@"1" }, - else => {}, - } - - switch (strat) { - .eager, .sema => { - if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - return .{ .scalar = .@"1" }; - } - return child_type.abiAlignmentAdvanced(mod, strat); - }, - .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| return .{ .scalar = x.max(.@"1") }, - .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.toIntern() }, - } }))) }, - }, - } - } - - /// May capture a reference to `ty`. - pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { - switch (try ty.abiSizeAdvanced(mod, .lazy)) { - .val => |val| return val, - .scalar => |x| return mod.intValue(Type.comptime_int, x), - } - } - - /// Asserts the type has the ABI size already resolved. - /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, mod: *Module) u64 { - return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; - } - - const AbiSizeAdvanced = union(enum) { - scalar: u64, - val: Value, - }; - - /// If you pass `eager` you will get back `scalar` and assert the type is resolved. - /// In this case there will be no error, guaranteed. - /// If you pass `lazy` you may get back `scalar` or `val`. - /// If `val` is returned, a reference to `ty` has been captured. - /// If you pass `sema` you will get back `scalar` and resolve the type if - /// necessary, possibly returning a CompileError. 
- pub fn abiSizeAdvanced( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiSizeAdvanced { - const target = mod.getTarget(); - const use_llvm = mod.comp.config.use_llvm; - const ip = &mod.intern_pool; - - switch (ty.toIntern()) { - .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, - - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target, use_llvm) }; - }, - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - }, - .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .array_type => |array_type| { - const len = array_type.lenIncludingSentinel(); - if (len == 0) return .{ .scalar = 0 }; - switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| return .{ .scalar = len * elem_size }, - .val => switch (strat) { - .sema, .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - } - }, - .vector_type => |vector_type| { - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }; - const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| x, - .val => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }; - const total_bytes = switch (mod.comp.getZigBackend()) { - else => total_bytes: { - const elem_bits = try Type.fromInterned(vector_type.child).bitSizeAdvanced(mod, opt_sema); - const total_bits = elem_bits * vector_type.len; - break :total_bytes (total_bits + 7) / 8; - }, - .stage2_c => total_bytes: { - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - break :total_bytes elem_bytes * vector_type.len; - }, - .stage2_x86_64 => total_bytes: { - if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable; - const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar); - break :total_bytes elem_bytes * vector_type.len; - }, - }; - return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) }; - }, - - .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), - - .error_set_type, .inferred_error_set_type => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; - }, - - .error_union_type => |error_union_type| { - const payload_ty = Type.fromInterned(error_union_type.payload_type); - // This code needs to be kept in sync with the equivalent switch prong - // in abiAlignmentAdvanced. 
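The error-union size computation that follows places the higher-aligned half first, padding after each half so that the pair round-trips correctly in arrays. A distilled standalone version of the same arithmetic (my sketch, with plain integers standing in for `Alignment`):

```zig
const std = @import("std");

fn errorUnionAbiSize(code_size: u64, code_align: u64, payload_size: u64, payload_align: u64) u64 {
    var size: u64 = 0;
    if (code_align > payload_align) {
        size += code_size;
        size = std.mem.alignForward(u64, size, payload_align);
        size += payload_size;
        size = std.mem.alignForward(u64, size, code_align);
    } else {
        size += payload_size;
        size = std.mem.alignForward(u64, size, code_align);
        size += code_size;
        size = std.mem.alignForward(u64, size, payload_align);
    }
    return size;
}

test "u16 error code next to a u64 payload pads to 16 bytes" {
    try std.testing.expectEqual(@as(u64, 16), errorUnionAbiSize(2, 2, 8, 8));
}
```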
- const code_size = abiSize(Type.anyerror, mod); - if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) { - // Same as anyerror. - return AbiSizeAdvanced{ .scalar = code_size }; - } - const code_align = abiAlignment(Type.anyerror, mod); - const payload_align = abiAlignment(payload_ty, mod); - const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - }; - - var size: u64 = 0; - if (code_align.compare(.gt, payload_align)) { - size += code_size; - size = payload_align.forward(size); - size += payload_size; - size = code_align.forward(size); - } else { - size += payload_size; - size = code_align.forward(size); - size += code_size; - size = payload_align.forward(size); - } - return AbiSizeAdvanced{ .scalar = size }; - }, - .func_type => unreachable, // represents machine code; not a pointer - .simple_type => |t| switch (t) { - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - => return AbiSizeAdvanced{ .scalar = 1 }, - - .f16 => return AbiSizeAdvanced{ .scalar = 2 }, - .f32 => return AbiSizeAdvanced{ .scalar = 4 }, - .f64 => return AbiSizeAdvanced{ .scalar = 8 }, - .f128 => return AbiSizeAdvanced{ .scalar = 16 }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - else => { - const u80_ty: Type = .{ .ip_index = .u80_type }; - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; - }, - }, - - .usize, - .isize, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - - .anyopaque, - .void, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - => return AbiSizeAdvanced{ .scalar = 0 }, - - .anyerror, .adhoc_inferred_error_set => { - const bits = mod.errorSetBits(); - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target, use_llvm) }; - }, - - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - - 
.type_info => unreachable, - .noreturn => unreachable, - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => switch (struct_type.layout) { - .@"packed" => { - if (struct_type.backingIntType(ip).* == .none) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }; - }, - .auto, .@"extern" => { - if (!struct_type.haveLayout(ip)) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }; - }, - }, - .eager => {}, - } - switch (struct_type.layout) { - .@"packed" => return .{ - .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod), - }, - .auto, .@"extern" => { - assert(struct_type.haveLayout(ip)); - return .{ .scalar = struct_type.size(ip).* }; - }, - } - }, - .anon_struct_type => |tuple| { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy, .eager => {}, - } - const field_count = tuple.types.len; - if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ - .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))), - }, - .eager => {}, - } - - assert(union_type.haveLayout(ip)); - return .{ .scalar = union_type.size(ip).* }; - }, - .opaque_type => unreachable, // no size available - .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - } - } - - fn abiSizeAdvancedOptional( - ty: Type, - mod: *Module, - strat: AbiAlignmentAdvancedStrat, - ) Module.CompileError!AbiSizeAdvanced { - const child_ty = ty.optionalChild(mod); - - if (child_ty.isNoReturn(mod)) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - - if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - else => |e| return e, - })) return AbiSizeAdvanced{ .scalar = 1 }; - - if (ty.optionalReprIsPayload(mod)) { - return abiSizeAdvanced(child_ty, mod, strat); - } - - const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => return .{ .val = Value.fromInterned((try mod.intern(.{ .int = .{ - .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.toIntern() }, - } }))) }, - }, - }; - - // Optional types are represented as a struct with the child type as the first - // field and a boolean as the second.
- // Since the child type's abi alignment is - // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal - // to the child type's ABI alignment. - return AbiSizeAdvanced{ - .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size, - }; - } - - pub fn ptrAbiAlignment(target: Target) Alignment { - return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8)); - } - - pub fn intAbiSize(bits: u16, target: Target, use_llvm: bool) u64 { - return intAbiAlignment(bits, target, use_llvm).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8))); - } - - pub fn intAbiAlignment(bits: u16, target: Target, use_llvm: bool) Alignment { - return switch (target.cpu.arch) { - .x86 => switch (bits) { - 0 => .none, - 1...8 => .@"1", - 9...16 => .@"2", - 17...64 => .@"4", - else => .@"16", - }, - .x86_64 => switch (bits) { - 0 => .none, - 1...8 => .@"1", - 9...16 => .@"2", - 17...32 => .@"4", - 33...64 => .@"8", - else => switch (target_util.zigBackend(target, use_llvm)) { - .stage2_x86_64 => .@"8", - else => .@"16", - }, - }, - else => return Alignment.fromByteUnits(@min( - std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))), - maxIntAlignment(target, use_llvm), - )), - }; - } - - pub fn maxIntAlignment(target: std.Target, use_llvm: bool) u16 { - return switch (target.cpu.arch) { - .avr => 1, - .msp430 => 2, - .xcore => 4, - - .arm, - .armeb, - .thumb, - .thumbeb, - .hexagon, - .mips, - .mipsel, - .powerpc, - .powerpcle, - .r600, - .amdgcn, - .riscv32, - .sparc, - .sparcel, - .s390x, - .lanai, - .wasm32, - .wasm64, - => 8, - - // For these, LLVMABIAlignmentOfType(i128) reports 8. Note that 16 - // is a relevant number in three cases: - // 1. Different machine code instruction when loading into SIMD register. - // 2. The C ABI wants 16 for extern structs. - // 3. 16-byte cmpxchg needs 16-byte alignment. - // Same logic for powerpc64, mips64, sparc64. - .powerpc64, - .powerpc64le, - .mips64, - .mips64el, - .sparc64, - => switch (target.ofmt) { - .c => 16, - else => 8, - }, - - .x86_64 => switch (target_util.zigBackend(target, use_llvm)) { - .stage2_x86_64 => 8, - else => 16, - }, - - // Even LLVMABIAlignmentOfType(i128) agrees on these targets. - .x86, - .aarch64, - .aarch64_be, - .aarch64_32, - .riscv64, - .bpfel, - .bpfeb, - .nvptx, - .nvptx64, - => 16, - - // Below this comment are unverified but based on the fact that C requires - // int128_t to be 16 bytes aligned, it's a safe default. - .spu_2, - .csky, - .arc, - .m68k, - .tce, - .tcele, - .le32, - .amdil, - .hsail, - .spir, - .kalimba, - .renderscript32, - .spirv, - .spirv32, - .shave, - .le64, - .amdil64, - .hsail64, - .spir64, - .renderscript64, - .ve, - .spirv64, - .dxil, - .loongarch32, - .loongarch64, - .xtensa, - => 16, - }; - } - - pub fn bitSize(ty: Type, mod: *Module) u64 { - return bitSizeAdvanced(ty, mod, null) catch unreachable; - } - - /// If you pass `opt_sema`, any recursive type resolutions will happen if - /// necessary, possibly returning a CompileError. Passing `null` instead asserts - /// the type is fully resolved, and there will be no error, guaranteed.
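`intAbiSize` above is just `intAbiAlignment(bits, ...).forward(ceil(bits / 8))`: store the minimum number of bytes, then round up to the ABI alignment. Concretely, assuming an x86_64-style target where 17–32-bit integers align to 4 (per the switch above):

```zig
const std = @import("std");

test "u24 pads out to its ABI alignment" {
    // ceil(24 / 8) == 3 payload bytes, forwarded to alignment 4 -> size 4.
    try std.testing.expectEqual(@as(usize, 4), @sizeOf(u24));
    try std.testing.expectEqual(@as(usize, 4), @alignOf(u24));
}
```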
- pub fn bitSizeAdvanced( - ty: Type, - mod: *Module, - opt_sema: ?*Sema, - ) Module.CompileError!u64 { - const target = mod.getTarget(); - const ip = &mod.intern_pool; - - const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - - switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| return int_type.bits, - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth(), - }, - .anyframe_type => return target.ptrBitWidth(), - - .array_type => |array_type| { - const len = array_type.lenIncludingSentinel(); - if (len == 0) return 0; - const elem_ty = Type.fromInterned(array_type.child); - const elem_size = @max( - (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0, - (try elem_ty.abiSizeAdvanced(mod, strat)).scalar, - ); - if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); - return (len - 1) * 8 * elem_size + elem_bit_size; - }, - .vector_type => |vector_type| { - const child_ty = Type.fromInterned(vector_type.child); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); - return elem_bit_size * vector_type.len; - }, - .opt_type => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, - - .error_set_type, .inferred_error_set_type => return mod.errorSetBits(), - - .error_union_type => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, - .func_type => unreachable, // represents machine code; not a pointer - .simple_type => |t| switch (t) { - .f16 => return 16, - .f32 => return 32, - .f64 => return 64, - .f80 => return 80, - .f128 => return 128, - - .usize, - .isize, - => return target.ptrBitWidth(), - - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), - - .bool => return 1, - .void => return 0, - - .anyerror, - .adhoc_inferred_error_set, - => return mod.errorSetBits(), - - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .comptime_float => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, - .generic_poison => unreachable, - - .atomic_order => unreachable, - .atomic_rmw_op => unreachable, - .calling_convention => unreachable, - .address_space => unreachable, - .float_mode => unreachable, - .reduce_op => unreachable, - .call_modifier => unreachable, - .prefetch_options => unreachable, - .export_options => unreachable, - .extern_options => unreachable, - .type_info => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - const is_packed = struct_type.layout == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); - } - if (is_packed) { - return try 
- Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(mod, opt_sema); - } - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - }, - - .anon_struct_type => { - if (opt_sema) |sema| try sema.resolveTypeFields(ty); - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - const is_packed = ty.containerLayout(mod) == .@"packed"; - if (opt_sema) |sema| { - try sema.resolveTypeFields(ty); - if (is_packed) try sema.resolveTypeLayout(ty); - } - if (!is_packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - assert(union_type.flagsPtr(ip).status.haveFieldTypes()); - - var size: u64 = 0; - for (0..union_type.field_types.len) |field_index| { - const field_ty = union_type.field_types.get(ip)[field_index]; - size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema)); - } - - return size; - }, - .opaque_type => unreachable, - .enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema), - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - } - } - - /// Returns true if the type's layout is already resolved and it is safe - /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip), - .union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip), - .array_type => |array_type| { - if (array_type.lenIncludingSentinel() == 0) return true; - return Type.fromInterned(array_type.child).layoutIsResolved(mod); - }, - .opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod), - .error_union_type => |k| Type.fromInterned(k.payload_type).layoutIsResolved(mod), - else => true, - }; - } - - pub fn isSinglePointer(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_info| ptr_info.flags.size == .One, - else => false, - }; - } - - /// Asserts `ty` is a pointer. - pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { - return ptrSizeOrNull(ty, mod).?; - } - - /// Returns `null` if `ty` is not a pointer.
- pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_info| ptr_info.flags.size, - else => null, - }; - } - - pub fn isSlice(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.size == .Slice, - else => false, - }; - } - - pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { - return Type.fromInterned(mod.intern_pool.slicePtrType(ty.toIntern())); - } - - pub fn isConstPtr(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.is_const, - else => false, - }; - } - - pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { - return isVolatilePtrIp(ty, &mod.intern_pool); - } - - pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { - return switch (ip.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.is_volatile, - else => false, - }; - } - - pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.is_allowzero, - .opt_type => true, - else => false, - }; - } - - pub fn isCPtr(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.size == .C, - else => false, - }; - } - - pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice => false, - .One, .Many, .C => true, - }, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| switch (p.flags.size) { - .Slice, .C => false, - .Many, .One => !p.flags.is_allowzero, - }, - else => false, - }, - else => false, - }; - } - - /// For pointer-like optionals, returns true, otherwise returns the allowzero property - /// of pointers. - pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { - if (ty.isPtrLikeOptional(mod)) { - return true; - } - return ty.ptrInfo(mod).flags.is_allowzero; - } - - /// See also `isPtrLikeOptional`. - pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) { - .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero, - .error_set_type, .inferred_error_set_type => true, - else => false, - }, - .ptr_type => |ptr_type| ptr_type.flags.size == .C, - else => false, - }; - } - - /// Returns true if the type is optional and would be lowered to a single pointer - /// address value, using 0 for null. Note that this returns true for C pointers. - /// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. - pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.flags.size == .C, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .Slice, .C => false, - .Many, .One => !ptr_type.flags.is_allowzero, - }, - else => false, - }, - else => false, - }; - } - - /// For *[N]T, returns [N]T. - /// For *T, returns T. - /// For [*]T, returns T. 
- pub fn childType(ty: Type, mod: *const Module) Type { - return childTypeIp(ty, &mod.intern_pool); - } - - pub fn childTypeIp(ty: Type, ip: *const InternPool) Type { - return Type.fromInterned(ip.childType(ty.toIntern())); - } - - /// For *[N]T, returns T. - /// For ?*T, returns T. - /// For ?*[N]T, returns T. - /// For ?[*]T, returns T. - /// For *T, returns T. - /// For [*]T, returns T. - /// For [N]T, returns T. - /// For []T, returns T. - /// For anyframe->T, returns T. - pub fn elemType2(ty: Type, mod: *const Module) Type { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .One => Type.fromInterned(ptr_type.child).shallowElemType(mod), - .Many, .C, .Slice => Type.fromInterned(ptr_type.child), - }, - .anyframe_type => |child| { - assert(child != .none); - return Type.fromInterned(child); - }, - .vector_type => |vector_type| Type.fromInterned(vector_type.child), - .array_type => |array_type| Type.fromInterned(array_type.child), - .opt_type => |child| Type.fromInterned(mod.intern_pool.childType(child)), - else => unreachable, - }; - } - - fn shallowElemType(child_ty: Type, mod: *const Module) Type { - return switch (child_ty.zigTypeTag(mod)) { - .Array, .Vector => child_ty.childType(mod), - else => child_ty, - }; - } - - /// For vectors, returns the element type. Otherwise returns self. - pub fn scalarType(ty: Type, mod: *Module) Type { - return switch (ty.zigTypeTag(mod)) { - .Vector => ty.childType(mod), - else => ty, - }; - } - - /// Asserts that the type is an optional. - /// Note that for C pointers this returns the type unmodified. - pub fn optionalChild(ty: Type, mod: *const Module) Type { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .opt_type => |child| Type.fromInterned(child), - .ptr_type => |ptr_type| b: { - assert(ptr_type.flags.size == .C); - break :b ty; - }, - else => unreachable, - }; - } - - /// Returns the tag type of a union, if the type is a union and it has a tag type. - /// Otherwise, returns `null`. - pub fn unionTagType(ty: Type, mod: *Module) ?Type { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .union_type => {}, - else => return null, - } - const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).runtime_tag) { - .tagged => { - assert(union_type.flagsPtr(ip).status.haveFieldTypes()); - return Type.fromInterned(union_type.enum_tag_ty); - }, - else => return null, - } - } - - /// Same as `unionTagType` but includes safety tag. - /// Codegen should use this version. - pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - if (!union_type.hasTag(ip)) return null; - assert(union_type.haveFieldTypes(ip)); - return Type.fromInterned(union_type.enum_tag_ty); - }, - else => null, - }; - } - - /// Asserts the type is a union; returns the tag type, even if the tag will - /// not be stored at runtime. 
- pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type { - const union_obj = mod.typeToUnion(ty).?; - return Type.fromInterned(union_obj.enum_tag_ty); - } - - pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) ?Type { - const ip = &mod.intern_pool; - const union_obj = mod.typeToUnion(ty).?; - const union_fields = union_obj.field_types.get(ip); - const index = mod.unionTagFieldIndex(union_obj, enum_tag) orelse return null; - return Type.fromInterned(union_fields[index]); - } - - pub fn unionFieldTypeByIndex(ty: Type, index: usize, mod: *Module) Type { - const ip = &mod.intern_pool; - const union_obj = mod.typeToUnion(ty).?; - return Type.fromInterned(union_obj.field_types.get(ip)[index]); - } - - pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { - const union_obj = mod.typeToUnion(ty).?; - return mod.unionTagFieldIndex(union_obj, enum_tag); - } - - pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - const union_obj = mod.typeToUnion(ty).?; - for (union_obj.field_types.get(ip)) |field_ty| { - if (Type.fromInterned(field_ty).hasRuntimeBits(mod)) return false; - } - return true; - } - - /// Returns the type used for backing storage of this union during comptime operations. - /// Asserts the type is either an extern or packed union. - pub fn unionBackingType(ty: Type, mod: *Module) !Type { - return switch (ty.containerLayout(mod)) { - .@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }), - .@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))), - .auto => unreachable, - }; - } - - pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout { - const ip = &mod.intern_pool; - const union_obj = ip.loadUnionType(ty.toIntern()); - return mod.getUnionLayout(union_obj); - } - - pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).layout, - .anon_struct_type => .auto, - .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout, - else => unreachable, - }; - } - - /// Asserts that the type is an error union. - pub fn errorUnionPayload(ty: Type, mod: *Module) Type { - return Type.fromInterned(mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type); - } - - /// Asserts that the type is an error union. - pub fn errorUnionSet(ty: Type, mod: *Module) Type { - return Type.fromInterned(mod.intern_pool.errorUnionSet(ty.toIntern())); - } - - /// Returns false for unresolved inferred error sets. - pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - .anyerror_type, .adhoc_inferred_error_set_type => false, - else => switch (ip.indexToKey(ty.toIntern())) { - .error_set_type => |error_set_type| error_set_type.names.len == 0, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { - .none, .anyerror_type => false, - else => |t| ip.indexToKey(t).error_set_type.names.len == 0, - }, - else => unreachable, - }, - }; - } - - /// Returns true if it is an error set that includes anyerror, false otherwise. - /// Note that the result may be a false negative if the type did not get error set - /// resolution prior to this call. 
- pub fn isAnyError(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - .anyerror_type => true, - .adhoc_inferred_error_set_type => false, - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type, - else => false, - }, - }; - } - - pub fn isError(ty: Type, mod: *const Module) bool { - return switch (ty.zigTypeTag(mod)) { - .ErrorUnion, .ErrorSet => true, - else => false, - }; - } - - /// Returns whether ty, which must be an error set, includes an error `name`. - /// Might return a false negative if `ty` is an inferred error set and not fully - /// resolved yet. - pub fn errorSetHasFieldIp( - ip: *const InternPool, - ty: InternPool.Index, - name: InternPool.NullTerminatedString, - ) bool { - return switch (ty) { - .anyerror_type => true, - else => switch (ip.indexToKey(ty)) { - .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { - .anyerror_type => true, - .none => false, - else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null, - }, - else => unreachable, - }, - }; - } - - /// Returns whether ty, which must be an error set, includes an error `name`. - /// Might return a false negative if `ty` is an inferred error set and not fully - /// resolved yet. - pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - .anyerror_type => true, - else => switch (ip.indexToKey(ty.toIntern())) { - .error_set_type => |error_set_type| { - // If the string is not interned, then the field certainly is not present. - const field_name_interned = ip.getString(name).unwrap() orelse return false; - return error_set_type.nameIndex(ip, field_name_interned) != null; - }, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { - .anyerror_type => true, - .none => false, - else => |t| { - // If the string is not interned, then the field certainly is not present. - const field_name_interned = ip.getString(name).unwrap() orelse return false; - return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null; - }, - }, - else => unreachable, - }, - }; - } - - /// Asserts the type is an array or vector or struct. - pub fn arrayLen(ty: Type, mod: *const Module) u64 { - return ty.arrayLenIp(&mod.intern_pool); - } - - pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 { - return ip.aggregateTypeLen(ty.toIntern()); - } - - pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { - return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern()); - } - - pub fn vectorLen(ty: Type, mod: *const Module) u32 { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .vector_type => |vector_type| vector_type.len, - .anon_struct_type => |tuple| @intCast(tuple.types.len), - else => unreachable, - }; - } - - /// Asserts the type is an array, pointer or vector. - pub fn sentinel(ty: Type, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .vector_type, - .struct_type, - .anon_struct_type, - => null, - - .array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, - .ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, - - else => unreachable, - }; - } - - /// Returns true if and only if the type is a fixed-width integer. 
- pub fn isInt(self: Type, mod: *const Module) bool { - return self.toIntern() != .comptime_int_type and - mod.intern_pool.isIntegerType(self.toIntern()); - } - - /// Returns true if and only if the type is a fixed-width, signed integer. - pub fn isSignedInt(ty: Type, mod: *const Module) bool { - return switch (ty.toIntern()) { - .c_char_type => mod.getTarget().charSignedness() == .signed, - .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .int_type => |int_type| int_type.signedness == .signed, - else => false, - }, - }; - } - - /// Returns true if and only if the type is a fixed-width, unsigned integer. - pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { - return switch (ty.toIntern()) { - .c_char_type => mod.getTarget().charSignedness() == .unsigned, - .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .int_type => |int_type| int_type.signedness == .unsigned, - else => false, - }, - }; - } - - /// Returns true for integers, enums, error sets, and packed structs. - /// If this function returns true, then intInfo() can be called on the type. - pub fn isAbiInt(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Int, .Enum, .ErrorSet => true, - .Struct => ty.containerLayout(mod) == .@"packed", - else => false, - }; - } - - /// Asserts the type is an integer, enum, error set, or vector of one of them. - pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { - const ip = &mod.intern_pool; - const target = mod.getTarget(); - var ty = starting_ty; - - while (true) switch (ty.toIntern()) { - .anyerror_type, .adhoc_inferred_error_set_type => { - return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; - }, - .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, - .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, - .c_char_type => return .{ .signedness = mod.getTarget().charSignedness(), .bits = target.c_type_bit_size(.char) }, - .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, - .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, - .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, - .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, - .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, - .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, - .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, - .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| return int_type, - .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*), - .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), - .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child), - - .error_set_type, .inferred_error_set_type => { - return .{ .signedness = .unsigned, .bits = mod.errorSetBits() }; - }, - - .anon_struct_type => unreachable, - - .ptr_type => unreachable, - .anyframe_type => unreachable, - .array_type => unreachable, - - .opt_type => 
- unreachable, - .error_union_type => unreachable, - .func_type => unreachable, - .simple_type => unreachable, // handled via Index enum tag above - - .union_type => unreachable, - .opaque_type => unreachable, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - }; - } - - pub fn isNamedInt(ty: Type) bool { - return switch (ty.toIntern()) { - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - => true, - - else => false, - }; - } - - /// Returns `false` for `comptime_float`. - pub fn isRuntimeFloat(ty: Type) bool { - return switch (ty.toIntern()) { - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .c_longdouble_type, - => true, - - else => false, - }; - } - - /// Returns `true` for `comptime_float`. - pub fn isAnyFloat(ty: Type) bool { - return switch (ty.toIntern()) { - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .c_longdouble_type, - .comptime_float_type, - => true, - - else => false, - }; - } - - /// Asserts the type is a fixed-size float or comptime_float. - /// Returns 128 for comptime_float types. - pub fn floatBits(ty: Type, target: Target) u16 { - return switch (ty.toIntern()) { - .f16_type => 16, - .f32_type => 32, - .f64_type => 64, - .f80_type => 80, - .f128_type, .comptime_float_type => 128, - .c_longdouble_type => target.c_type_bit_size(.longdouble), - - else => unreachable, - }; - } - - /// Asserts the type is a function or a function pointer. - pub fn fnReturnType(ty: Type, mod: *Module) Type { - return Type.fromInterned(mod.intern_pool.funcTypeReturnType(ty.toIntern())); - } - - /// Asserts the type is a function. - pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { - return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc; - } - - pub fn isValidParamType(self: Type, mod: *const Module) bool { - return switch (self.zigTypeTagOrPoison(mod) catch return true) { - .Opaque, .NoReturn => false, - else => true, - }; - } - - pub fn isValidReturnType(self: Type, mod: *const Module) bool { - return switch (self.zigTypeTagOrPoison(mod) catch return true) { - .Opaque => false, - else => true, - }; - } - - /// Asserts the type is a function. - pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { - return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args; - } - - pub fn isNumeric(ty: Type, mod: *const Module) bool { - return switch (ty.toIntern()) { - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .c_longdouble_type, - .comptime_int_type, - .comptime_float_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - => true, - - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .int_type => true, - else => false, - }, - }; - } - - /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which - /// resolves field types rather than asserting they are already resolved.
- pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { - var ty = starting_type; - const ip = &mod.intern_pool; - while (true) switch (ty.toIntern()) { - .empty_struct_type => return Value.empty_struct, - - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => |int_type| { - if (int_type.bits == 0) { - return try mod.intValue(ty, 0); - } else { - return null; - } - }, - - .ptr_type, - .error_union_type, - .func_type, - .anyframe_type, - .error_set_type, - .inferred_error_set_type, - => return null, - - inline .array_type, .vector_type => |seq_type, seq_tag| { - const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; - if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = &.{} }, - } }))); - if (try Type.fromInterned(seq_type.child).onePossibleValue(mod)) |opv| { - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .repeated_elem = opv.toIntern() }, - } }))); - } - return null; - }, - .opt_type => |child| { - if (child == .noreturn_type) { - return try mod.nullValue(ty); - } else { - return null; - } - }, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .type, - .anyerror, - .comptime_int, - .comptime_float, - .enum_literal, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .adhoc_inferred_error_set, - => return null, - - .void => return Value.void, - .noreturn => return Value.@"unreachable", - .null => return Value.null, - .undefined => return Value.undef, - - .generic_poison => unreachable, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - assert(struct_type.haveFieldTypes(ip)); - if (struct_type.knownNonOpv(ip)) - return null; - const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len); - defer mod.gpa.free(field_vals); - for (field_vals, 0..) |*field_val, i_usize| { - const i: u32 = @intCast(i_usize); - if (struct_type.fieldIsComptime(ip, i)) { - assert(struct_type.haveFieldInits(ip)); - field_val.* = struct_type.field_inits.get(ip)[i]; - continue; - } - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - if (try field_ty.onePossibleValue(mod)) |field_opv| { - field_val.* = field_opv.toIntern(); - } else return null; - } - - // In this case the struct has no runtime-known fields and - // therefore has one possible value. - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = field_vals }, - } }))); - }, - - .anon_struct_type => |tuple| { - for (tuple.values.get(ip)) |val| { - if (val == .none) return null; - } - // In this case the struct has all comptime-known fields and - // therefore has one possible value. 
- // TODO: write something like getCoercedInts to avoid needing to dupe - const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip)); - defer mod.gpa.free(duped_values); - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = duped_values }, - } }))); - }, - - .union_type => { - const union_obj = ip.loadUnionType(ty.toIntern()); - const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse - return null; - if (union_obj.field_types.len == 0) { - const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); - return Value.fromInterned(only); - } - const only_field_ty = union_obj.field_types.get(ip)[0]; - const val_val = (try Type.fromInterned(only_field_ty).onePossibleValue(mod)) orelse - return null; - const only = try mod.intern(.{ .un = .{ - .ty = ty.toIntern(), - .tag = tag_val.toIntern(), - .val = val_val.toIntern(), - } }); - return Value.fromInterned(only); - }, - .opaque_type => return null, - .enum_type => { - const enum_type = ip.loadEnumType(ty.toIntern()); - switch (enum_type.tag_mode) { - .nonexhaustive => { - if (enum_type.tag_ty == .comptime_int_type) return null; - - if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| { - const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = int_opv.toIntern(), - } }); - return Value.fromInterned(only); - } - - return null; - }, - .auto, .explicit => { - if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null; - - switch (enum_type.names.len) { - 0 => { - const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); - return Value.fromInterned(only); - }, - 1 => { - if (enum_type.values.len == 0) { - const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = try mod.intern(.{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = 0 }, - } }), - } }); - return Value.fromInterned(only); - } else { - return Value.fromInterned(enum_type.values.get(ip)[0]); - } - }, - else => return null, - } - }, - } - }, - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - }; - } - - /// During semantic analysis, instead call `Sema.typeRequiresComptime` which - /// resolves field types rather than asserting they are already resolved. - pub fn comptimeOnly(ty: Type, mod: *Module) bool { - return ty.comptimeOnlyAdvanced(mod, null) catch unreachable; - } - - /// `generic_poison` will return false. - /// May return false negatives when structs and unions are having their field types resolved. - /// If `opt_sema` is not provided, asserts that the type is sufficiently resolved. 
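To make "comptime-only" concrete before the implementation below: embedding a `type` field forces a struct to be comptime-only, so its values must be comptime-known. A minimal illustration in plain Zig, assuming only `std`:

```zig
const std = @import("std");

test "a struct containing a type field is comptime-only" {
    const Named = struct { T: type, bits: u16 };
    // `type` values exist only at compile time, so `Named` requires
    // comptime; all of its values must be comptime-known.
    const n = comptime Named{ .T = u32, .bits = @bitSizeOf(u32) };
    try std.testing.expect(n.T == u32);
    try std.testing.expect(n.bits == 32);
}
```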
- pub fn comptimeOnlyAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) Module.CompileError!bool { - const ip = &mod.intern_pool; - return switch (ty.toIntern()) { - .empty_struct_type => false, - - else => switch (ip.indexToKey(ty.toIntern())) { - .int_type => false, - .ptr_type => |ptr_type| { - const child_ty = Type.fromInterned(ptr_type.child); - switch (child_ty.zigTypeTag(mod)) { - .Fn => return !try child_ty.fnHasRuntimeBitsAdvanced(mod, opt_sema), - .Opaque => return false, - else => return child_ty.comptimeOnlyAdvanced(mod, opt_sema), - } - }, - .anyframe_type => |child| { - if (child == .none) return false; - return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema); - }, - .array_type => |array_type| return Type.fromInterned(array_type.child).comptimeOnlyAdvanced(mod, opt_sema), - .vector_type => |vector_type| return Type.fromInterned(vector_type.child).comptimeOnlyAdvanced(mod, opt_sema), - .opt_type => |child| return Type.fromInterned(child).comptimeOnlyAdvanced(mod, opt_sema), - .error_union_type => |error_union_type| return Type.fromInterned(error_union_type.payload_type).comptimeOnlyAdvanced(mod, opt_sema), - - .error_set_type, - .inferred_error_set_type, - => false, - - // These are function bodies, not function pointers. - .func_type => true, - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .void, - .anyerror, - .adhoc_inferred_error_set, - .noreturn, - .generic_poison, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - => false, - - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - .type_info, - => true, - }, - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - // packed structs cannot be comptime-only because they have a well-defined - // memory layout and every field has a well-defined bit pattern. - if (struct_type.layout == .@"packed") - return false; - - // A struct with no fields is not comptime-only. - return switch (struct_type.flagsPtr(ip).requires_comptime) { - .no, .wip => false, - .yes => true, - .unknown => { - // The type is not resolved; assert that we have a Sema. - const sema = opt_sema.?; - - if (struct_type.flagsPtr(ip).field_types_wip) - return false; - - struct_type.flagsPtr(ip).requires_comptime = .wip; - errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; - - try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type); - - for (0..struct_type.field_types.len) |i_usize| { - const i: u32 = @intCast(i_usize); - if (struct_type.fieldIsComptime(ip, i)) continue; - const field_ty = struct_type.field_types.get(ip)[i]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { - // Note that this does not cause the layout to - // be considered resolved. Comptime-only types - // still maintain a layout of their - // runtime-known fields. 
- struct_type.flagsPtr(ip).requires_comptime = .yes; - return true; - } - } - - struct_type.flagsPtr(ip).requires_comptime = .no; - return false; - }, - }; - }, - - .anon_struct_type => |tuple| { - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { - const have_comptime_val = val != .none; - if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) return true; - } - return false; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - // The type is not resolved; assert that we have a Sema. - const sema = opt_sema.?; - - if (union_type.flagsPtr(ip).status == .field_types_wip) - return false; - - union_type.flagsPtr(ip).requires_comptime = .wip; - errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; - - try sema.resolveTypeFieldsUnion(ty, union_type); - - for (0..union_type.field_types.len) |field_idx| { - const field_ty = union_type.field_types.get(ip)[field_idx]; - if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) { - union_type.flagsPtr(ip).requires_comptime = .yes; - return true; - } - } - - union_type.flagsPtr(ip).requires_comptime = .no; - return false; - }, - } - }, - - .opaque_type => false, - - .enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema), - - // values, not types - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - // memoization, not types - .memoized_call, - => unreachable, - }, - }; - } - - pub fn isVector(ty: Type, mod: *const Module) bool { - return ty.zigTypeTag(mod) == .Vector; - } - - /// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len. - pub fn totalVectorBits(ty: Type, zcu: *Zcu) u64 { - if (!ty.isVector(zcu)) return 0; - const v = zcu.intern_pool.indexToKey(ty.toIntern()).vector_type; - return v.len * Type.fromInterned(v.child).bitSize(zcu); - } - - pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Array, .Vector => true, - else => false, - }; - } - - pub fn isIndexable(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Array, .Vector => true, - .Pointer => switch (ty.ptrSize(mod)) { - .Slice, .Many, .C => true, - .One => switch (ty.childType(mod).zigTypeTag(mod)) { - .Array, .Vector => true, - .Struct => ty.childType(mod).isTuple(mod), - else => false, - }, - }, - .Struct => ty.isTuple(mod), - else => false, - }; - } - - pub fn indexableHasLen(ty: Type, mod: *Module) bool { - return switch (ty.zigTypeTag(mod)) { - .Array, .Vector => true, - .Pointer => switch (ty.ptrSize(mod)) { - .Many, .C => false, - .Slice => true, - .One => switch (ty.childType(mod).zigTypeTag(mod)) { - .Array, .Vector => true, - .Struct => ty.childType(mod).isTuple(mod), - else => false, - }, - }, - .Struct => ty.isTuple(mod), - else => false, - }; - } - - /// Asserts that the type can have a namespace. - pub fn getNamespaceIndex(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex { - return ty.getNamespace(zcu).?; - } - - /// Returns null if the type has no namespace. 
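For orientation before the namespace queries below: only container types (structs, unions, enums, opaques) carry a namespace of declarations; everything else yields the `null` case. In plain Zig terms:

```zig
const std = @import("std");

test "only container types have a namespace" {
    // A struct's namespace holds its declarations.
    const S = struct {
        pub const answer: u32 = 42;
    };
    try std.testing.expect(S.answer == 42);
    // Primitive types carry no namespace: `u32.answer` would be a
    // compile error, corresponding to the `null` result below.
}
```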
- pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex {
- const ip = &zcu.intern_pool;
- return switch (ip.indexToKey(ty.toIntern())) {
- .opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace,
- .struct_type => ip.loadStructType(ty.toIntern()).namespace,
- .union_type => ip.loadUnionType(ty.toIntern()).namespace,
- .enum_type => ip.loadEnumType(ty.toIntern()).namespace,
-
- .anon_struct_type => .none,
- .simple_type => |s| switch (s) {
- .anyopaque,
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_modifier,
- .prefetch_options,
- .export_options,
- .extern_options,
- .type_info,
- => .none,
- else => null,
- },
-
- else => null,
- };
- }
-
- // Works for integers and vectors of integers.
- pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
- return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
- .ty = dest_ty.toIntern(),
- .storage = .{ .repeated_elem = scalar.toIntern() },
- } }))) else scalar;
- }
-
- /// Asserts that the type is an integer.
- pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const info = ty.intInfo(mod);
- if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0);
- if (info.bits == 0) return mod.intValue(dest_ty, -1);
-
- if (std.math.cast(u6, info.bits - 1)) |shift| {
- const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
- }
-
- var res = try std.math.big.int.Managed.init(mod.gpa);
- defer res.deinit();
-
- try res.setTwosCompIntLimit(.min, info.signedness, info.bits);
-
- return mod.intValue_big(dest_ty, res.toConst());
- }
-
- // Works for integers and vectors of integers.
- /// The returned Value will have type dest_ty.
- pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod));
- return if (ty.zigTypeTag(mod) == .Vector) Value.fromInterned((try mod.intern(.{ .aggregate = .{
- .ty = dest_ty.toIntern(),
- .storage = .{ .repeated_elem = scalar.toIntern() },
- } }))) else scalar;
- }
-
- /// The returned Value will have type dest_ty.
- pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
- const info = ty.intInfo(mod);
-
- switch (info.bits) {
- 0 => return switch (info.signedness) {
- .signed => try mod.intValue(dest_ty, -1),
- .unsigned => try mod.intValue(dest_ty, 0),
- },
- 1 => return switch (info.signedness) {
- .signed => try mod.intValue(dest_ty, 0),
- .unsigned => try mod.intValue(dest_ty, 1),
- },
- else => {},
- }
-
- if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
- .signed => {
- const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
- },
- .unsigned => {
- const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
- return mod.intValue(dest_ty, n);
- },
- };
-
- var res = try std.math.big.int.Managed.init(mod.gpa);
- defer res.deinit();
-
- try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
-
- return mod.intValue_big(dest_ty, res.toConst());
- }
-
- /// Asserts the type is an enum or a union.
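Backing up to `minIntScalar`/`maxIntScalar` above: the `std.math.cast(u6, ...)` fast path narrows the 64-bit extreme with an arithmetic shift, so big-int math is only needed for widths beyond 64 bits. A standalone check of that trick, assuming nothing beyond `std`:

```zig
const std = @import("std");

test "shift trick matches std.math two's complement limits" {
    // Shifting the i64 extremes right by (63 - (bits - 1)) narrows
    // them to the requested width; shown here for a 7-bit signed int.
    const shift: u6 = 7 - 1;
    const min = @as(i64, std.math.minInt(i64)) >> (63 - shift);
    const max = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
    try std.testing.expectEqual(@as(i64, std.math.minInt(i7)), min);
    try std.testing.expectEqual(@as(i64, std.math.maxInt(i7)), max);
}
```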
- pub fn intTagType(ty: Type, mod: *Module) Type { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod), - .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), - else => unreachable, - }; - } - - pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) { - .nonexhaustive => true, - .auto, .explicit => false, - }, - else => false, - }; - } - - // Asserts that `ty` is an error set and not `anyerror`. - // Asserts that `ty` is resolved if it is an inferred error set. - pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .error_set_type => |x| x.names, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { - .none => unreachable, // unresolved inferred error set - .anyerror_type => unreachable, - else => |t| ip.indexToKey(t).error_set_type.names, - }, - else => unreachable, - }; - } - - pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice { - return mod.intern_pool.loadEnumType(ty.toIntern()).names; - } - - pub fn enumFieldCount(ty: Type, mod: *Module) usize { - return mod.intern_pool.loadEnumType(ty.toIntern()).names.len; - } - - pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString { - const ip = &mod.intern_pool; - return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index]; - } - - pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 { - const ip = &mod.intern_pool; - const enum_type = ip.loadEnumType(ty.toIntern()); - return enum_type.nameIndex(ip, field_name); - } - - /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or - /// an integer which represents the enum value. Returns the field index in - /// declaration order, or `null` if `enum_tag` does not match any field. - pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { - const ip = &mod.intern_pool; - const enum_type = ip.loadEnumType(ty.toIntern()); - const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) { - .int => enum_tag.toIntern(), - .enum_tag => |info| info.int, - else => unreachable, - }; - assert(ip.typeOf(int_tag) == enum_type.tag_ty); - return enum_type.tagValueIndex(ip, int_tag); - } - - /// Returns none in the case of a tuple which uses the integer index as the field name. - pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index), - .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index), - else => unreachable, - }; - } - - pub fn structFieldCount(ty: Type, mod: *Module) u32 { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).field_types.len, - .anon_struct_type => |anon_struct| anon_struct.types.len, - else => unreachable, - }; - } - - /// Supports structs and unions. 
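Several of the enum helpers above (`intTagType`, `enumFieldCount`, `enumFieldIndex`) have direct user-level counterparts through `@typeInfo`; a brief illustration in plain Zig:

```zig
const std = @import("std");

test "enum tag type and field lookup" {
    // The integer tag type and the declared field names are both
    // visible through @typeInfo, mirroring intTagType/enumFieldIndex.
    const E = enum(u8) { a = 1, b = 2, _ }; // non-exhaustive
    try std.testing.expect(@typeInfo(E).Enum.tag_type == u8);
    try std.testing.expect(std.meta.fieldIndex(E, "b").? == 1);
    try std.testing.expect(std.meta.fieldIndex(E, "missing") == null);
}
```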
- pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]), - .union_type => { - const union_obj = ip.loadUnionType(ty.toIntern()); - return Type.fromInterned(union_obj.field_types.get(ip)[index]); - }, - .anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]), - else => unreachable, - }; - } - - pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment { - return ty.structFieldAlignAdvanced(index, zcu, null) catch unreachable; - } - - pub fn structFieldAlignAdvanced(ty: Type, index: usize, zcu: *Zcu, opt_sema: ?*Sema) !Alignment { - const ip = &zcu.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - assert(struct_type.layout != .@"packed"); - const explicit_align = struct_type.fieldAlign(ip, index); - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); - if (opt_sema) |sema| { - return sema.structFieldAlignment(explicit_align, field_ty, struct_type.layout); - } else { - return zcu.structFieldAlignment(explicit_align, field_ty, struct_type.layout); - } - }, - .anon_struct_type => |anon_struct| { - return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentAdvanced(zcu, if (opt_sema) |sema| .{ .sema = sema } else .eager)).scalar; - }, - .union_type => { - const union_obj = ip.loadUnionType(ty.toIntern()); - if (opt_sema) |sema| { - return sema.unionFieldAlignment(union_obj, @intCast(index)); - } else { - return zcu.unionFieldNormalAlignment(union_obj, @intCast(index)); - } - }, - else => unreachable, - } - } - - pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - const val = struct_type.fieldInit(ip, index); - // TODO: avoid using `unreachable` to indicate this. - if (val == .none) return Value.@"unreachable"; - return Value.fromInterned(val); - }, - .anon_struct_type => |anon_struct| { - const val = anon_struct.values.get(ip)[index]; - // TODO: avoid using `unreachable` to indicate this. 
- if (val == .none) return Value.@"unreachable"; - return Value.fromInterned(val); - }, - else => unreachable, - } - } - - pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.fieldIsComptime(ip, index)) { - assert(struct_type.haveFieldInits(ip)); - return Value.fromInterned(struct_type.field_inits.get(ip)[index]); - } else { - return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(mod); - } - }, - .anon_struct_type => |tuple| { - const val = tuple.values.get(ip)[index]; - if (val == .none) { - return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(mod); - } else { - return Value.fromInterned(val); - } - }, - else => unreachable, - } - } - - pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index), - .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none, - else => unreachable, - }; - } - - pub const FieldOffset = struct { - field: usize, - offset: u64, - }; - - /// Supports structs and unions. - pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { - const ip = &mod.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - assert(struct_type.haveLayout(ip)); - assert(struct_type.layout != .@"packed"); - return struct_type.offsets.get(ip)[index]; - }, - - .anon_struct_type => |tuple| { - var offset: u64 = 0; - var big_align: Alignment = .none; - - for (tuple.types.get(ip), tuple.values.get(ip), 0..) 
|field_ty, field_val, i| { - if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(mod)) { - // comptime field - if (i == index) return offset; - continue; - } - - const field_align = Type.fromInterned(field_ty).abiAlignment(mod); - big_align = big_align.max(field_align); - offset = field_align.forward(offset); - if (i == index) return offset; - offset += Type.fromInterned(field_ty).abiSize(mod); - } - offset = big_align.max(.@"1").forward(offset); - return offset; - }, - - .union_type => { - const union_type = ip.loadUnionType(ty.toIntern()); - if (!union_type.hasTag(ip)) - return 0; - const layout = mod.getUnionLayout(union_type); - if (layout.tag_align.compare(.gte, layout.payload_align)) { - // {Tag, Payload} - return layout.payload_align.forward(layout.tag_size); - } else { - // {Payload, Tag} - return 0; - } - }, - - else => unreachable, - } - } - - pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex { - return ty.getOwnerDeclOrNull(mod) orelse unreachable; - } - - pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(), - .union_type => ip.loadUnionType(ty.toIntern()).decl, - .opaque_type => ip.loadOpaqueType(ty.toIntern()).decl, - .enum_type => ip.loadEnumType(ty.toIntern()).decl, - else => null, - }; - } - - pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc { - const ip = &zcu.intern_pool; - return .{ - .base_node_inst = switch (ip.indexToKey(ty.toIntern())) { - .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { - .declared => |d| d.zir_index, - .reified => |r| r.zir_index, - .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, - .empty_struct => return null, - }, - else => return null, - }, - .offset = Module.LazySrcLoc.Offset.nodeOffset(0), - }; - } - - pub fn srcLoc(ty: Type, zcu: *Zcu) Module.LazySrcLoc { - return ty.srcLocOrNull(zcu).?; - } - - pub fn isGenericPoison(ty: Type) bool { - return ty.toIntern() == .generic_poison_type; - } - - pub fn isTuple(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") return false; - if (struct_type.decl == .none) return false; - return struct_type.flagsPtr(ip).is_tuple; - }, - .anon_struct_type => |anon_struct| anon_struct.names.len == 0, - else => false, - }; - } - - pub fn isAnonStruct(ty: Type, mod: *Module) bool { - if (ty.toIntern() == .empty_struct_type) return true; - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, - else => false, - }; - } - - pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") return false; - if (struct_type.decl == .none) return false; - return struct_type.flagsPtr(ip).is_tuple; - }, - .anon_struct_type => true, - else => false, - }; - } - - pub fn isSimpleTuple(ty: Type, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, - else => false, - }; - } - - pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool { - 
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
- .anon_struct_type => true,
- else => false,
- };
- }
-
- /// Traverses optional child types and error union payloads until the type
- /// is neither an optional nor an error union.
- /// For `E!?u32`, returns `u32`; for `*u8`, returns `*u8`.
- pub fn optEuBaseType(ty: Type, mod: *Module) Type {
- var cur = ty;
- while (true) switch (cur.zigTypeTag(mod)) {
- .Optional => cur = cur.optionalChild(mod),
- .ErrorUnion => cur = cur.errorUnionPayload(mod),
- else => return cur,
- };
- }
-
- pub fn toUnsigned(ty: Type, mod: *Module) !Type {
- return switch (ty.zigTypeTag(mod)) {
- .Int => mod.intType(.unsigned, ty.intInfo(mod).bits),
- .Vector => try mod.vectorType(.{
- .len = ty.vectorLen(mod),
- .child = (try ty.childType(mod).toUnsigned(mod)).toIntern(),
- }),
- else => unreachable,
- };
- }
-
- pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
- const ip = &zcu.intern_pool;
- return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
- .union_type => ip.loadUnionType(ty.toIntern()).zir_index,
- .enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
- .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
- else => null,
- };
- }
-
- pub fn typeDeclSrcLine(ty: Type, zcu: *const Zcu) ?u32 {
- const ip = &zcu.intern_pool;
- const tracked = switch (ip.indexToKey(ty.toIntern())) {
- .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
- .declared => |d| d.zir_index,
- .reified => |r| r.zir_index,
- .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
- .empty_struct => return null,
- },
- else => return null,
- };
- const info = tracked.resolveFull(&zcu.intern_pool);
- const file = zcu.import_table.values()[zcu.path_digest_map.getIndex(info.path_digest).?];
- assert(file.zir_loaded);
- const zir = file.zir;
- const inst = zir.instructions.get(@intFromEnum(info.inst));
- assert(inst.tag == .extended);
- return switch (inst.data.extended.opcode) {
- .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
- .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
- .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
- .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
- .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
- else => unreachable,
- };
- }
-
- /// Given a namespace type, returns its list of captured values.
- pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
- const ip = &zcu.intern_pool;
- return switch (ip.indexToKey(ty.toIntern())) {
- .struct_type => ip.loadStructType(ty.toIntern()).captures,
- .union_type => ip.loadUnionType(ty.toIntern()).captures,
- .enum_type => ip.loadEnumType(ty.toIntern()).captures,
- .opaque_type => ip.loadOpaqueType(ty.toIntern()).captures,
- else => unreachable,
- };
- }
-
- pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
- var cur_ty: Type = ty;
- var cur_len: u64 = 1;
- while (cur_ty.zigTypeTag(zcu) == .Array) {
- cur_len *= cur_ty.arrayLenIncludingSentinel(zcu);
- cur_ty = cur_ty.childType(zcu);
- }
- return .{ cur_ty, cur_len };
- }
-
- pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, zcu: *Zcu) union(enum) {
- /// The result is a bit-pointer with the same value and a new packed offset.
- bit_ptr: InternPool.Key.PtrType.PackedOffset, - /// The result is a standard pointer. - byte_ptr: struct { - /// The byte offset of the field pointer from the parent pointer value. - offset: u64, - /// The alignment of the field pointer type. - alignment: InternPool.Alignment, - }, - } { - comptime assert(Type.packed_struct_layout_version == 2); - - const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu); - const field_ty = struct_ty.structFieldType(field_idx, zcu); - - var bit_offset: u16 = 0; - var running_bits: u16 = 0; - for (0..struct_ty.structFieldCount(zcu)) |i| { - const f_ty = struct_ty.structFieldType(i, zcu); - if (i == field_idx) { - bit_offset = running_bits; - } - running_bits += @intCast(f_ty.bitSize(zcu)); - } - - const res_host_size: u16, const res_bit_offset: u16 = if (parent_ptr_info.packed_offset.host_size != 0) - .{ parent_ptr_info.packed_offset.host_size, parent_ptr_info.packed_offset.bit_offset + bit_offset } - else - .{ (running_bits + 7) / 8, bit_offset }; - - // If the field happens to be byte-aligned, simplify the pointer type. - // We can only do this if the pointee's bit size matches its ABI byte size, - // so that loads and stores do not interfere with surrounding packed bits. - // - // TODO: we do not attempt this with big-endian targets yet because of nested - // structs and floats. I need to double-check the desired behavior for big endian - // targets before adding the necessary complications to this code. This will not - // cause miscompilations; it only means the field pointer uses bit masking when it - // might not be strictly necessary. - if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) { - const byte_offset = res_bit_offset / 8; - const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?)); - return .{ .byte_ptr = .{ - .offset = byte_offset, - .alignment = new_align, - } }; - } - - return .{ .bit_ptr = .{ - .host_size = res_host_size, - .bit_offset = res_bit_offset, - } }; - } - - pub const @"u1": Type = .{ .ip_index = .u1_type }; - pub const @"u8": Type = .{ .ip_index = .u8_type }; - pub const @"u16": Type = .{ .ip_index = .u16_type }; - pub const @"u29": Type = .{ .ip_index = .u29_type }; - pub const @"u32": Type = .{ .ip_index = .u32_type }; - pub const @"u64": Type = .{ .ip_index = .u64_type }; - pub const @"u128": Type = .{ .ip_index = .u128_type }; - - pub const @"i8": Type = .{ .ip_index = .i8_type }; - pub const @"i16": Type = .{ .ip_index = .i16_type }; - pub const @"i32": Type = .{ .ip_index = .i32_type }; - pub const @"i64": Type = .{ .ip_index = .i64_type }; - pub const @"i128": Type = .{ .ip_index = .i128_type }; - - pub const @"f16": Type = .{ .ip_index = .f16_type }; - pub const @"f32": Type = .{ .ip_index = .f32_type }; - pub const @"f64": Type = .{ .ip_index = .f64_type }; - pub const @"f80": Type = .{ .ip_index = .f80_type }; - pub const @"f128": Type = .{ .ip_index = .f128_type }; - - pub const @"bool": Type = .{ .ip_index = .bool_type }; - pub const @"usize": Type = .{ .ip_index = .usize_type }; - pub const @"isize": Type = .{ .ip_index = .isize_type }; - pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type }; - pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type }; - pub const @"void": Type = .{ .ip_index = .void_type }; - pub const @"type": Type = .{ .ip_index = .type_type }; - pub const @"anyerror": Type = .{ .ip_index = .anyerror_type }; - pub const @"anyopaque": 
Type = .{ .ip_index = .anyopaque_type }; - pub const @"anyframe": Type = .{ .ip_index = .anyframe_type }; - pub const @"null": Type = .{ .ip_index = .null_type }; - pub const @"undefined": Type = .{ .ip_index = .undefined_type }; - pub const @"noreturn": Type = .{ .ip_index = .noreturn_type }; - - pub const @"c_char": Type = .{ .ip_index = .c_char_type }; - pub const @"c_short": Type = .{ .ip_index = .c_short_type }; - pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type }; - pub const @"c_int": Type = .{ .ip_index = .c_int_type }; - pub const @"c_uint": Type = .{ .ip_index = .c_uint_type }; - pub const @"c_long": Type = .{ .ip_index = .c_long_type }; - pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type }; - pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type }; - pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; - pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; - - pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; - pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; - pub const single_const_pointer_to_comptime_int: Type = .{ - .ip_index = .single_const_pointer_to_comptime_int_type, - }; - pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; - pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; - - pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; - - pub fn smallestUnsignedBits(max: u64) u16 { - if (max == 0) return 0; - const base = std.math.log2(max); - const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1; - return @as(u16, @intCast(base + @intFromBool(upper < max))); - } - - /// This is only used for comptime asserts. Bump this number when you make a change - /// to packed struct layout to find out all the places in the codebase you need to edit! 
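`smallestUnsignedBits` above is self-contained, so it can be checked in isolation; the sketch below restates the same logic with a few expected values, assuming only `std`:

```zig
const std = @import("std");

fn smallestUnsignedBits(max: u64) u16 {
    // Same logic as the helper above: log2 gives the position of the
    // highest set bit; one more bit is needed whenever max exceeds
    // 2^base - 1.
    if (max == 0) return 0;
    const base = std.math.log2(max);
    const upper = (@as(u64, 1) << @as(u6, @intCast(base))) - 1;
    return @as(u16, @intCast(base + @intFromBool(upper < max)));
}

test smallestUnsignedBits {
    try std.testing.expectEqual(@as(u16, 0), smallestUnsignedBits(0));
    try std.testing.expectEqual(@as(u16, 1), smallestUnsignedBits(1));
    try std.testing.expectEqual(@as(u16, 8), smallestUnsignedBits(255));
    try std.testing.expectEqual(@as(u16, 9), smallestUnsignedBits(256));
}
```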
- pub const packed_struct_layout_version = 2; -}; - -fn cTypeAlign(target: Target, c_type: Target.CType) Alignment { - return Alignment.fromByteUnits(target.c_type_alignment(c_type)); -} diff --git a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig index f7de8129b7..6ba1329a2e 100644 --- a/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig +++ b/test/cases/compile_errors/compileLog_of_tagged_enum_doesnt_crash_the_compiler.zig @@ -16,6 +16,7 @@ pub export fn entry() void { // target=native // // :6:5: error: found compile log statement +// :6:5: note: also here // // Compile Log Output: // @as(tmp.Bar, .{ .X = 123 }) diff --git a/test/cases/compile_errors/compile_log.zig b/test/cases/compile_errors/compile_log.zig index 6a14b78b17..ac89cfd1b3 100644 --- a/test/cases/compile_errors/compile_log.zig +++ b/test/cases/compile_errors/compile_log.zig @@ -18,6 +18,7 @@ export fn baz() void { // // :6:5: error: found compile log statement // :12:5: note: also here +// :6:5: note: also here // // Compile Log Output: // @as(*const [5:0]u8, "begin") diff --git a/test/cases/compile_errors/direct_struct_loop.zig b/test/cases/compile_errors/direct_struct_loop.zig index 9fdda1bdc7..1eed8aad53 100644 --- a/test/cases/compile_errors/direct_struct_loop.zig +++ b/test/cases/compile_errors/direct_struct_loop.zig @@ -10,4 +10,3 @@ export fn entry() usize { // target=native // // :1:11: error: struct 'tmp.A' depends on itself -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/indirect_struct_loop.zig b/test/cases/compile_errors/indirect_struct_loop.zig index ef5526830e..02ec65f5ab 100644 --- a/test/cases/compile_errors/indirect_struct_loop.zig +++ b/test/cases/compile_errors/indirect_struct_loop.zig @@ -16,6 +16,3 @@ export fn entry() usize { // target=native // // :1:11: error: struct 'tmp.A' depends on itself -// :8:5: note: while checking this field -// :5:5: note: while checking this field -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig index 74cafabe7c..11dd93d01e 100644 --- a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig +++ b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_struct_that_contains_itself.zig @@ -13,4 +13,3 @@ export fn entry() usize { // target=native // // :1:13: error: struct 'tmp.Foo' depends on itself -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig index 6030ca4d3e..8e499ab7e2 100644 --- a/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig +++ b/test/cases/compile_errors/instantiating_an_undefined_value_for_an_invalid_union_that_contains_itself.zig @@ -13,4 +13,3 @@ export fn entry() usize { // target=native // // :1:13: error: union 'tmp.Foo' depends on itself -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/invalid_dependency_on_struct_size.zig b/test/cases/compile_errors/invalid_dependency_on_struct_size.zig 
index 02ea7e2710..98c6224626 100644 --- a/test/cases/compile_errors/invalid_dependency_on_struct_size.zig +++ b/test/cases/compile_errors/invalid_dependency_on_struct_size.zig @@ -16,4 +16,3 @@ comptime { // target=native // // :6:21: error: struct layout depends on it having runtime bits -// :4:13: note: while checking this field diff --git a/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig b/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig index cad779e3d7..6a4cba82a6 100644 --- a/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig +++ b/test/cases/compile_errors/struct_depends_on_itself_via_optional_field.zig @@ -15,5 +15,3 @@ export fn entry() void { // target=native // // :1:17: error: struct 'tmp.LhsExpr' depends on itself -// :5:5: note: while checking this field -// :2:5: note: while checking this field diff --git a/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig b/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig index f5647625dd..a0a6d37042 100644 --- a/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig +++ b/test/cases/compile_errors/struct_type_returned_from_non-generic_function.zig @@ -1,5 +1,5 @@ pub export fn entry(param: usize) usize { - return struct { param }; + return struct { @TypeOf(param) }; } // error diff --git a/test/src/Cases.zig b/test/src/Cases.zig index b8a3260ad6..dbf409f53b 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -395,10 +395,7 @@ fn addFromDirInner( if (entry.kind != .file) continue; // Ignore stuff such as .swp files - switch (Compilation.classifyFileExt(entry.basename)) { - .unknown => continue, - else => {}, - } + if (!knownFileExtension(entry.basename)) continue; try filenames.append(try ctx.arena.dupe(u8, entry.path)); } @@ -623,8 +620,6 @@ pub fn lowerToBuildSteps( b: *std.Build, parent_step: *std.Build.Step, test_filters: []const []const u8, - cases_dir_path: []const u8, - incremental_exe: *std.Build.Step.Compile, ) void { const host = std.zig.system.resolveTargetQuery(.{}) catch |err| std.debug.panic("unable to detect native host: {s}\n", .{@errorName(err)}); @@ -637,20 +632,11 @@ pub fn lowerToBuildSteps( // compilation is in a happier state. continue; } - for (test_filters) |test_filter| { - if (std.mem.indexOf(u8, incr_case.base_path, test_filter)) |_| break; - } else if (test_filters.len > 0) continue; - const case_base_path_with_dir = std.fs.path.join(b.allocator, &.{ - cases_dir_path, incr_case.base_path, - }) catch @panic("OOM"); - const run = b.addRunArtifact(incremental_exe); - run.setName(incr_case.base_path); - run.addArgs(&.{ - case_base_path_with_dir, - b.graph.zig_exe, - }); - run.expectStdOutEqual(""); - parent_step.dependOn(&run.step); + // TODO: the logic for running these was bad, so I've ripped it out. Rewrite this + // in a way that actually spawns the compiler, communicating with it over the + // compiler server protocol. 
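One possible shape for that rewrite, as a hypothetical sketch only (the helper name and flags here are illustrative; a real implementation would hold a long-lived compiler process and speak the compiler server protocol rather than spawning one-shot builds):

```zig
const std = @import("std");

// Hypothetical out-of-process runner: compile one case by spawning the
// `zig` binary instead of linking the compiler into this executable.
fn caseCompiles(gpa: std.mem.Allocator, zig_exe: []const u8, src_path: []const u8) !bool {
    const result = try std.process.Child.run(.{
        .allocator = gpa,
        .argv = &.{ zig_exe, "build-exe", "-fno-emit-bin", src_path },
    });
    defer gpa.free(result.stdout);
    defer gpa.free(result.stderr);
    // Success means a clean exit; compile errors surface as a nonzero
    // exit code with diagnostics on stderr.
    return result.term == .Exited and result.term.Exited == 0;
}
```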
+ _ = incr_case; + @panic("TODO implement incremental test case executor"); } for (self.cases.items) |case| { @@ -1236,192 +1222,6 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const getExternalExecutor = std.zig.system.getExternalExecutor; -const Compilation = @import("../../src/Compilation.zig"); -const zig_h = @import("../../src/link.zig").File.C.zig_h; -const introspect = @import("../../src/introspect.zig"); -const ThreadPool = std.Thread.Pool; -const WaitGroup = std.Thread.WaitGroup; -const build_options = @import("build_options"); -const Package = @import("../../src/Package.zig"); - -pub const std_options = .{ - .log_level = .err, -}; - -var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{ - .stack_trace_frames = build_options.mem_leak_frames, -}){}; - -// TODO: instead of embedding the compiler in this process, spawn the compiler -// as a sub-process and communicate the updates using the compiler protocol. -pub fn main() !void { - const use_gpa = build_options.force_gpa or !builtin.link_libc; - const gpa = gpa: { - if (use_gpa) { - break :gpa general_purpose_allocator.allocator(); - } - // We would prefer to use raw libc allocator here, but cannot - // use it if it won't support the alignment we need. - if (@alignOf(std.c.max_align_t) < @alignOf(i128)) { - break :gpa std.heap.c_allocator; - } - break :gpa std.heap.raw_c_allocator; - }; - - var single_threaded_arena = std.heap.ArenaAllocator.init(gpa); - defer single_threaded_arena.deinit(); - - var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ - .child_allocator = single_threaded_arena.allocator(), - }; - const arena = thread_safe_arena.allocator(); - - const args = try std.process.argsAlloc(arena); - const case_file_path = args[1]; - const zig_exe_path = args[2]; - - var filenames = std.ArrayList([]const u8).init(arena); - - const case_dirname = std.fs.path.dirname(case_file_path).?; - var iterable_dir = try std.fs.cwd().openDir(case_dirname, .{ .iterate = true }); - defer iterable_dir.close(); - - if (std.mem.endsWith(u8, case_file_path, ".0.zig")) { - const stem = case_file_path[case_dirname.len + 1 .. 
case_file_path.len - "0.zig".len]; - var it = iterable_dir.iterate(); - while (try it.next()) |entry| { - if (entry.kind != .file) continue; - if (!std.mem.startsWith(u8, entry.name, stem)) continue; - try filenames.append(try std.fs.path.join(arena, &.{ case_dirname, entry.name })); - } - } else { - try filenames.append(case_file_path); - } - - if (filenames.items.len == 0) { - std.debug.print("failed to find the input source file(s) from '{s}'\n", .{ - case_file_path, - }); - std.process.exit(1); - } - - // Sort filenames, so that incremental tests are contiguous and in-order - sortTestFilenames(filenames.items); - - var ctx = Cases.init(gpa, arena); - - var test_it = TestIterator{ .filenames = filenames.items }; - while (try test_it.next()) |batch| { - const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent; - var cases = std.ArrayList(usize).init(arena); - - for (batch) |filename| { - const max_file_size = 10 * 1024 * 1024; - const src = try iterable_dir.readFileAllocOptions(arena, filename, max_file_size, null, 1, 0); - - // Parse the manifest - var manifest = try TestManifest.parse(arena, src); - - if (cases.items.len == 0) { - const backends = try manifest.getConfigForKeyAlloc(arena, "backend", Backend); - const targets = try manifest.getConfigForKeyAlloc(arena, "target", std.Target.Query); - const c_frontends = try manifest.getConfigForKeyAlloc(ctx.arena, "c_frontend", CFrontend); - const is_test = try manifest.getConfigForKeyAssertSingle("is_test", bool); - const link_libc = try manifest.getConfigForKeyAssertSingle("link_libc", bool); - const output_mode = try manifest.getConfigForKeyAssertSingle("output_mode", std.builtin.OutputMode); - - if (manifest.type == .translate_c) { - for (c_frontends) |c_frontend| { - for (targets) |target_query| { - const output = try manifest.trailingLinesSplit(ctx.arena); - try ctx.translate.append(.{ - .name = std.fs.path.stem(filename), - .c_frontend = c_frontend, - .target = resolveTargetQuery(target_query), - .is_test = is_test, - .link_libc = link_libc, - .input = src, - .kind = .{ .translate = output }, - }); - } - } - continue; - } - if (manifest.type == .run_translated_c) { - for (c_frontends) |c_frontend| { - for (targets) |target_query| { - const output = try manifest.trailingSplit(ctx.arena); - try ctx.translate.append(.{ - .name = std.fs.path.stem(filename), - .c_frontend = c_frontend, - .target = resolveTargetQuery(target_query), - .is_test = is_test, - .link_libc = link_libc, - .output = output, - .input = src, - .kind = .{ .run = output }, - }); - } - } - continue; - } - - // Cross-product to get all possible test combinations - for (backends) |backend| { - for (targets) |target| { - const next = ctx.cases.items.len; - try ctx.cases.append(.{ - .name = std.fs.path.stem(filename), - .target = target, - .backend = backend, - .updates = std.ArrayList(Cases.Update).init(ctx.cases.allocator), - .is_test = is_test, - .output_mode = output_mode, - .link_libc = backend == .llvm, - .deps = std.ArrayList(DepModule).init(ctx.cases.allocator), - }); - try cases.append(next); - } - } - } - - for (cases.items) |case_index| { - const case = &ctx.cases.items[case_index]; - if (strategy == .incremental and case.backend == .stage2 and case.target.getCpuArch() == .x86_64 and !case.link_libc and case.target.getOsTag() != .plan9) { - // https://github.com/ziglang/zig/issues/15174 - continue; - } - - switch (manifest.type) { - .compile => { - case.addCompile(src); - }, - .@"error" => { - const errors = try 
manifest.trailingLines(arena); - switch (strategy) { - .independent => { - case.addError(src, errors); - }, - .incremental => { - case.addErrorNamed("update", src, errors); - }, - } - }, - .run => { - const output = try manifest.trailingSplit(ctx.arena); - case.addCompareOutput(src, output); - }, - .translate_c => @panic("c_frontend specified for compile case"), - .run_translated_c => @panic("c_frontend specified for compile case"), - .cli => @panic("TODO cli tests"), - } - } - } - } - - return runCases(&ctx, zig_exe_path); -} - fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget { return .{ .query = query, @@ -1430,470 +1230,33 @@ fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget { }; } -fn runCases(self: *Cases, zig_exe_path: []const u8) !void { - const host = try std.zig.system.resolveTargetQuery(.{}); - - var progress = std.Progress{}; - const root_node = progress.start("compiler", self.cases.items.len); - progress.terminal = null; - defer root_node.end(); - - var zig_lib_directory = try introspect.findZigLibDirFromSelfExe(self.gpa, zig_exe_path); - defer zig_lib_directory.handle.close(); - defer self.gpa.free(zig_lib_directory.path.?); - - var aux_thread_pool: ThreadPool = undefined; - try aux_thread_pool.init(.{ .allocator = self.gpa }); - defer aux_thread_pool.deinit(); - - // Use the same global cache dir for all the tests, such that we for example don't have to - // rebuild musl libc for every case (when LLVM backend is enabled). - var global_tmp = std.testing.tmpDir(.{}); - defer global_tmp.cleanup(); - - var cache_dir = try global_tmp.dir.makeOpenPath(".zig-cache", .{}); - defer cache_dir.close(); - const tmp_dir_path = try std.fs.path.join(self.gpa, &[_][]const u8{ ".", ".zig-cache", "tmp", &global_tmp.sub_path }); - defer self.gpa.free(tmp_dir_path); - - const global_cache_directory: Compilation.Directory = .{ - .handle = cache_dir, - .path = try std.fs.path.join(self.gpa, &[_][]const u8{ tmp_dir_path, ".zig-cache" }), - }; - defer self.gpa.free(global_cache_directory.path.?); - - { - for (self.cases.items) |*case| { - if (build_options.skip_non_native) { - if (case.target.getCpuArch() != builtin.cpu.arch) - continue; - if (case.target.getObjectFormat() != builtin.object_format) - continue; - } - - // Skip tests that require LLVM backend when it is not available - if (!build_options.have_llvm and case.backend == .llvm) - continue; - - assert(case.backend != .stage1); - - for (build_options.test_filters) |test_filter| { - if (std.mem.indexOf(u8, case.name, test_filter)) |_| break; - } else if (build_options.test_filters.len > 0) continue; - - var prg_node = root_node.start(case.name, case.updates.items.len); - prg_node.activate(); - defer prg_node.end(); - - try runOneCase( - self.gpa, - &prg_node, - case.*, - zig_lib_directory, - zig_exe_path, - &aux_thread_pool, - global_cache_directory, - host, - ); - } - - for (self.translate.items) |*case| { - _ = case; - @panic("TODO is this even used?"); - } +fn knownFileExtension(filename: []const u8) bool { + // List taken from `Compilation.classifyFileExt` in the compiler. 
+ for ([_][]const u8{ + ".c", ".C", ".cc", ".cpp", + ".cxx", ".stub", ".m", ".mm", + ".ll", ".bc", ".s", ".S", + ".h", ".zig", ".so", ".dll", + ".dylib", ".tbd", ".a", ".lib", + ".o", ".obj", ".cu", ".def", + ".rc", ".res", ".manifest", + }) |ext| { + if (std.mem.endsWith(u8, filename, ext)) return true; } -} - -fn runOneCase( - allocator: Allocator, - root_node: *std.Progress.Node, - case: Case, - zig_lib_directory: Compilation.Directory, - zig_exe_path: []const u8, - thread_pool: *ThreadPool, - global_cache_directory: Compilation.Directory, - host: std.Target, -) !void { - const tmp_src_path = "tmp.zig"; - const enable_rosetta = build_options.enable_rosetta; - const enable_qemu = build_options.enable_qemu; - const enable_wine = build_options.enable_wine; - const enable_wasmtime = build_options.enable_wasmtime; - const enable_darling = build_options.enable_darling; - const glibc_runtimes_dir: ?[]const u8 = build_options.glibc_runtimes_dir; - - const target = try std.zig.system.resolveTargetQuery(case.target); - - var arena_allocator = std.heap.ArenaAllocator.init(allocator); - defer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - - var tmp = std.testing.tmpDir(.{}); - defer tmp.cleanup(); - - var cache_dir = try tmp.dir.makeOpenPath(".zig-cache", .{}); - defer cache_dir.close(); - - const tmp_dir_path = try std.fs.path.join( - arena, - &[_][]const u8{ ".", ".zig-cache", "tmp", &tmp.sub_path }, - ); - const local_cache_path = try std.fs.path.join( - arena, - &[_][]const u8{ tmp_dir_path, ".zig-cache" }, - ); - - const zig_cache_directory: Compilation.Directory = .{ - .handle = cache_dir, - .path = local_cache_path, - }; - - var main_pkg: Package = .{ - .root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir }, - .root_src_path = tmp_src_path, - }; - defer { - var it = main_pkg.table.iterator(); - while (it.next()) |kv| { - allocator.free(kv.key_ptr.*); - kv.value_ptr.*.destroy(allocator); - } - main_pkg.table.deinit(allocator); - } - - for (case.deps.items) |dep| { - var pkg = try Package.create( - allocator, - tmp_dir_path, - dep.path, - ); - errdefer pkg.destroy(allocator); - try main_pkg.add(allocator, dep.name, pkg); - } - - const bin_name = try std.zig.binNameAlloc(arena, .{ - .root_name = "test_case", - .target = target, - .output_mode = case.output_mode, - }); - - const emit_directory: Compilation.Directory = .{ - .path = tmp_dir_path, - .handle = tmp.dir, - }; - const emit_bin: Compilation.EmitLoc = .{ - .directory = emit_directory, - .basename = bin_name, - }; - const emit_h: ?Compilation.EmitLoc = if (case.emit_h) .{ - .directory = emit_directory, - .basename = "test_case.h", - } else null; - const use_llvm: bool = switch (case.backend) { - .llvm => true, - else => false, - }; - const comp = try Compilation.create(allocator, .{ - .local_cache_directory = zig_cache_directory, - .global_cache_directory = global_cache_directory, - .zig_lib_directory = zig_lib_directory, - .thread_pool = thread_pool, - .root_name = "test_case", - .target = target, - // TODO: support tests for object file building, and library builds - // and linking. This will require a rework to support multi-file - // tests. 
- .output_mode = case.output_mode, - .is_test = case.is_test, - .optimize_mode = case.optimize_mode, - .emit_bin = emit_bin, - .emit_h = emit_h, - .main_pkg = &main_pkg, - .keep_source_files_loaded = true, - .is_native_os = case.target.isNativeOs(), - .is_native_abi = case.target.isNativeAbi(), - .dynamic_linker = target.dynamic_linker.get(), - .link_libc = case.link_libc, - .use_llvm = use_llvm, - .self_exe_path = zig_exe_path, - // TODO instead of turning off color, pass in a std.Progress.Node - .color = .off, - .reference_trace = 0, - // TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in - // until the auto-select mechanism deems them worthy - .use_lld = switch (case.backend) { - .stage2 => false, - else => null, - }, - }); - defer comp.destroy(); - - update: for (case.updates.items, 0..) |update, update_index| { - var update_node = root_node.start(update.name, 3); - update_node.activate(); - defer update_node.end(); - - var sync_node = update_node.start("write", 0); - sync_node.activate(); - for (update.files.items) |file| { - try tmp.dir.writeFile(.{ .sub_path = file.path, .data = file.src }); - } - sync_node.end(); - - var module_node = update_node.start("parse/analysis/codegen", 0); - module_node.activate(); - try comp.makeBinFileWritable(); - try comp.update(&module_node); - module_node.end(); - - if (update.case != .Error) { - var all_errors = try comp.getAllErrorsAlloc(); - defer all_errors.deinit(allocator); - if (all_errors.errorMessageCount() > 0) { - all_errors.renderToStdErr(.{ - .ttyconf = std.io.tty.detectConfig(std.io.getStdErr()), - }); - // TODO print generated C code - return error.UnexpectedCompileErrors; - } - } - - switch (update.case) { - .Header => |expected_output| { - var file = try tmp.dir.openFile("test_case.h", .{ .mode = .read_only }); - defer file.close(); - const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024); - - try std.testing.expectEqualStrings(expected_output, out); - }, - .CompareObjectFile => |expected_output| { - var file = try tmp.dir.openFile(bin_name, .{ .mode = .read_only }); - defer file.close(); - const out = try file.reader().readAllAlloc(arena, 5 * 1024 * 1024); - - try std.testing.expectEqualStrings(expected_output, out); - }, - .Compile => {}, - .Error => |expected_errors| { - var test_node = update_node.start("assert", 0); - test_node.activate(); - defer test_node.end(); - - var error_bundle = try comp.getAllErrorsAlloc(); - defer error_bundle.deinit(allocator); - - if (error_bundle.errorMessageCount() == 0) { - return error.ExpectedCompilationErrors; - } - - var actual_stderr = std.ArrayList(u8).init(arena); - try error_bundle.renderToWriter(.{ - .ttyconf = .no_color, - .include_reference_trace = false, - .include_source_line = false, - }, actual_stderr.writer()); - - // Render the expected lines into a string that we can compare verbatim. 
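The matching loop that follows boils down to a per-line rule: an expected line matches when it is a suffix of the actual line, and a leading `:?:?: ` wildcards the file location. Distilled into a hypothetical standalone helper:

```zig
const std = @import("std");

// Sketch of the per-line matching rule used by the error-case checker.
fn errorLineMatches(actual: []const u8, expected: []const u8) bool {
    if (std.mem.endsWith(u8, actual, expected)) return true;
    // ":?:?: " wildcards the line and column numbers.
    if (std.mem.startsWith(u8, expected, ":?:?: ")) {
        return std.mem.endsWith(u8, actual, expected[":?:?: ".len..]);
    }
    return false;
}

test errorLineMatches {
    try std.testing.expect(errorLineMatches(
        "tmp.zig:6:5: error: found compile log statement",
        ":6:5: error: found compile log statement",
    ));
    try std.testing.expect(errorLineMatches(
        "tmp.zig:6:5: error: found compile log statement",
        ":?:?: error: found compile log statement",
    ));
}
```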
- var expected_generated = std.ArrayList(u8).init(arena); - - var actual_line_it = std.mem.splitScalar(u8, actual_stderr.items, '\n'); - for (expected_errors) |expect_line| { - const actual_line = actual_line_it.next() orelse { - try expected_generated.appendSlice(expect_line); - try expected_generated.append('\n'); - continue; - }; - if (std.mem.endsWith(u8, actual_line, expect_line)) { - try expected_generated.appendSlice(actual_line); - try expected_generated.append('\n'); - continue; - } - if (std.mem.startsWith(u8, expect_line, ":?:?: ")) { - if (std.mem.endsWith(u8, actual_line, expect_line[":?:?: ".len..])) { - try expected_generated.appendSlice(actual_line); - try expected_generated.append('\n'); - continue; - } - } - try expected_generated.appendSlice(expect_line); - try expected_generated.append('\n'); - } - - try std.testing.expectEqualStrings(expected_generated.items, actual_stderr.items); - }, - .Execution => |expected_stdout| { - if (!std.process.can_spawn) { - std.debug.print("Unable to spawn child processes on {s}, skipping test.\n", .{@tagName(builtin.os.tag)}); - continue :update; // Pass test. - } - - update_node.setEstimatedTotalItems(4); - - var argv = std.ArrayList([]const u8).init(allocator); - defer argv.deinit(); - - const exec_result = x: { - var exec_node = update_node.start("execute", 0); - exec_node.activate(); - defer exec_node.end(); - - // We go out of our way here to use the unique temporary directory name in - // the exe_path so that it makes its way into the cache hash, avoiding - // cache collisions from multiple threads doing `zig run` at the same time - // on the same test_case.c input filename. - const ss = std.fs.path.sep_str; - const exe_path = try std.fmt.allocPrint( - arena, - ".." ++ ss ++ "{s}" ++ ss ++ "{s}", - .{ &tmp.sub_path, bin_name }, - ); - if (case.target.ofmt != null and case.target.ofmt.? == .c) { - if (getExternalExecutor(host, &target, .{ .link_libc = true }) != .native) { - // We wouldn't be able to run the compiled C code. - continue :update; // Pass test. - } - try argv.appendSlice(&[_][]const u8{ - zig_exe_path, - "run", - "-cflags", - "-std=c99", - "-pedantic", - "-Werror", - "-Wno-incompatible-library-redeclaration", // https://github.com/ziglang/zig/issues/875 - "--", - "-lc", - exe_path, - }); - if (zig_lib_directory.path) |p| { - try argv.appendSlice(&.{ "-I", p }); - } - } else switch (getExternalExecutor(host, &target, .{ .link_libc = case.link_libc })) { - .native => { - if (case.backend == .stage2 and case.target.getCpuArch().isArmOrThumb()) { - // https://github.com/ziglang/zig/issues/13623 - continue :update; // Pass test. - } - try argv.append(exe_path); - }, - .bad_dl, .bad_os_or_cpu => continue :update, // Pass test. - - .rosetta => if (enable_rosetta) { - try argv.append(exe_path); - } else { - continue :update; // Rosetta not available, pass test. - }, - - .qemu => |qemu_bin_name| if (enable_qemu) { - const need_cross_glibc = target.isGnuLibC() and case.link_libc; - const glibc_dir_arg: ?[]const u8 = if (need_cross_glibc) - glibc_runtimes_dir orelse continue :update // glibc dir not available; pass test - else - null; - try argv.append(qemu_bin_name); - if (glibc_dir_arg) |dir| { - const linux_triple = try target.linuxTriple(arena); - const full_dir = try std.fs.path.join(arena, &[_][]const u8{ - dir, - linux_triple, - }); - - try argv.append("-L"); - try argv.append(full_dir); - } - try argv.append(exe_path); - } else { - continue :update; // QEMU not available; pass test. 
- }, - - .wine => |wine_bin_name| if (enable_wine) { - try argv.append(wine_bin_name); - try argv.append(exe_path); - } else { - continue :update; // Wine not available; pass test. - }, - - .wasmtime => |wasmtime_bin_name| if (enable_wasmtime) { - try argv.append(wasmtime_bin_name); - try argv.append("--dir=."); - try argv.append(exe_path); - } else { - continue :update; // wasmtime not available; pass test. - }, - - .darling => |darling_bin_name| if (enable_darling) { - try argv.append(darling_bin_name); - // Since we use relative to cwd here, we invoke darling with - // "shell" subcommand. - try argv.append("shell"); - try argv.append(exe_path); - } else { - continue :update; // Darling not available; pass test. - }, - } - - try comp.makeBinFileExecutable(); - - while (true) { - break :x std.process.Child.run(.{ - .allocator = allocator, - .argv = argv.items, - .cwd_dir = tmp.dir, - .cwd = tmp_dir_path, - }) catch |err| switch (err) { - error.FileBusy => { - // There is a fundamental design flaw in Unix systems with how - // ETXTBSY interacts with fork+exec. - // https://github.com/golang/go/issues/22315 - // https://bugs.openjdk.org/browse/JDK-8068370 - // Unfortunately, this could be a real error, but we can't - // tell the difference here. - continue; - }, - else => { - std.debug.print("\n{s}.{d} The following command failed with {s}:\n", .{ - case.name, update_index, @errorName(err), - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - }, - }; - } - }; - var test_node = update_node.start("test", 0); - test_node.activate(); - defer test_node.end(); - defer allocator.free(exec_result.stdout); - defer allocator.free(exec_result.stderr); - switch (exec_result.term) { - .Exited => |code| { - if (code != 0) { - std.debug.print("\n{s}\n{s}: execution exited with code {d}:\n", .{ - exec_result.stderr, case.name, code, - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - } - }, - else => { - std.debug.print("\n{s}\n{s}: execution crashed:\n", .{ - exec_result.stderr, case.name, - }); - dumpArgs(argv.items); - return error.ChildProcessExecution; - }, - } - try std.testing.expectEqualStrings(expected_stdout, exec_result.stdout); - // We allow stderr to have garbage in it because wasmtime prints a - // warning about --invoke even though we don't pass it. - //std.testing.expectEqualStrings("", exec_result.stderr); - }, - } - } -} - -fn dumpArgs(argv: []const []const u8) void { - for (argv) |arg| { - std.debug.print("{s} ", .{arg}); - } - std.debug.print("\n", .{}); + // Final check for .so.X, .so.X.Y, .so.X.Y.Z. + // From `Compilation.hasSharedLibraryExt`. 
+ var it = std.mem.splitScalar(u8, filename, '.');
+ _ = it.first();
+ var so_txt = it.next() orelse return false;
+ while (!std.mem.eql(u8, so_txt, "so")) {
+ so_txt = it.next() orelse return false;
+ }
+ const n1 = it.next() orelse return false;
+ const n2 = it.next();
+ const n3 = it.next();
+ _ = std.fmt.parseInt(u32, n1, 10) catch return false;
+ if (n2) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
+ if (n3) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
+ if (it.next() != null) return false;
+ return true;
}
diff --git a/test/tests.zig b/test/tests.zig
index 2202936d59..95a86c68f6 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -1250,7 +1250,6 @@ pub fn addCases(
b: *std.Build,
parent_step: *Step,
test_filters: []const []const u8,
- check_case_exe: *std.Build.Step.Compile,
target: std.Build.ResolvedTarget,
translate_c_options: @import("src/Cases.zig").TranslateCOptions,
build_options: @import("cases.zig").BuildOptions,
@@ -1268,12 +1267,9 @@ pub fn addCases(
cases.lowerToTranslateCSteps(b, parent_step, test_filters, target, translate_c_options);
- const cases_dir_path = try b.build_root.join(b.allocator, &.{ "test", "cases" });
cases.lowerToBuildSteps(
b,
parent_step,
test_filters,
- cases_dir_path,
- check_case_exe,
);
}
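
Note on the filename check added to test/src/Cases.zig above: the fall-through at the end of the versioned-.so path must be `return true;` (as in `Compilation.hasSharedLibraryExt`, which the comment cites), otherwise the check can never report a match; the hunk above reflects that. For reference, here is the same logic as a self-contained sketch with a test exercising the versioned-shared-object path. The enclosing function's declaration sits outside the hunk, so the name `hasKnownExtension` and the test block are illustrative stand-ins, not the names used in the patch.

const std = @import("std");

// Illustrative stand-in for the new check: accept any known source/artifact
// extension, or a versioned shared object of the form .so.X[.Y[.Z]].
fn hasKnownExtension(filename: []const u8) bool {
    for ([_][]const u8{
        ".c",     ".C",    ".cc", ".cpp",
        ".cxx",   ".stub", ".m",  ".mm",
        ".ll",    ".bc",   ".s",  ".S",
        ".h",     ".zig",  ".so", ".dll",
        ".dylib", ".tbd",  ".a",  ".lib",
        ".o",     ".obj",  ".cu", ".def",
        ".rc",    ".res",  ".manifest",
    }) |ext| {
        if (std.mem.endsWith(u8, filename, ext)) return true;
    }
    // Versioned shared objects: .so.X, .so.X.Y, .so.X.Y.Z.
    var it = std.mem.splitScalar(u8, filename, '.');
    _ = it.first(); // Skip the base name before the first '.'.
    // Scan forward to a "so" component, if there is one.
    var so_txt = it.next() orelse return false;
    while (!std.mem.eql(u8, so_txt, "so")) {
        so_txt = it.next() orelse return false;
    }
    // At least one numeric version component is required; at most three
    // are allowed, and each must parse as an unsigned integer.
    const n1 = it.next() orelse return false;
    const n2 = it.next();
    const n3 = it.next();
    _ = std.fmt.parseInt(u32, n1, 10) catch return false;
    if (n2) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
    if (n3) |x| _ = std.fmt.parseInt(u32, x, 10) catch return false;
    if (it.next() != null) return false; // A fourth component rejects the name.
    return true;
}

test "known extensions and versioned shared objects" {
    try std.testing.expect(hasKnownExtension("foo.zig"));
    try std.testing.expect(hasKnownExtension("libfoo.so")); // plain .so, caught by the list
    try std.testing.expect(hasKnownExtension("libfoo.so.1"));
    try std.testing.expect(hasKnownExtension("libfoo.so.1.2.3"));
    try std.testing.expect(!hasKnownExtension("libfoo.so.1.2.3.4"));
    try std.testing.expect(!hasKnownExtension("libfoo.so.abc"));
    try std.testing.expect(!hasKnownExtension("README.md"));
}

Splitting on '.' and walking to the "so" component (rather than matching from the end) is what lets names like "libfoo.so.1.2" pass while "libfoo.so.1.2.3.4" is rejected by the trailing `it.next() != null` check, matching the behavior documented for `Compilation.hasSharedLibraryExt`.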