diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig index 073f2d68d4..77c8344a86 100644 --- a/src/Air/types_resolved.zig +++ b/src/Air/types_resolved.zig @@ -501,8 +501,8 @@ fn checkType(ty: Type, zcu: *Zcu) bool { .struct_type => { const struct_obj = zcu.typeToStruct(ty).?; return switch (struct_obj.layout) { - .@"packed" => struct_obj.backingIntType(ip).* != .none, - .auto, .@"extern" => struct_obj.flagsPtr(ip).fully_resolved, + .@"packed" => struct_obj.backingIntTypeUnordered(ip) != .none, + .auto, .@"extern" => struct_obj.flagsUnordered(ip).fully_resolved, }; }, .anon_struct_type => |tuple| { @@ -516,6 +516,6 @@ fn checkType(ty: Type, zcu: *Zcu) bool { }, else => unreachable, }, - .Union => return zcu.typeToUnion(ty).?.flagsPtr(ip).status == .fully_resolved, + .Union => return zcu.typeToUnion(ty).?.flagsUnordered(ip).status == .fully_resolved, }; } diff --git a/src/Compilation.zig b/src/Compilation.zig index d262d6742d..a785351df5 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -101,7 +101,15 @@ link_error_flags: link.File.ErrorFlags = .{}, link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .{}, lld_errors: std.ArrayListUnmanaged(LldError) = .{}, -work_queue: std.fifo.LinearFifo(Job, .Dynamic), +work_queues: [ + len: { + var len: usize = 0; + for (std.enums.values(Job.Tag)) |tag| { + len = @max(Job.stage(tag) + 1, len); + } + break :len len; + } +]std.fifo.LinearFifo(Job, .Dynamic), codegen_work: if (InternPool.single_threaded) void else struct { mutex: std.Thread.Mutex, @@ -370,6 +378,20 @@ const Job = union(enum) { /// The value is the index into `system_libs`. windows_import_lib: usize, + + const Tag = @typeInfo(Job).Union.tag_type.?; + fn stage(tag: Tag) usize { + return switch (tag) { + // Prioritize functions so that codegen can get to work on them on a + // separate thread, while Sema goes back to its own work. 
+ .resolve_type_fully, .analyze_func, .codegen_func => 0, + else => 1, + }; + } + comptime { + // Job dependencies + assert(stage(.resolve_type_fully) <= stage(.codegen_func)); + } }; const CodegenJob = union(enum) { @@ -1452,7 +1474,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .emit_asm = options.emit_asm, .emit_llvm_ir = options.emit_llvm_ir, .emit_llvm_bc = options.emit_llvm_bc, - .work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa), + .work_queues = .{std.fifo.LinearFifo(Job, .Dynamic).init(gpa)} ** @typeInfo(std.meta.FieldType(Compilation, .work_queues)).Array.len, .codegen_work = if (InternPool.single_threaded) {} else .{ .mutex = .{}, .cond = .{}, @@ -1760,12 +1782,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; if (glibc.needsCrtiCrtn(target)) { - try comp.work_queue.write(&[_]Job{ + try comp.queueJobs(&[_]Job{ .{ .glibc_crt_file = .crti_o }, .{ .glibc_crt_file = .crtn_o }, }); } - try comp.work_queue.write(&[_]Job{ + try comp.queueJobs(&[_]Job{ .{ .glibc_crt_file = .scrt1_o }, .{ .glibc_crt_file = .libc_nonshared_a }, .{ .glibc_shared_objects = {} }, @@ -1774,14 +1796,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (comp.wantBuildMuslFromSource()) { if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; - try comp.work_queue.ensureUnusedCapacity(6); if (musl.needsCrtiCrtn(target)) { - comp.work_queue.writeAssumeCapacity(&[_]Job{ + try comp.queueJobs(&[_]Job{ .{ .musl_crt_file = .crti_o }, .{ .musl_crt_file = .crtn_o }, }); } - comp.work_queue.writeAssumeCapacity(&[_]Job{ + try comp.queueJobs(&[_]Job{ .{ .musl_crt_file = .crt1_o }, .{ .musl_crt_file = .scrt1_o }, .{ .musl_crt_file = .rcrt1_o }, @@ -1795,15 +1816,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (comp.wantBuildWasiLibcFromSource()) { if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; - // worst-case we need all components - try comp.work_queue.ensureUnusedCapacity(comp.wasi_emulated_libs.len + 2); - for (comp.wasi_emulated_libs) |crt_file| { - comp.work_queue.writeItemAssumeCapacity(.{ + try comp.queueJob(.{ .wasi_libc_crt_file = crt_file, }); } - comp.work_queue.writeAssumeCapacity(&[_]Job{ + try comp.queueJobs(&[_]Job{ .{ .wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) }, .{ .wasi_libc_crt_file = .libc_a }, }); @@ -1813,9 +1831,10 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable; const crt_job: Job = .{ .mingw_crt_file = if (is_dyn_lib) .dllcrt2_o else .crt2_o }; - try comp.work_queue.ensureUnusedCapacity(2); - comp.work_queue.writeItemAssumeCapacity(.{ .mingw_crt_file = .mingw32_lib }); - comp.work_queue.writeItemAssumeCapacity(crt_job); + try comp.queueJobs(&.{ + .{ .mingw_crt_file = .mingw32_lib }, + crt_job, + }); // When linking mingw-w64 there are some import libs we always need. for (mingw.always_link_libs) |name| { @@ -1829,20 +1848,19 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil // Generate Windows import libs. 
if (target.os.tag == .windows) { const count = comp.system_libs.count(); - try comp.work_queue.ensureUnusedCapacity(count); for (0..count) |i| { - comp.work_queue.writeItemAssumeCapacity(.{ .windows_import_lib = i }); + try comp.queueJob(.{ .windows_import_lib = i }); } } if (comp.wantBuildLibUnwindFromSource()) { - try comp.work_queue.writeItem(.{ .libunwind = {} }); + try comp.queueJob(.{ .libunwind = {} }); } if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) { - try comp.work_queue.writeItem(.libcxx); - try comp.work_queue.writeItem(.libcxxabi); + try comp.queueJob(.libcxx); + try comp.queueJob(.libcxxabi); } if (build_options.have_llvm and comp.config.any_sanitize_thread) { - try comp.work_queue.writeItem(.libtsan); + try comp.queueJob(.libtsan); } if (target.isMinGW() and comp.config.any_non_single_threaded) { @@ -1872,7 +1890,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (!comp.skip_linker_dependencies and is_exe_or_dyn_lib and !comp.config.link_libc and capable_of_building_zig_libc) { - try comp.work_queue.writeItem(.{ .zig_libc = {} }); + try comp.queueJob(.{ .zig_libc = {} }); } } @@ -1883,7 +1901,7 @@ pub fn destroy(comp: *Compilation) void { if (comp.bin_file) |lf| lf.destroy(); if (comp.module) |zcu| zcu.deinit(); comp.cache_use.deinit(); - comp.work_queue.deinit(); + for (comp.work_queues) |work_queue| work_queue.deinit(); if (!InternPool.single_threaded) comp.codegen_work.queue.deinit(); comp.c_object_work_queue.deinit(); if (!build_options.only_core_functionality) { @@ -2199,13 +2217,13 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { } } - try comp.work_queue.writeItem(.{ .analyze_mod = std_mod }); + try comp.queueJob(.{ .analyze_mod = std_mod }); if (comp.config.is_test) { - try comp.work_queue.writeItem(.{ .analyze_mod = zcu.main_mod }); + try comp.queueJob(.{ .analyze_mod = zcu.main_mod }); } if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| { - try comp.work_queue.writeItem(.{ .analyze_mod = compiler_rt_mod }); + try comp.queueJob(.{ .analyze_mod = compiler_rt_mod }); } } @@ -2852,11 +2870,7 @@ pub fn makeBinFileWritable(comp: *Compilation) !void { const Header = extern struct { intern_pool: extern struct { - //items_len: u32, - //extra_len: u32, - //limbs_len: u32, - //string_bytes_len: u32, - //tracked_insts_len: u32, + thread_count: u32, src_hash_deps_len: u32, decl_val_deps_len: u32, namespace_deps_len: u32, @@ -2864,28 +2878,39 @@ const Header = extern struct { first_dependency_len: u32, dep_entries_len: u32, free_dep_entries_len: u32, - //files_len: u32, }, + + const PerThread = extern struct { + intern_pool: extern struct { + items_len: u32, + extra_len: u32, + limbs_len: u32, + string_bytes_len: u32, + tracked_insts_len: u32, + files_len: u32, + }, + }; }; /// Note that all state that is included in the cache hash namespace is *not* /// saved, such as the target and most CLI flags. A cache hit will only occur /// when subsequent compiler invocations use the same set of flags. 
pub fn saveState(comp: *Compilation) !void { - var bufs_list: [21]std.posix.iovec_const = undefined; - var bufs_len: usize = 0; - const lf = comp.bin_file orelse return; + const gpa = comp.gpa; + + var bufs = std.ArrayList(std.posix.iovec_const).init(gpa); + defer bufs.deinit(); + + var pt_headers = std.ArrayList(Header.PerThread).init(gpa); + defer pt_headers.deinit(); + if (comp.module) |zcu| { const ip = &zcu.intern_pool; const header: Header = .{ .intern_pool = .{ - //.items_len = @intCast(ip.items.len), - //.extra_len = @intCast(ip.extra.items.len), - //.limbs_len = @intCast(ip.limbs.items.len), - //.string_bytes_len = @intCast(ip.string_bytes.items.len), - //.tracked_insts_len = @intCast(ip.tracked_insts.count()), + .thread_count = @intCast(ip.locals.len), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), .decl_val_deps_len = @intCast(ip.decl_val_deps.count()), .namespace_deps_len = @intCast(ip.namespace_deps.count()), @@ -2893,38 +2918,54 @@ pub fn saveState(comp: *Compilation) !void { .first_dependency_len = @intCast(ip.first_dependency.count()), .dep_entries_len = @intCast(ip.dep_entries.items.len), .free_dep_entries_len = @intCast(ip.free_dep_entries.items.len), - //.files_len = @intCast(ip.files.entries.len), }, }; - addBuf(&bufs_list, &bufs_len, mem.asBytes(&header)); - //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items)); - //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items)); - //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data))); - //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag))); - //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items); - //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.values())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.keys())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.decl_val_deps.values())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.keys())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_deps.values())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.keys())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.namespace_name_deps.values())); + try pt_headers.ensureTotalCapacityPrecise(header.intern_pool.thread_count); + for (ip.locals) |*local| pt_headers.appendAssumeCapacity(.{ + .intern_pool = .{ + .items_len = @intCast(local.mutate.items.len), + .extra_len = @intCast(local.mutate.extra.len), + .limbs_len = @intCast(local.mutate.limbs.len), + .string_bytes_len = @intCast(local.mutate.strings.len), + .tracked_insts_len = @intCast(local.mutate.tracked_insts.len), + .files_len = @intCast(local.mutate.files.len), + }, + }); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.keys())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.first_dependency.values())); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.dep_entries.items)); - addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.free_dep_entries.items)); + try bufs.ensureTotalCapacityPrecise(14 + 8 * pt_headers.items.len); + addBuf(&bufs, mem.asBytes(&header)); + addBuf(&bufs, mem.sliceAsBytes(pt_headers.items)); - //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.keys())); - //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.files.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys())); + addBuf(&bufs, 
mem.sliceAsBytes(ip.src_hash_deps.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.decl_val_deps.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.decl_val_deps.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.values())); - // TODO: compilation errors - // TODO: namespaces - // TODO: decls - // TODO: linker state + addBuf(&bufs, mem.sliceAsBytes(ip.first_dependency.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.first_dependency.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.dep_entries.items)); + addBuf(&bufs, mem.sliceAsBytes(ip.free_dep_entries.items)); + + for (ip.locals, pt_headers.items) |*local, pt_header| { + addBuf(&bufs, mem.sliceAsBytes(local.shared.limbs.view().items(.@"0")[0..pt_header.intern_pool.limbs_len])); + addBuf(&bufs, mem.sliceAsBytes(local.shared.extra.view().items(.@"0")[0..pt_header.intern_pool.extra_len])); + addBuf(&bufs, mem.sliceAsBytes(local.shared.items.view().items(.data)[0..pt_header.intern_pool.items_len])); + addBuf(&bufs, mem.sliceAsBytes(local.shared.items.view().items(.tag)[0..pt_header.intern_pool.items_len])); + addBuf(&bufs, local.shared.strings.view().items(.@"0")[0..pt_header.intern_pool.string_bytes_len]); + addBuf(&bufs, mem.sliceAsBytes(local.shared.tracked_insts.view().items(.@"0")[0..pt_header.intern_pool.tracked_insts_len])); + addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.bin_digest)[0..pt_header.intern_pool.files_len])); + addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.root_decl)[0..pt_header.intern_pool.files_len])); + } + + //// TODO: compilation errors + //// TODO: namespaces + //// TODO: decls + //// TODO: linker state } var basename_buf: [255]u8 = undefined; const basename = std.fmt.bufPrint(&basename_buf, "{s}.zcs", .{ @@ -2938,20 +2979,14 @@ pub fn saveState(comp: *Compilation) !void { // the previous incremental compilation state. var af = try lf.emit.directory.handle.atomicFile(basename, .{}); defer af.deinit(); - try af.file.pwritevAll(bufs_list[0..bufs_len], 0); + try af.file.pwritevAll(bufs.items, 0); try af.finish(); } -fn addBuf(bufs_list: []std.posix.iovec_const, bufs_len: *usize, buf: []const u8) void { +fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void { // Even when len=0, the undefined pointer might cause EFAULT. if (buf.len == 0) return; - - const i = bufs_len.*; - bufs_len.* = i + 1; - bufs_list[i] = .{ - .base = buf.ptr, - .len = buf.len, - }; + list.appendAssumeCapacity(.{ .base = buf.ptr, .len = buf.len }); } /// This function is temporally single-threaded. 
@@ -3011,7 +3046,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 { } } - if (zcu.intern_pool.global_error_set.mutate.list.len > zcu.error_limit) { + if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) { total += 1; } } @@ -3095,6 +3130,39 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { for (zcu.failed_embed_files.values()) |error_msg| { try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); } + { + const SortOrder = struct { + zcu: *Zcu, + err: *?Error, + + const Error = @typeInfo( + @typeInfo(@TypeOf(Zcu.SrcLoc.span)).Fn.return_type.?, + ).ErrorUnion.error_set; + + pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool { + if (ctx.err.*) |_| return lhs_index < rhs_index; + const errors = ctx.zcu.failed_analysis.values(); + const lhs_src_loc = errors[lhs_index].src_loc.upgrade(ctx.zcu); + const rhs_src_loc = errors[rhs_index].src_loc.upgrade(ctx.zcu); + return if (lhs_src_loc.file_scope != rhs_src_loc.file_scope) std.mem.order( + u8, + lhs_src_loc.file_scope.sub_file_path, + rhs_src_loc.file_scope.sub_file_path, + ).compare(.lt) else (lhs_src_loc.span(ctx.zcu.gpa) catch |e| { + ctx.err.* = e; + return lhs_index < rhs_index; + }).main < (rhs_src_loc.span(ctx.zcu.gpa) catch |e| { + ctx.err.* = e; + return lhs_index < rhs_index; + }).main; + } + }; + var err: ?SortOrder.Error = null; + // This leaves `zcu.failed_analysis` in an invalid state, but we do not + // need lookups anymore anyway. + zcu.failed_analysis.entries.sort(SortOrder{ .zcu = zcu, .err = &err }); + if (err) |e| return e; + } for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { const decl_index = switch (anal_unit.unwrap()) { .decl => |d| d, @@ -3140,7 +3208,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { try addModuleErrorMsg(zcu, &bundle, value.*, &all_references); } - const actual_error_count = zcu.intern_pool.global_error_set.mutate.list.len; + const actual_error_count = zcu.intern_pool.global_error_set.getNamesFromMainThread().len; if (actual_error_count > zcu.error_limit) { try bundle.addRootErrorMessage(.{ .msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{ @@ -3543,18 +3611,18 @@ fn performAllTheWorkInner( comp.codegen_work.cond.signal(); }; - while (true) { - if (comp.work_queue.readItem()) |work_item| { - try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, work_item, main_progress_node); - continue; - } + work: while (true) { + for (&comp.work_queues) |*work_queue| if (work_queue.readItem()) |job| { + try processOneJob(@intFromEnum(Zcu.PerThread.Id.main), comp, job, main_progress_node); + continue :work; + }; if (comp.module) |zcu| { // If there's no work queued, check if there's anything outdated // which we need to work on, and queue it if so. 
if (try zcu.findOutdatedToAnalyze()) |outdated| { switch (outdated.unwrap()) { - .decl => |decl| try comp.work_queue.writeItem(.{ .analyze_decl = decl }), - .func => |func| try comp.work_queue.writeItem(.{ .analyze_func = func }), + .decl => |decl| try comp.queueJob(.{ .analyze_decl = decl }), + .func => |func| try comp.queueJob(.{ .analyze_func = func }), } continue; } @@ -3575,6 +3643,14 @@ fn performAllTheWorkInner( const JobError = Allocator.Error; +pub fn queueJob(comp: *Compilation, job: Job) !void { + try comp.work_queues[Job.stage(job)].writeItem(job); +} + +pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void { + for (jobs) |job| try comp.queueJob(job); +} + fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) JobError!void { switch (job) { .codegen_decl => |decl_index| { @@ -6478,7 +6554,7 @@ pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void { }; const target = comp.root_mod.resolved_target.result; if (target.os.tag == .windows and target.ofmt != .c) { - try comp.work_queue.writeItem(.{ + try comp.queueJob(.{ .windows_import_lib = comp.system_libs.count() - 1, }); } diff --git a/src/InternPool.zig b/src/InternPool.zig index 2b21b25a1d..55ae300cce 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -147,8 +147,6 @@ pub fn trackZir( } defer shard.mutate.tracked_inst_map.len += 1; const local = ip.getLocal(tid); - local.mutate.tracked_insts.mutex.lock(); - defer local.mutate.tracked_insts.mutex.unlock(); const list = local.getMutableTrackedInsts(gpa); try list.ensureUnusedCapacity(1); const map_header = map.header().*; @@ -418,10 +416,10 @@ const Local = struct { arena: std.heap.ArenaAllocator.State, items: ListMutate, - extra: MutexListMutate, + extra: ListMutate, limbs: ListMutate, strings: ListMutate, - tracked_insts: MutexListMutate, + tracked_insts: ListMutate, files: ListMutate, maps: ListMutate, @@ -471,20 +469,12 @@ const Local = struct { const Namespaces = List(struct { *[1 << namespaces_bucket_width]Zcu.Namespace }); const ListMutate = struct { + mutex: std.Thread.Mutex, len: u32, const empty: ListMutate = .{ - .len = 0, - }; - }; - - const MutexListMutate = struct { - mutex: std.Thread.Mutex, - list: ListMutate, - - const empty: MutexListMutate = .{ .mutex = .{}, - .list = ListMutate.empty, + .len = 0, }; }; @@ -694,6 +684,8 @@ const Local = struct { const new_slice = new_list.view().slice(); inline for (fields) |field| @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]); } + mutable.mutate.mutex.lock(); + defer mutable.mutate.mutex.unlock(); mutable.list.release(new_list); } @@ -760,7 +752,7 @@ const Local = struct { return .{ .gpa = gpa, .arena = &local.mutate.arena, - .mutate = &local.mutate.extra.list, + .mutate = &local.mutate.extra, .list = &local.shared.extra, }; } @@ -802,7 +794,7 @@ const Local = struct { return .{ .gpa = gpa, .arena = &local.mutate.arena, - .mutate = &local.mutate.tracked_insts.list, + .mutate = &local.mutate.tracked_insts, .list = &local.shared.tracked_insts, }; } @@ -1714,29 +1706,76 @@ pub const Key = union(enum) { comptime_args: Index.Slice, /// Returns a pointer that becomes invalid after any additions to the `InternPool`. 
- pub fn analysis(func: *const Func, ip: *const InternPool) *FuncAnalysis { + fn analysisPtr(func: Func, ip: *InternPool) *FuncAnalysis { const extra = ip.getLocalShared(func.tid).extra.acquire(); return @ptrCast(&extra.view().items(.@"0")[func.analysis_extra_index]); } + pub fn analysisUnordered(func: Func, ip: *const InternPool) FuncAnalysis { + return @atomicLoad(FuncAnalysis, func.analysisPtr(@constCast(ip)), .unordered); + } + + pub fn setAnalysisState(func: Func, ip: *InternPool, state: FuncAnalysis.State) void { + const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const analysis_ptr = func.analysisPtr(ip); + var analysis = analysis_ptr.*; + analysis.state = state; + @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); + } + + pub fn setCallsOrAwaitsErrorableFn(func: Func, ip: *InternPool, value: bool) void { + const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const analysis_ptr = func.analysisPtr(ip); + var analysis = analysis_ptr.*; + analysis.calls_or_awaits_errorable_fn = value; + @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); + } + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. - pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *TrackedInst.Index { + fn zirBodyInstPtr(func: Func, ip: *InternPool) *TrackedInst.Index { const extra = ip.getLocalShared(func.tid).extra.acquire(); return @ptrCast(&extra.view().items(.@"0")[func.zir_body_inst_extra_index]); } + pub fn zirBodyInstUnordered(func: Func, ip: *const InternPool) TrackedInst.Index { + return @atomicLoad(TrackedInst.Index, func.zirBodyInstPtr(@constCast(ip)), .unordered); + } + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. - pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 { + fn branchQuotaPtr(func: Func, ip: *InternPool) *u32 { const extra = ip.getLocalShared(func.tid).extra.acquire(); return &extra.view().items(.@"0")[func.branch_quota_extra_index]; } + pub fn branchQuotaUnordered(func: Func, ip: *const InternPool) u32 { + return @atomicLoad(u32, func.branchQuotaPtr(@constCast(ip)), .unordered); + } + + pub fn maxBranchQuota(func: Func, ip: *InternPool, new_branch_quota: u32) void { + const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const branch_quota_ptr = func.branchQuotaPtr(ip); + @atomicStore(u32, branch_quota_ptr, @max(branch_quota_ptr.*, new_branch_quota), .release); + } + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. - pub fn resolvedErrorSet(func: *const Func, ip: *const InternPool) *Index { + fn resolvedErrorSetPtr(func: Func, ip: *InternPool) *Index { const extra = ip.getLocalShared(func.tid).extra.acquire(); - assert(func.analysis(ip).inferred_error_set); + assert(func.analysisUnordered(ip).inferred_error_set); return @ptrCast(&extra.view().items(.@"0")[func.resolved_error_set_extra_index]); } + + pub fn resolvedErrorSetUnordered(func: Func, ip: *const InternPool) Index { + return @atomicLoad(Index, func.resolvedErrorSetPtr(@constCast(ip)), .unordered); + } }; pub const Int = struct { @@ -2663,47 +2702,170 @@ pub const LoadedUnionType = struct { /// This accessor is provided so that the tag type can be mutated, and so that /// when it is mutated, the mutations are observed. /// The returned pointer expires with any addition to the `InternPool`. 
- pub fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index { + fn tagTypePtr(self: LoadedUnionType, ip: *InternPool) *Index { const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?; return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]); } + pub fn tagTypeUnordered(u: LoadedUnionType, ip: *const InternPool) Index { + return @atomicLoad(Index, u.tagTypePtr(@constCast(ip)), .unordered); + } + + pub fn setTagType(u: LoadedUnionType, ip: *InternPool, tag_type: Index) void { + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + @atomicStore(Index, u.tagTypePtr(ip), tag_type, .release); + } + /// The returned pointer expires with any addition to the `InternPool`. - pub fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags { + fn flagsPtr(self: LoadedUnionType, ip: *InternPool) *Tag.TypeUnion.Flags { const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?; return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]); } + pub fn flagsUnordered(u: LoadedUnionType, ip: *const InternPool) Tag.TypeUnion.Flags { + return @atomicLoad(Tag.TypeUnion.Flags, u.flagsPtr(@constCast(ip)), .unordered); + } + + pub fn setStatus(u: LoadedUnionType, ip: *InternPool, status: Status) void { + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = u.flagsPtr(ip); + var flags = flags_ptr.*; + flags.status = status; + @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); + } + + pub fn setStatusIfLayoutWip(u: LoadedUnionType, ip: *InternPool, status: Status) void { + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = u.flagsPtr(ip); + var flags = flags_ptr.*; + if (flags.status == .layout_wip) flags.status = status; + @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); + } + + pub fn setAlignment(u: LoadedUnionType, ip: *InternPool, alignment: Alignment) void { + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = u.flagsPtr(ip); + var flags = flags_ptr.*; + flags.alignment = alignment; + @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); + } + + pub fn assumeRuntimeBitsIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool) bool { + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = u.flagsPtr(ip); + var flags = flags_ptr.*; + defer if (flags.status == .field_types_wip) { + flags.assumed_runtime_bits = true; + @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); + }; + return flags.status == .field_types_wip; + } + + pub fn setRequiresComptimeWip(u: LoadedUnionType, ip: *InternPool) RequiresComptime { + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = u.flagsPtr(ip); + var flags = flags_ptr.*; + defer if (flags.requires_comptime == .unknown) { + flags.requires_comptime = .wip; + @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); + }; + return flags.requires_comptime; + } + + pub fn setRequiresComptime(u: LoadedUnionType, ip: *InternPool, requires_comptime: RequiresComptime) void { + 
assert(requires_comptime != .wip); // see setRequiresComptimeWip + + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = u.flagsPtr(ip); + var flags = flags_ptr.*; + flags.requires_comptime = requires_comptime; + @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); + } + + pub fn assumePointerAlignedIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool, ptr_align: Alignment) bool { + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = u.flagsPtr(ip); + var flags = flags_ptr.*; + defer if (flags.status == .field_types_wip) { + flags.alignment = ptr_align; + flags.assumed_pointer_aligned = true; + @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); + }; + return flags.status == .field_types_wip; + } + /// The returned pointer expires with any addition to the `InternPool`. - pub fn size(self: LoadedUnionType, ip: *const InternPool) *u32 { + fn sizePtr(self: LoadedUnionType, ip: *InternPool) *u32 { const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?; return &extra.view().items(.@"0")[self.extra_index + field_index]; } + pub fn sizeUnordered(u: LoadedUnionType, ip: *const InternPool) u32 { + return @atomicLoad(u32, u.sizePtr(@constCast(ip)), .unordered); + } + /// The returned pointer expires with any addition to the `InternPool`. - pub fn padding(self: LoadedUnionType, ip: *const InternPool) *u32 { + fn paddingPtr(self: LoadedUnionType, ip: *InternPool) *u32 { const extra = ip.getLocalShared(self.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?; return &extra.view().items(.@"0")[self.extra_index + field_index]; } + pub fn paddingUnordered(u: LoadedUnionType, ip: *const InternPool) u32 { + return @atomicLoad(u32, u.paddingPtr(@constCast(ip)), .unordered); + } + pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool { - return self.flagsPtr(ip).runtime_tag.hasTag(); + return self.flagsUnordered(ip).runtime_tag.hasTag(); } pub fn haveFieldTypes(self: LoadedUnionType, ip: *const InternPool) bool { - return self.flagsPtr(ip).status.haveFieldTypes(); + return self.flagsUnordered(ip).status.haveFieldTypes(); } pub fn haveLayout(self: LoadedUnionType, ip: *const InternPool) bool { - return self.flagsPtr(ip).status.haveLayout(); + return self.flagsUnordered(ip).status.haveLayout(); } - pub fn getLayout(self: LoadedUnionType, ip: *const InternPool) std.builtin.Type.ContainerLayout { - return self.flagsPtr(ip).layout; + pub fn setHaveLayout(u: LoadedUnionType, ip: *InternPool, size: u32, padding: u32, alignment: Alignment) void { + const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + @atomicStore(u32, u.sizePtr(ip), size, .unordered); + @atomicStore(u32, u.paddingPtr(ip), padding, .unordered); + const flags_ptr = u.flagsPtr(ip); + var flags = flags_ptr.*; + flags.alignment = alignment; + flags.status = .have_layout; + @atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); } pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: usize) Alignment { @@ -2726,7 +2888,7 @@ pub const LoadedUnionType = struct { pub fn setFieldAligns(self: LoadedUnionType, ip: *const InternPool, aligns: []const Alignment) void { if (aligns.len == 0) return; - assert(self.flagsPtr(ip).any_aligned_fields); + 
assert(self.flagsUnordered(ip).any_aligned_fields); @memcpy(self.field_aligns.get(ip), aligns); } }; @@ -2877,26 +3039,26 @@ pub const LoadedStructType = struct { }; /// Look up field index based on field name. - pub fn nameIndex(self: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 { - const names_map = self.names_map.unwrap() orelse { + pub fn nameIndex(s: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 { + const names_map = s.names_map.unwrap() orelse { const i = name.toUnsigned(ip) orelse return null; - if (i >= self.field_types.len) return null; + if (i >= s.field_types.len) return null; return i; }; const map = names_map.getConst(ip); - const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) }; + const adapter: NullTerminatedString.Adapter = .{ .strings = s.field_names.get(ip) }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; return @intCast(field_index); } /// Returns the already-existing field with the same name, if any. pub fn addFieldName( - self: LoadedStructType, + s: LoadedStructType, ip: *InternPool, name: NullTerminatedString, ) ?u32 { - const extra = ip.getLocalShared(self.tid).extra.acquire(); - return ip.addFieldName(extra, self.names_map.unwrap().?, self.field_names.start, name); + const extra = ip.getLocalShared(s.tid).extra.acquire(); + return ip.addFieldName(extra, s.names_map.unwrap().?, s.field_names.start, name); } pub fn fieldAlign(s: LoadedStructType, ip: *const InternPool, i: usize) Alignment { @@ -2924,143 +3086,313 @@ pub const LoadedStructType = struct { s.comptime_bits.setBit(ip, i); } - /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more - /// complicated logic. - pub fn knownNonOpv(s: LoadedStructType, ip: *InternPool) bool { - return switch (s.layout) { - .@"packed" => false, - .auto, .@"extern" => s.flagsPtr(ip).known_non_opv, - }; - } - /// The returned pointer expires with any addition to the `InternPool`. /// Asserts the struct is not packed. - pub fn flagsPtr(self: LoadedStructType, ip: *InternPool) *Tag.TypeStruct.Flags { - assert(self.layout != .@"packed"); - const extra = ip.getLocalShared(self.tid).extra.acquire(); + fn flagsPtr(s: LoadedStructType, ip: *InternPool) *Tag.TypeStruct.Flags { + assert(s.layout != .@"packed"); + const extra = ip.getLocalShared(s.tid).extra.acquire(); const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?; - return @ptrCast(&extra.view().items(.@"0")[self.extra_index + flags_field_index]); + return @ptrCast(&extra.view().items(.@"0")[s.extra_index + flags_field_index]); + } + + pub fn flagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStruct.Flags { + return @atomicLoad(Tag.TypeStruct.Flags, s.flagsPtr(@constCast(ip)), .unordered); } /// The returned pointer expires with any addition to the `InternPool`. /// Asserts that the struct is packed. 
- pub fn packedFlagsPtr(self: LoadedStructType, ip: *InternPool) *Tag.TypeStructPacked.Flags { - assert(self.layout == .@"packed"); - const extra = ip.getLocalShared(self.tid).extra.acquire(); + fn packedFlagsPtr(s: LoadedStructType, ip: *InternPool) *Tag.TypeStructPacked.Flags { + assert(s.layout == .@"packed"); + const extra = ip.getLocalShared(s.tid).extra.acquire(); const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?; - return @ptrCast(&extra.view().items(.@"0")[self.extra_index + flags_field_index]); + return @ptrCast(&extra.view().items(.@"0")[s.extra_index + flags_field_index]); + } + + pub fn packedFlagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStructPacked.Flags { + return @atomicLoad(Tag.TypeStructPacked.Flags, s.packedFlagsPtr(@constCast(ip)), .unordered); + } + + /// Reads the non-opv flag calculated during AstGen. Used to short-circuit more + /// complicated logic. + pub fn knownNonOpv(s: LoadedStructType, ip: *const InternPool) bool { + return switch (s.layout) { + .@"packed" => false, + .auto, .@"extern" => s.flagsUnordered(ip).known_non_opv, + }; + } + + pub fn requiresComptime(s: LoadedStructType, ip: *const InternPool) RequiresComptime { + return s.flagsUnordered(ip).requires_comptime; + } + + pub fn setRequiresComptimeWip(s: LoadedStructType, ip: *InternPool) RequiresComptime { + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + defer if (flags.requires_comptime == .unknown) { + flags.requires_comptime = .wip; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + }; + return flags.requires_comptime; + } + + pub fn setRequiresComptime(s: LoadedStructType, ip: *InternPool, requires_comptime: RequiresComptime) void { + assert(requires_comptime != .wip); // see setRequiresComptimeWip + + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + flags.requires_comptime = requires_comptime; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); } pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool { if (s.layout == .@"packed") return false; + + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + const flags_ptr = s.flagsPtr(ip); - if (flags_ptr.field_types_wip) { - flags_ptr.assumed_runtime_bits = true; - return true; - } - return false; + var flags = flags_ptr.*; + defer if (flags.field_types_wip) { + flags.assumed_runtime_bits = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + }; + return flags.field_types_wip; } - pub fn setTypesWip(s: LoadedStructType, ip: *InternPool) bool { + pub fn setFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool { if (s.layout == .@"packed") return false; + + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + const flags_ptr = s.flagsPtr(ip); - if (flags_ptr.field_types_wip) return true; - flags_ptr.field_types_wip = true; - return false; + var flags = flags_ptr.*; + defer { + flags.field_types_wip = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + } + return flags.field_types_wip; } - pub fn clearTypesWip(s: LoadedStructType, ip: *InternPool) void { + pub fn clearFieldTypesWip(s: LoadedStructType, ip: *InternPool) void { 
if (s.layout == .@"packed") return; - s.flagsPtr(ip).field_types_wip = false; + + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + flags.field_types_wip = false; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); } pub fn setLayoutWip(s: LoadedStructType, ip: *InternPool) bool { if (s.layout == .@"packed") return false; + + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + const flags_ptr = s.flagsPtr(ip); - if (flags_ptr.layout_wip) return true; - flags_ptr.layout_wip = true; - return false; + var flags = flags_ptr.*; + defer { + flags.layout_wip = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + } + return flags.layout_wip; } pub fn clearLayoutWip(s: LoadedStructType, ip: *InternPool) void { if (s.layout == .@"packed") return; - s.flagsPtr(ip).layout_wip = false; + + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + flags.layout_wip = false; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); } - pub fn setAlignmentWip(s: LoadedStructType, ip: *InternPool) bool { - if (s.layout == .@"packed") return false; + pub fn setAlignment(s: LoadedStructType, ip: *InternPool, alignment: Alignment) void { + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + const flags_ptr = s.flagsPtr(ip); - if (flags_ptr.alignment_wip) return true; - flags_ptr.alignment_wip = true; - return false; + var flags = flags_ptr.*; + flags.alignment = alignment; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + } + + pub fn assumePointerAlignedIfFieldTypesWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool { + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + defer if (flags.field_types_wip) { + flags.alignment = ptr_align; + flags.assumed_pointer_aligned = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + }; + return flags.field_types_wip; + } + + pub fn assumePointerAlignedIfWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool { + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + defer { + if (flags.alignment_wip) { + flags.alignment = ptr_align; + flags.assumed_pointer_aligned = true; + } else flags.alignment_wip = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + } + return flags.alignment_wip; } pub fn clearAlignmentWip(s: LoadedStructType, ip: *InternPool) void { if (s.layout == .@"packed") return; - s.flagsPtr(ip).alignment_wip = false; + + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + flags.alignment_wip = false; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); } pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool { - const local = ip.getLocal(s.tid); - local.mutate.extra.mutex.lock(); - defer local.mutate.extra.mutex.unlock(); - return switch (s.layout) { - .@"packed" => 
@as(Tag.TypeStructPacked.Flags, @bitCast(@atomicRmw( - u32, - @as(*u32, @ptrCast(s.packedFlagsPtr(ip))), - .Or, - @bitCast(Tag.TypeStructPacked.Flags{ .field_inits_wip = true }), - .acq_rel, - ))).field_inits_wip, - .auto, .@"extern" => @as(Tag.TypeStruct.Flags, @bitCast(@atomicRmw( - u32, - @as(*u32, @ptrCast(s.flagsPtr(ip))), - .Or, - @bitCast(Tag.TypeStruct.Flags{ .field_inits_wip = true }), - .acq_rel, - ))).field_inits_wip, - }; + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + switch (s.layout) { + .@"packed" => { + const flags_ptr = s.packedFlagsPtr(ip); + var flags = flags_ptr.*; + defer { + flags.field_inits_wip = true; + @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release); + } + return flags.field_inits_wip; + }, + .auto, .@"extern" => { + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + defer { + flags.field_inits_wip = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + } + return flags.field_inits_wip; + }, + } } pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool) void { + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + switch (s.layout) { - .@"packed" => s.packedFlagsPtr(ip).field_inits_wip = false, - .auto, .@"extern" => s.flagsPtr(ip).field_inits_wip = false, + .@"packed" => { + const flags_ptr = s.packedFlagsPtr(ip); + var flags = flags_ptr.*; + flags.field_inits_wip = false; + @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release); + }, + .auto, .@"extern" => { + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + flags.field_inits_wip = false; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + }, } } pub fn setFullyResolved(s: LoadedStructType, ip: *InternPool) bool { if (s.layout == .@"packed") return true; + + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + const flags_ptr = s.flagsPtr(ip); - if (flags_ptr.fully_resolved) return true; - flags_ptr.fully_resolved = true; - return false; + var flags = flags_ptr.*; + defer { + flags.fully_resolved = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + } + return flags.fully_resolved; } pub fn clearFullyResolved(s: LoadedStructType, ip: *InternPool) void { - s.flagsPtr(ip).fully_resolved = false; + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + flags.fully_resolved = false; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); } /// The returned pointer expires with any addition to the `InternPool`. /// Asserts the struct is not packed. 
- pub fn size(self: LoadedStructType, ip: *InternPool) *u32 { - assert(self.layout != .@"packed"); - const extra = ip.getLocalShared(self.tid).extra.acquire(); + fn sizePtr(s: LoadedStructType, ip: *InternPool) *u32 { + assert(s.layout != .@"packed"); + const extra = ip.getLocalShared(s.tid).extra.acquire(); const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?; - return @ptrCast(&extra.view().items(.@"0")[self.extra_index + size_field_index]); + return @ptrCast(&extra.view().items(.@"0")[s.extra_index + size_field_index]); + } + + pub fn sizeUnordered(s: LoadedStructType, ip: *const InternPool) u32 { + return @atomicLoad(u32, s.sizePtr(@constCast(ip)), .unordered); } /// The backing integer type of the packed struct. Whether zig chooses /// this type or the user specifies it, it is stored here. This will be /// set to `none` until the layout is resolved. /// Asserts the struct is packed. - pub fn backingIntType(s: LoadedStructType, ip: *InternPool) *Index { + fn backingIntTypePtr(s: LoadedStructType, ip: *InternPool) *Index { assert(s.layout == .@"packed"); const extra = ip.getLocalShared(s.tid).extra.acquire(); const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?; return @ptrCast(&extra.view().items(.@"0")[s.extra_index + field_index]); } + pub fn backingIntTypeUnordered(s: LoadedStructType, ip: *const InternPool) Index { + return @atomicLoad(Index, s.backingIntTypePtr(@constCast(ip)), .unordered); + } + + pub fn setBackingIntType(s: LoadedStructType, ip: *InternPool, backing_int_ty: Index) void { + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + @atomicStore(Index, s.backingIntTypePtr(ip), backing_int_ty, .release); + } + /// Asserts the struct is not packed. 
pub fn setZirIndex(s: LoadedStructType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void { assert(s.layout != .@"packed"); @@ -3073,29 +3405,56 @@ pub const LoadedStructType = struct { return types.len == 0 or types[0] != .none; } - pub fn haveFieldInits(s: LoadedStructType, ip: *InternPool) bool { + pub fn haveFieldInits(s: LoadedStructType, ip: *const InternPool) bool { return switch (s.layout) { - .@"packed" => s.packedFlagsPtr(ip).inits_resolved, - .auto, .@"extern" => s.flagsPtr(ip).inits_resolved, + .@"packed" => s.packedFlagsUnordered(ip).inits_resolved, + .auto, .@"extern" => s.flagsUnordered(ip).inits_resolved, }; } pub fn setHaveFieldInits(s: LoadedStructType, ip: *InternPool) void { + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + switch (s.layout) { - .@"packed" => s.packedFlagsPtr(ip).inits_resolved = true, - .auto, .@"extern" => s.flagsPtr(ip).inits_resolved = true, + .@"packed" => { + const flags_ptr = s.packedFlagsPtr(ip); + var flags = flags_ptr.*; + flags.inits_resolved = true; + @atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release); + }, + .auto, .@"extern" => { + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + flags.inits_resolved = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + }, } } pub fn haveLayout(s: LoadedStructType, ip: *InternPool) bool { return switch (s.layout) { - .@"packed" => s.backingIntType(ip).* != .none, - .auto, .@"extern" => s.flagsPtr(ip).layout_resolved, + .@"packed" => s.backingIntTypeUnordered(ip) != .none, + .auto, .@"extern" => s.flagsUnordered(ip).layout_resolved, }; } + pub fn setLayoutResolved(s: LoadedStructType, ip: *InternPool, size: u32, alignment: Alignment) void { + const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + @atomicStore(u32, s.sizePtr(ip), size, .unordered); + const flags_ptr = s.flagsPtr(ip); + var flags = flags_ptr.*; + flags.alignment = alignment; + flags.layout_resolved = true; + @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); + } + pub fn isTuple(s: LoadedStructType, ip: *InternPool) bool { - return s.layout != .@"packed" and s.flagsPtr(ip).is_tuple; + return s.layout != .@"packed" and s.flagsUnordered(ip).is_tuple; } pub fn hasReorderedFields(s: LoadedStructType) bool { @@ -3209,7 +3568,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { const decl: DeclIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "decl").?]); const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]); const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "fields_len").?]; - const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .monotonic)); + const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered)); var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStruct).Struct.fields.len); const captures_len = if (flags.any_captures) c: { const len = extra_list.view().items(.@"0")[extra_index]; @@ -3317,7 +3676,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "fields_len").?]; const namespace: 
OptionalNamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?]); const names_map: MapIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "names_map").?]); - const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .monotonic)); + const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered)); var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStructPacked).Struct.fields.len); const has_inits = item.tag == .type_struct_packed_inits; const captures_len = if (flags.any_captures) c: { @@ -5442,10 +5801,10 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .arena = .{}, .items = Local.ListMutate.empty, - .extra = Local.MutexListMutate.empty, + .extra = Local.ListMutate.empty, .limbs = Local.ListMutate.empty, .strings = Local.ListMutate.empty, - .tracked_insts = Local.MutexListMutate.empty, + .tracked_insts = Local.ListMutate.empty, .files = Local.ListMutate.empty, .maps = Local.ListMutate.empty, @@ -5635,7 +5994,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const extra_list = unwrapped_index.getExtra(ip); const extra_items = extra_list.view().items(.@"0"); const zir_index: TrackedInst.Index = @enumFromInt(extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]); - const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .monotonic)); + const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered)); const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStruct).Struct.fields.len); if (flags.is_reified) { assert(!flags.any_captures); @@ -5658,7 +6017,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const extra_list = unwrapped_index.getExtra(ip); const extra_items = extra_list.view().items(.@"0"); const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]); - const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .monotonic)); + const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered)); const end_extra_index = data + @as(u32, @typeInfo(Tag.TypeStructPacked).Struct.fields.len); if (flags.is_reified) { assert(!flags.any_captures); @@ -6155,7 +6514,7 @@ fn extraFuncDecl(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Ke fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.Func { const extra_items = extra.view().items(.@"0"); const analysis_extra_index = extra_index + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?; - const analysis: FuncAnalysis = @bitCast(@atomicLoad(u32, &extra_items[analysis_extra_index], .monotonic)); + const analysis: FuncAnalysis = @bitCast(@atomicLoad(u32, &extra_items[analysis_extra_index], .unordered)); const owner_decl: DeclIndex = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").?]); const ty: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, 
"ty").?]); const generic_owner: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?]); @@ -8702,7 +9061,7 @@ pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { // Restore the original item at this index. assert(static_keys[@intFromEnum(index)] == .simple_type); const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view(); - @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .simple_type, .monotonic); + @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .simple_type, .unordered); return; } @@ -8719,7 +9078,7 @@ pub fn remove(ip: *InternPool, tid: Zcu.PerThread.Id, index: Index) void { // Thus, we will rewrite the tag to `removed`, leaking the item until // next GC but causing `KeyAdapter` to ignore it. const items = ip.getLocalShared(unwrapped_index.tid).items.acquire().view(); - @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .removed, .monotonic); + @atomicStore(Tag, &items.items(.tag)[unwrapped_index.index], .removed, .unordered); } fn addInt( @@ -9415,9 +9774,11 @@ pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index { /// The is only legal because the initializer is not part of the hash. pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { const unwrapped_index = index.unwrap(ip); + const local = ip.getLocal(unwrapped_index.tid); local.mutate.extra.mutex.lock(); defer local.mutate.extra.mutex.unlock(); + const extra_items = local.shared.extra.view().items(.@"0"); const item = unwrapped_index.getItem(ip); assert(item.tag == .variable); @@ -9436,7 +9797,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { var decls_len: usize = 0; for (ip.locals) |*local| { items_len += local.mutate.items.len; - extra_len += local.mutate.extra.list.len; + extra_len += local.mutate.extra.len; limbs_len += local.mutate.limbs.len; decls_len += local.mutate.decls.buckets_list.len; } @@ -10730,29 +11091,29 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois }; } -pub fn isFuncBody(ip: *const InternPool, index: Index) bool { - return switch (index.unwrap(ip).getTag(ip)) { +pub fn isFuncBody(ip: *const InternPool, func: Index) bool { + return switch (func.unwrap(ip).getTag(ip)) { .func_decl, .func_instance, .func_coerced => true, else => false, }; } -pub fn funcAnalysis(ip: *const InternPool, index: Index) *FuncAnalysis { - const unwrapped_index = index.unwrap(ip); - const extra = unwrapped_index.getExtra(ip); - const item = unwrapped_index.getItem(ip); +fn funcAnalysisPtr(ip: *InternPool, func: Index) *FuncAnalysis { + const unwrapped_func = func.unwrap(ip); + const extra = unwrapped_func.getExtra(ip); + const item = unwrapped_func.getItem(ip); const extra_index = switch (item.tag) { .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, .func_coerced => { const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?; - const func_index: Index = @enumFromInt(extra.view().items(.@"0")[extra_index]); - const unwrapped_func = func_index.unwrap(ip); - const func_item = unwrapped_func.getItem(ip); - return @ptrCast(&unwrapped_func.getExtra(ip).view().items(.@"0")[ - switch (func_item.tag) { - .func_decl => func_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, - .func_instance => func_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, + const 
coerced_func_index: Index = @enumFromInt(extra.view().items(.@"0")[extra_index]); + const unwrapped_coerced_func = coerced_func_index.unwrap(ip); + const coerced_func_item = unwrapped_coerced_func.getItem(ip); + return @ptrCast(&unwrapped_coerced_func.getExtra(ip).view().items(.@"0")[ + switch (coerced_func_item.tag) { + .func_decl => coerced_func_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, + .func_instance => coerced_func_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, else => unreachable, } ]); @@ -10762,14 +11123,65 @@ pub fn funcAnalysis(ip: *const InternPool, index: Index) *FuncAnalysis { return @ptrCast(&extra.view().items(.@"0")[extra_index]); } -pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { - return funcAnalysis(ip, i).inferred_error_set; +pub fn funcAnalysisUnordered(ip: *const InternPool, func: Index) FuncAnalysis { + return @atomicLoad(FuncAnalysis, @constCast(ip).funcAnalysisPtr(func), .unordered); } -pub fn funcZirBodyInst(ip: *const InternPool, index: Index) TrackedInst.Index { - const unwrapped_index = index.unwrap(ip); - const item = unwrapped_index.getItem(ip); - const item_extra = unwrapped_index.getExtra(ip); +pub fn funcSetAnalysisState(ip: *InternPool, func: Index, state: FuncAnalysis.State) void { + const unwrapped_func = func.unwrap(ip); + const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const analysis_ptr = ip.funcAnalysisPtr(func); + var analysis = analysis_ptr.*; + analysis.state = state; + @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); +} + +pub fn funcMaxStackAlignment(ip: *InternPool, func: Index, new_stack_alignment: Alignment) void { + const unwrapped_func = func.unwrap(ip); + const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const analysis_ptr = ip.funcAnalysisPtr(func); + var analysis = analysis_ptr.*; + analysis.stack_alignment = switch (analysis.stack_alignment) { + .none => new_stack_alignment, + else => |old_stack_alignment| old_stack_alignment.maxStrict(new_stack_alignment), + }; + @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); +} + +pub fn funcSetCallsOrAwaitsErrorableFn(ip: *InternPool, func: Index) void { + const unwrapped_func = func.unwrap(ip); + const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const analysis_ptr = ip.funcAnalysisPtr(func); + var analysis = analysis_ptr.*; + analysis.calls_or_awaits_errorable_fn = true; + @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); +} + +pub fn funcSetCold(ip: *InternPool, func: Index, is_cold: bool) void { + const unwrapped_func = func.unwrap(ip); + const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + const analysis_ptr = ip.funcAnalysisPtr(func); + var analysis = analysis_ptr.*; + analysis.is_cold = is_cold; + @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); +} + +pub fn funcZirBodyInst(ip: *const InternPool, func: Index) TrackedInst.Index { + const unwrapped_func = func.unwrap(ip); + const item = unwrapped_func.getItem(ip); + const item_extra = unwrapped_func.getExtra(ip); const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?; switch (item.tag) { .func_decl => return @enumFromInt(item_extra.view().items(.@"0")[item.data + zir_body_inst_field_index]), 
@@ -10806,17 +11218,17 @@ pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index { /// Returns a mutable pointer to the resolved error set type of an inferred /// error set function. The returned pointer is invalidated when anything is /// added to `ip`. -pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index { +fn iesResolvedPtr(ip: *InternPool, ies_index: Index) *Index { const ies_item = ies_index.getItem(ip); assert(ies_item.tag == .type_inferred_error_set); - return funcIesResolved(ip, ies_item.data); + return ip.funcIesResolvedPtr(ies_item.data); } /// Returns a mutable pointer to the resolved error set type of an inferred /// error set function. The returned pointer is invalidated when anything is /// added to `ip`. -pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { - assert(funcHasInferredErrorSet(ip, func_index)); +fn funcIesResolvedPtr(ip: *InternPool, func_index: Index) *Index { + assert(ip.funcAnalysisUnordered(func_index).inferred_error_set); const unwrapped_func = func_index.unwrap(ip); const func_extra = unwrapped_func.getExtra(ip); const func_item = unwrapped_func.getItem(ip); @@ -10842,6 +11254,19 @@ pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { return @ptrCast(&func_extra.view().items(.@"0")[extra_index]); } +pub fn funcIesResolvedUnordered(ip: *const InternPool, index: Index) Index { + return @atomicLoad(Index, @constCast(ip).funcIesResolvedPtr(index), .unordered); +} + +pub fn funcSetIesResolved(ip: *InternPool, index: Index, ies: Index) void { + const unwrapped_func = index.unwrap(ip); + const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex; + extra_mutex.lock(); + defer extra_mutex.unlock(); + + @atomicStore(Index, ip.funcIesResolvedPtr(index), ies, .release); +} + pub fn funcDeclInfo(ip: *const InternPool, index: Index) Key.Func { const unwrapped_index = index.unwrap(ip); const item = unwrapped_index.getItem(ip); @@ -10950,7 +11375,10 @@ const GlobalErrorSet = struct { names: Names, map: Shard.Map(GlobalErrorSet.Index), } align(std.atomic.cache_line), - mutate: Local.MutexListMutate align(std.atomic.cache_line), + mutate: struct { + names: Local.ListMutate, + map: struct { mutex: std.Thread.Mutex }, + } align(std.atomic.cache_line), const Names = Local.List(struct { NullTerminatedString }); @@ -10959,7 +11387,10 @@ const GlobalErrorSet = struct { .names = Names.empty, .map = Shard.Map(GlobalErrorSet.Index).empty, }, - .mutate = Local.MutexListMutate.empty, + .mutate = .{ + .names = Local.ListMutate.empty, + .map = .{ .mutex = .{} }, + }, }; const Index = enum(Zcu.ErrorInt) { @@ -10969,7 +11400,7 @@ const GlobalErrorSet = struct { /// Not thread-safe, may only be called from the main thread. 
pub fn getNamesFromMainThread(ges: *const GlobalErrorSet) []const NullTerminatedString { - const len = ges.mutate.list.len; + const len = ges.mutate.names.len; return if (len > 0) ges.shared.names.view().items(.@"0")[0..len] else &.{}; } @@ -10994,8 +11425,8 @@ const GlobalErrorSet = struct { if (entry.hash != hash) continue; if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index; } - ges.mutate.mutex.lock(); - defer ges.mutate.mutex.unlock(); + ges.mutate.map.mutex.lock(); + defer ges.mutate.map.mutex.unlock(); if (map.entries != ges.shared.map.entries) { map = ges.shared.map; map_mask = map.header().mask(); @@ -11012,12 +11443,12 @@ const GlobalErrorSet = struct { const mutable_names: Names.Mutable = .{ .gpa = gpa, .arena = arena_state, - .mutate = &ges.mutate.list, + .mutate = &ges.mutate.names, .list = &ges.shared.names, }; try mutable_names.ensureUnusedCapacity(1); const map_header = map.header().*; - if (ges.mutate.list.len < map_header.capacity * 3 / 5) { + if (ges.mutate.names.len < map_header.capacity * 3 / 5) { mutable_names.appendAssumeCapacity(.{name}); const index: GlobalErrorSet.Index = @enumFromInt(mutable_names.mutate.len); const entry = &map.entries[map_index]; diff --git a/src/Sema.zig b/src/Sema.zig index eb49bc037e..f5ee909caf 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2530,13 +2530,13 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error } if (sema.owner_func_index != .none) { - ip.funcAnalysis(sema.owner_func_index).state = .sema_failure; + ip.funcSetAnalysisState(sema.owner_func_index, .sema_failure); } else { sema.owner_decl.analysis = .sema_failure; } if (sema.func_index != .none) { - ip.funcAnalysis(sema.func_index).state = .sema_failure; + ip.funcSetAnalysisState(sema.func_index, .sema_failure); } return error.AnalysisFail; @@ -2848,7 +2848,7 @@ fn zirStructDecl( } try pt.finalizeAnonDecl(new_decl_index); - try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); } @@ -3353,7 +3353,7 @@ fn zirUnionDecl( } try pt.finalizeAnonDecl(new_decl_index); - try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); } @@ -6550,14 +6550,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst } sema.prev_stack_alignment_src = src; - const ip = &mod.intern_pool; - const a = ip.funcAnalysis(sema.func_index); - if (a.stack_alignment != .none) { - a.stack_alignment = @enumFromInt(@max( - @intFromEnum(alignment), - @intFromEnum(a.stack_alignment), - )); - } + mod.intern_pool.funcMaxStackAlignment(sema.func_index, alignment); } fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { @@ -6570,7 +6563,7 @@ fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) .needed_comptime_reason = "operand to @setCold must be comptime-known", }); if (sema.func_index == .none) return; // does nothing outside a function - ip.funcAnalysis(sema.func_index).is_cold = is_cold; + ip.funcSetCold(sema.func_index, is_cold); } fn zirSetFloatMode(sema: *Sema, block: *Block, 
extended: Zir.Inst.Extended.InstData) CompileError!void { @@ -7085,7 +7078,7 @@ fn zirCall( const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call); if (sema.owner_func_index == .none or - !mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) + !mod.intern_pool.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn) { // No errorable fn actually called; we have no error return trace input_is_error = false; @@ -7793,7 +7786,7 @@ fn analyzeCall( _ = ics.callee(); if (!inlining.has_comptime_args) { - if (module_fn.analysis(ip).state == .sema_failure) + if (module_fn.analysisUnordered(ip).state == .sema_failure) return error.AnalysisFail; var block_it = block; @@ -7816,7 +7809,7 @@ fn analyzeCall( try sema.resolveInst(fn_info.ret_ty_ref); const ret_ty_src: LazySrcLoc = .{ .base_node_inst = module_fn.zir_body_inst, .offset = .{ .node_offset_fn_type_ret_ty = 0 } }; sema.fn_ret_ty = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst); - if (module_fn.analysis(ip).inferred_error_set) { + if (module_fn.analysisUnordered(ip).inferred_error_set) { // Create a fresh inferred error set type for inline/comptime calls. const ies = try sema.arena.create(InferredErrorSet); ies.* = .{ .func = .none }; @@ -7942,7 +7935,7 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) { - ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true; + ip.funcSetCallsOrAwaitsErrorableFn(sema.owner_func_index); } if (try sema.resolveValue(func)) |func_val| { @@ -8386,7 +8379,7 @@ fn instantiateGenericCall( const callee_index = (child_sema.resolveConstDefinedValue(&child_block, LazySrcLoc.unneeded, new_func_inst, undefined) catch unreachable).toIntern(); const callee = zcu.funcInfo(callee_index); - callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota); + callee.maxBranchQuota(ip, sema.branch_quota); // Make a runtime call to the new function, making sure to omit the comptime args. 
const func_ty = Type.fromInterned(callee.ty); @@ -8408,7 +8401,7 @@ fn instantiateGenericCall( if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(zcu)) { - ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true; + ip.funcSetCallsOrAwaitsErrorableFn(sema.owner_func_index); } try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index })); @@ -8769,9 +8762,9 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt)); if (int > len: { const mutate = &ip.global_error_set.mutate; - mutate.mutex.lock(); - defer mutate.mutex.unlock(); - break :len mutate.list.len; + mutate.map.mutex.lock(); + defer mutate.map.mutex.unlock(); + break :len mutate.names.len; } or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); return Air.internedToRef((try pt.intern(.{ .err = .{ @@ -18395,7 +18388,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try ty.resolveLayout(pt); // Getting alignment requires type layout const union_obj = mod.typeToUnion(ty).?; const tag_type = union_obj.loadTagType(ip); - const layout = union_obj.getLayout(ip); + const layout = union_obj.flagsUnordered(ip).layout; const union_field_vals = try gpa.alloc(InternPool.Index, tag_type.names.len); defer gpa.free(union_field_vals); @@ -18713,8 +18706,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const backing_integer_val = try pt.intern(.{ .opt = .{ .ty = (try pt.optionalType(.type_type)).toIntern(), .val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: { - assert(Type.fromInterned(packed_struct.backingIntType(ip).*).isInt(mod)); - break :val packed_struct.backingIntType(ip).*; + assert(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)).isInt(mod)); + break :val packed_struct.backingIntTypeUnordered(ip); } else .none, } }); @@ -19795,7 +19788,7 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_ return; } - if (!mod.intern_pool.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return; + if (!mod.intern_pool.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn) return; if (!start_block.ownerModule().error_tracing) return; assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere @@ -21053,7 +21046,7 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern()); if (sema.owner_func_index != .none and - ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and + ip.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn and block.ownerModule().error_tracing) { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); @@ -22201,11 +22194,11 @@ fn reifyUnion( if (any_aligns) { loaded_union.setFieldAligns(ip, field_aligns); } - loaded_union.tagTypePtr(ip).* = enum_tag_ty; - loaded_union.flagsPtr(ip).status = .have_field_types; + loaded_union.setTagType(ip, enum_tag_ty); + loaded_union.setStatus(ip, .have_field_types); try pt.finalizeAnonDecl(new_decl_index); - try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return 
Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } @@ -22464,15 +22457,15 @@ fn reifyStruct( if (opt_backing_int_val.optionalValue(mod)) |backing_int_val| { const backing_int_ty = backing_int_val.toType(); try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); - struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); + struct_type.setBackingIntType(ip, backing_int_ty.toIntern()); } else { const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum)); - struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); + struct_type.setBackingIntType(ip, backing_int_ty.toIntern()); } } try pt.finalizeAnonDecl(new_decl_index); - try mod.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); } @@ -28347,7 +28340,7 @@ fn unionFieldPtr( .is_const = union_ptr_info.flags.is_const, .is_volatile = union_ptr_info.flags.is_volatile, .address_space = union_ptr_info.flags.address_space, - .alignment = if (union_obj.getLayout(ip) == .auto) blk: { + .alignment = if (union_obj.flagsUnordered(ip).layout == .auto) blk: { const union_align = if (union_ptr_info.flags.alignment != .none) union_ptr_info.flags.alignment else @@ -28375,7 +28368,7 @@ fn unionFieldPtr( } if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: { - switch (union_obj.getLayout(ip)) { + switch (union_obj.flagsUnordered(ip).layout) { .auto => if (initializing) { // Store to the union to initialize the tag. const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); @@ -28413,7 +28406,7 @@ fn unionFieldPtr( } try sema.requireRuntimeBlock(block, src, null); - if (!initializing and union_obj.getLayout(ip) == .auto and block.wantSafety() and + if (!initializing and union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1) { const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); @@ -28456,7 +28449,7 @@ fn unionFieldVal( const un = ip.indexToKey(union_val.toIntern()).un; const field_tag = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); - switch (union_obj.getLayout(ip)) { + switch (union_obj.flagsUnordered(ip).layout) { .auto => { if (tag_matches) { return Air.internedToRef(un.val); @@ -28490,7 +28483,7 @@ fn unionFieldVal( } try sema.requireRuntimeBlock(block, src, null); - if (union_obj.getLayout(ip) == .auto and block.wantSafety() and + if (union_obj.flagsUnordered(ip).layout == .auto and block.wantSafety() and union_ty.unionTagTypeSafety(zcu) != null and union_obj.field_types.len > 1) { const wanted_tag_val = try pt.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index); @@ -32037,7 +32030,7 @@ pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) Compile pt.ensureDeclAnalyzed(decl_index) catch |err| { if (sema.owner_func_index != .none) { - ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure; + ip.funcSetAnalysisState(sema.owner_func_index, .dependency_failure); } else { sema.owner_decl.analysis = .dependency_failure; } @@ -32051,7 +32044,7 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) 
CompileError!void const ip = &mod.intern_pool; pt.ensureFuncBodyAnalyzed(func) catch |err| { if (sema.owner_func_index != .none) { - ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure; + ip.funcSetAnalysisState(sema.owner_func_index, .dependency_failure); } else { sema.owner_decl.analysis = .dependency_failure; } @@ -32397,7 +32390,7 @@ fn analyzeIsNonErrComptimeOnly( // If the error set is empty, we must return a comptime true or false. // However we want to avoid unnecessarily resolving an inferred error set // in case it is already non-empty. - switch (ip.funcIesResolved(func_index).*) { + switch (ip.funcIesResolvedUnordered(func_index)) { .anyerror_type => break :blk, .none => {}, else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk, @@ -33466,7 +33459,7 @@ fn wrapErrorUnionSet( .inferred_error_set_type => |func_index| ok: { // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. - switch (ip.funcIesResolved(func_index).*) { + switch (ip.funcIesResolvedUnordered(func_index)) { .anyerror_type => break :ok, .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { break :ok; @@ -35071,33 +35064,25 @@ pub fn resolveStructAlignment( assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); - assert(struct_type.flagsPtr(ip).alignment == .none); assert(struct_type.layout != .@"packed"); + assert(struct_type.flagsUnordered(ip).alignment == .none); - if (struct_type.flagsPtr(ip).field_types_wip) { - // We'll guess "pointer-aligned", if the struct has an - // underaligned pointer field then some allocations - // might require explicit alignment. - struct_type.flagsPtr(ip).assumed_pointer_aligned = true; - const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); - struct_type.flagsPtr(ip).alignment = result; - return; - } + const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + + // We'll guess "pointer-aligned", if the struct has an + // underaligned pointer field then some allocations + // might require explicit alignment. + if (struct_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return; try sema.resolveTypeFieldsStruct(ty, struct_type); - if (struct_type.setAlignmentWip(ip)) { - // We'll guess "pointer-aligned", if the struct has an - // underaligned pointer field then some allocations - // might require explicit alignment. - struct_type.flagsPtr(ip).assumed_pointer_aligned = true; - const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); - struct_type.flagsPtr(ip).alignment = result; - return; - } + // We'll guess "pointer-aligned", if the struct has an + // underaligned pointer field then some allocations + // might require explicit alignment. 
+ if (struct_type.assumePointerAlignedIfWip(ip, ptr_align)) return; defer struct_type.clearAlignmentWip(ip); - var result: Alignment = .@"1"; + var alignment: Alignment = .@"1"; for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); @@ -35109,10 +35094,10 @@ pub fn resolveStructAlignment( struct_type.layout, .sema, ); - result = result.maxStrict(field_align); + alignment = alignment.maxStrict(field_align); } - struct_type.flagsPtr(ip).alignment = result; + struct_type.setAlignment(ip, alignment); } pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { @@ -35177,7 +35162,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { big_align = big_align.maxStrict(field_align.*); } - if (struct_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) { + if (struct_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) { const msg = try sema.errMsg( ty.srcLoc(zcu), "struct layout depends on it having runtime bits", @@ -35186,7 +35171,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { return sema.failWithOwnedErrorMsg(null, msg); } - if (struct_type.flagsPtr(ip).assumed_pointer_aligned and + if (struct_type.flagsUnordered(ip).assumed_pointer_aligned and big_align.compareStrict(.neq, Alignment.fromByteUnits(@divExact(zcu.getTarget().ptrBitWidth(), 8)))) { const msg = try sema.errMsg( @@ -35254,10 +35239,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { offsets[i] = @intCast(aligns[i].forward(offset)); offset = offsets[i] + sizes[i]; } - struct_type.size(ip).* = @intCast(big_align.forward(offset)); - const flags = struct_type.flagsPtr(ip); - flags.alignment = big_align; - flags.layout_resolved = true; + struct_type.setLayoutResolved(ip, @intCast(big_align.forward(offset)), big_align); _ = try sema.typeRequiresComptime(ty); } @@ -35350,13 +35332,13 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp }; try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum); - struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); + struct_type.setBackingIntType(ip, backing_int_ty.toIntern()); } else { if (fields_bit_sum > std.math.maxInt(u16)) { return sema.fail(&block, block.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } const backing_int_ty = try pt.intType(.unsigned, @intCast(fields_bit_sum)); - struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); + struct_type.setBackingIntType(ip, backing_int_ty.toIntern()); } try sema.flushExports(); @@ -35430,15 +35412,12 @@ pub fn resolveUnionAlignment( assert(!union_type.haveLayout(ip)); - if (union_type.flagsPtr(ip).status == .field_types_wip) { - // We'll guess "pointer-aligned", if the union has an - // underaligned pointer field then some allocations - // might require explicit alignment. - union_type.flagsPtr(ip).assumed_pointer_aligned = true; - const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); - union_type.flagsPtr(ip).alignment = result; - return; - } + const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + + // We'll guess "pointer-aligned", if the union has an + // underaligned pointer field then some allocations + // might require explicit alignment. 
+ if (union_type.assumePointerAlignedIfFieldTypesWip(ip, ptr_align)) return; try sema.resolveTypeFieldsUnion(ty, union_type); @@ -35456,7 +35435,7 @@ pub fn resolveUnionAlignment( max_align = max_align.max(field_align); } - union_type.flagsPtr(ip).alignment = max_align; + union_type.setAlignment(ip, max_align); } /// This logic must be kept in sync with `Module.getUnionLayout`. @@ -35471,7 +35450,8 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { assert(sema.ownerUnit().unwrap().decl == union_type.decl); - switch (union_type.flagsPtr(ip).status) { + const old_flags = union_type.flagsUnordered(ip); + switch (old_flags.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { const msg = try sema.errMsg( @@ -35484,12 +35464,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { .have_layout, .fully_resolved_wip, .fully_resolved => return, } - const prev_status = union_type.flagsPtr(ip).status; - errdefer if (union_type.flagsPtr(ip).status == .layout_wip) { - union_type.flagsPtr(ip).status = prev_status; - }; + errdefer union_type.setStatusIfLayoutWip(ip, old_flags.status); - union_type.flagsPtr(ip).status = .layout_wip; + union_type.setStatus(ip, .layout_wip); var max_size: u64 = 0; var max_align: Alignment = .@"1"; @@ -35516,8 +35493,8 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { max_align = max_align.max(field_align); } - const flags = union_type.flagsPtr(ip); - const has_runtime_tag = flags.runtime_tag.hasTag() and try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty)); + const has_runtime_tag = union_type.flagsUnordered(ip).runtime_tag.hasTag() and + try sema.typeHasRuntimeBits(Type.fromInterned(union_type.enum_tag_ty)); const size, const alignment, const padding = if (has_runtime_tag) layout: { const enum_tag_type = Type.fromInterned(union_type.enum_tag_ty); const tag_align = try sema.typeAbiAlignment(enum_tag_type); @@ -35551,12 +35528,9 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { break :layout .{ size, max_align.max(tag_align), padding }; } else .{ max_align.forward(max_size), max_align, 0 }; - union_type.size(ip).* = @intCast(size); - union_type.padding(ip).* = padding; - flags.alignment = alignment; - flags.status = .have_layout; + union_type.setHaveLayout(ip, @intCast(size), padding, alignment); - if (union_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) { + if (union_type.flagsUnordered(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) { const msg = try sema.errMsg( ty.srcLoc(pt.zcu), "union layout depends on it having runtime bits", @@ -35565,7 +35539,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { return sema.failWithOwnedErrorMsg(null, msg); } - if (union_type.flagsPtr(ip).assumed_pointer_aligned and + if (union_type.flagsUnordered(ip).assumed_pointer_aligned and alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(pt.zcu.getTarget().ptrBitWidth(), 8)))) { const msg = try sema.errMsg( @@ -35612,7 +35586,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { assert(sema.ownerUnit().unwrap().decl == union_obj.decl); - switch (union_obj.flagsPtr(ip).status) { + switch (union_obj.flagsUnordered(ip).status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, } @@ -35621,15 +35595,15 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { // After we have resolve union layout we have to 
go over the fields again to // make sure pointer fields get their child types resolved as well. // See also similar code for structs. - const prev_status = union_obj.flagsPtr(ip).status; - errdefer union_obj.flagsPtr(ip).status = prev_status; + const prev_status = union_obj.flagsUnordered(ip).status; + errdefer union_obj.setStatus(ip, prev_status); - union_obj.flagsPtr(ip).status = .fully_resolved_wip; + union_obj.setStatus(ip, .fully_resolved_wip); for (0..union_obj.field_types.len) |field_index| { const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); try field_ty.resolveFully(pt); } - union_obj.flagsPtr(ip).status = .fully_resolved; + union_obj.setStatus(ip, .fully_resolved); } // And let's not forget comptime-only status. @@ -35662,7 +35636,7 @@ pub fn resolveTypeFieldsStruct( if (struct_type.haveFieldTypes(ip)) return; - if (struct_type.setTypesWip(ip)) { + if (struct_type.setFieldTypesWip(ip)) { const msg = try sema.errMsg( Type.fromInterned(ty).srcLoc(zcu), "struct '{}' depends on itself", @@ -35670,7 +35644,7 @@ pub fn resolveTypeFieldsStruct( ); return sema.failWithOwnedErrorMsg(null, msg); } - defer struct_type.clearTypesWip(ip); + defer struct_type.clearFieldTypesWip(ip); semaStructFields(pt, sema.arena, struct_type) catch |err| switch (err) { error.AnalysisFail => { @@ -35739,7 +35713,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load }, else => {}, } - switch (union_type.flagsPtr(ip).status) { + switch (union_type.flagsUnordered(ip).status) { .none => {}, .field_types_wip => { const msg = try sema.errMsg( @@ -35757,8 +35731,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load => return, } - union_type.flagsPtr(ip).status = .field_types_wip; - errdefer union_type.flagsPtr(ip).status = .none; + union_type.setStatus(ip, .field_types_wip); + errdefer union_type.setStatus(ip, .none); semaUnionFields(pt, sema.arena, union_type) catch |err| switch (err) { error.AnalysisFail => { if (owner_decl.analysis == .complete) { @@ -35769,7 +35743,7 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load error.OutOfMemory => return error.OutOfMemory, error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; - union_type.flagsPtr(ip).status = .have_field_types; + union_type.setStatus(ip, .have_field_types); } /// Returns a normal error set corresponding to the fully populated inferred @@ -35790,10 +35764,10 @@ fn resolveInferredErrorSet( // TODO: during an incremental update this might not be `.none`, but the // function might be out-of-date! - const resolved_ty = func.resolvedErrorSet(ip).*; + const resolved_ty = func.resolvedErrorSetUnordered(ip); if (resolved_ty != .none) return resolved_ty; - if (func.analysis(ip).state == .in_progress) + if (func.analysisUnordered(ip).state == .in_progress) return sema.fail(block, src, "unable to resolve inferred error set", .{}); // In order to ensure that all dependencies are properly added to the set, @@ -35830,7 +35804,7 @@ fn resolveInferredErrorSet( // This will now have been resolved by the logic at the end of `Module.analyzeFnBody` // which calls `resolveInferredErrorSetPtr`. 
- const final_resolved_ty = func.resolvedErrorSet(ip).*; + const final_resolved_ty = func.resolvedErrorSetUnordered(ip); assert(final_resolved_ty != .none); return final_resolved_ty; } @@ -35996,8 +35970,7 @@ fn semaStructFields( return; }, .auto, .@"extern" => { - struct_type.size(ip).* = 0; - struct_type.flagsPtr(ip).layout_resolved = true; + struct_type.setLayoutResolved(ip, 0, .none); return; }, }; @@ -36191,7 +36164,7 @@ fn semaStructFields( extra_index += zir_field.init_body_len; } - struct_type.clearTypesWip(ip); + struct_type.clearFieldTypesWip(ip); if (!any_inits) struct_type.setHaveFieldInits(ip); try sema.flushExports(); @@ -36467,7 +36440,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L } } else { // The provided type is the enum tag type. - union_type.tagTypePtr(ip).* = provided_ty.toIntern(); + union_type.setTagType(ip, provided_ty.toIntern()); const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) { .enum_type => ip.loadEnumType(provided_ty.toIntern()), else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(pt)}), @@ -36605,10 +36578,11 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L } if (explicit_tags_seen.len > 0) { - const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*); + const tag_ty = union_type.tagTypeUnordered(ip); + const tag_info = ip.loadEnumType(tag_ty); const enum_index = tag_info.nameIndex(ip, field_name) orelse { return sema.fail(&block_scope, name_src, "no field named '{}' in enum '{}'", .{ - field_name.fmt(ip), Type.fromInterned(union_type.tagTypePtr(ip).*).fmt(pt), + field_name.fmt(ip), Type.fromInterned(tag_ty).fmt(pt), }); }; @@ -36645,7 +36619,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } - const layout = union_type.getLayout(ip); + const layout = union_type.flagsUnordered(ip).layout; if (layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) { @@ -36688,7 +36662,8 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L union_type.setFieldAligns(ip, field_aligns.items); if (explicit_tags_seen.len > 0) { - const tag_info = ip.loadEnumType(union_type.tagTypePtr(ip).*); + const tag_ty = union_type.tagTypeUnordered(ip); + const tag_info = ip.loadEnumType(tag_ty); if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(src, "enum field(s) missing in union", .{}); @@ -36696,21 +36671,21 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L for (tag_info.names.get(ip), 0..) 
|field_name, field_index| { if (explicit_tags_seen[field_index]) continue; - try sema.addFieldErrNote(Type.fromInterned(union_type.tagTypePtr(ip).*), field_index, msg, "field '{}' missing, declared here", .{ + try sema.addFieldErrNote(Type.fromInterned(tag_ty), field_index, msg, "field '{}' missing, declared here", .{ field_name.fmt(ip), }); } - try sema.addDeclaredHereNote(msg, Type.fromInterned(union_type.tagTypePtr(ip).*)); + try sema.addDeclaredHereNote(msg, Type.fromInterned(tag_ty)); break :msg msg; }; return sema.failWithOwnedErrorMsg(&block_scope, msg); } } else if (enum_field_vals.count() > 0) { const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), zcu.declPtr(union_type.decl)); - union_type.tagTypePtr(ip).* = enum_ty; + union_type.setTagType(ip, enum_ty); } else { const enum_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, zcu.declPtr(union_type.decl)); - union_type.tagTypePtr(ip).* = enum_ty; + union_type.setTagType(ip, enum_ty); } try sema.flushExports(); @@ -37086,7 +37061,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { try ty.resolveLayout(pt); const union_obj = ip.loadUnionType(ty.toIntern()); - const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypePtr(ip).*))) orelse + const tag_val = (try sema.typeHasOnePossibleValue(Type.fromInterned(union_obj.tagTypeUnordered(ip)))) orelse return null; if (union_obj.field_types.len == 0) { const only = try pt.intern(.{ .empty_enum_value = ty.toIntern() }); diff --git a/src/Type.zig b/src/Type.zig index 65176e9a80..2c3d59ba56 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -605,17 +605,15 @@ pub fn hasRuntimeBitsAdvanced( .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).runtime_tag) { + const union_flags = union_type.flagsUnordered(ip); + switch (union_flags.runtime_tag) { .none => { - if (union_type.flagsPtr(ip).status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - union_type.flagsPtr(ip).assumed_runtime_bits = true; - return true; - } + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. + if (union_type.assumeRuntimeBitsIfFieldTypesWip(ip)) return true; }, .safety, .tagged => { - const tag_ty = union_type.tagTypePtr(ip).*; + const tag_ty = union_type.tagTypeUnordered(ip); // tag_ty will be `none` if this union's tag type is not resolved yet, // in which case we want control flow to continue down below. 
if (tag_ty != .none and @@ -627,8 +625,8 @@ pub fn hasRuntimeBitsAdvanced( } switch (strat) { .sema => try ty.resolveFields(pt), - .eager => assert(union_type.flagsPtr(ip).status.haveFieldTypes()), - .lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes()) + .eager => assert(union_flags.status.haveFieldTypes()), + .lazy => if (!union_flags.status.haveFieldTypes()) return error.NeedLazy, } for (0..union_type.field_types.len) |field_index| { @@ -745,8 +743,8 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { }, .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); - return switch (union_type.flagsPtr(ip).runtime_tag) { - .none, .safety => union_type.flagsPtr(ip).layout != .auto, + return switch (union_type.flagsUnordered(ip).runtime_tag) { + .none, .safety => union_type.flagsUnordered(ip).layout != .auto, .tagged => false, }; }, @@ -1045,7 +1043,7 @@ pub fn abiAlignmentAdvanced( if (struct_type.layout == .@"packed") { switch (strat) { .sema => try ty.resolveLayout(pt), - .lazy => if (struct_type.backingIntType(ip).* == .none) return .{ + .lazy => if (struct_type.backingIntTypeUnordered(ip) == .none) return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_align = ty.toIntern() }, @@ -1053,10 +1051,10 @@ pub fn abiAlignmentAdvanced( }, .eager => {}, } - return .{ .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiAlignment(pt) }; + return .{ .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiAlignment(pt) }; } - if (struct_type.flagsPtr(ip).alignment == .none) switch (strat) { + if (struct_type.flagsUnordered(ip).alignment == .none) switch (strat) { .eager => unreachable, // struct alignment not resolved .sema => try ty.resolveStructAlignment(pt), .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ @@ -1065,7 +1063,7 @@ pub fn abiAlignmentAdvanced( } })) }, }; - return .{ .scalar = struct_type.flagsPtr(ip).alignment }; + return .{ .scalar = struct_type.flagsUnordered(ip).alignment }; }, .anon_struct_type => |tuple| { var big_align: Alignment = .@"1"; @@ -1088,7 +1086,7 @@ pub fn abiAlignmentAdvanced( .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); - if (union_type.flagsPtr(ip).alignment == .none) switch (strat) { + if (union_type.flagsUnordered(ip).alignment == .none) switch (strat) { .eager => unreachable, // union layout not resolved .sema => try ty.resolveUnionAlignment(pt), .lazy => return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ @@ -1097,7 +1095,7 @@ pub fn abiAlignmentAdvanced( } })) }, }; - return .{ .scalar = union_type.flagsPtr(ip).alignment }; + return .{ .scalar = union_type.flagsUnordered(ip).alignment }; }, .opaque_type => return .{ .scalar = .@"1" }, .enum_type => return .{ @@ -1420,7 +1418,7 @@ pub fn abiSizeAdvanced( .sema => try ty.resolveLayout(pt), .lazy => switch (struct_type.layout) { .@"packed" => { - if (struct_type.backingIntType(ip).* == .none) return .{ + if (struct_type.backingIntTypeUnordered(ip) == .none) return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, @@ -1440,11 +1438,11 @@ pub fn abiSizeAdvanced( } switch (struct_type.layout) { .@"packed" => return .{ - .scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(pt), + .scalar = Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).abiSize(pt), }, .auto, .@"extern" => { assert(struct_type.haveLayout(ip)); - return .{ .scalar = 
struct_type.size(ip).* }; + return .{ .scalar = struct_type.sizeUnordered(ip) }; }, } }, @@ -1464,7 +1462,7 @@ pub fn abiSizeAdvanced( const union_type = ip.loadUnionType(ty.toIntern()); switch (strat) { .sema => try ty.resolveLayout(pt), - .lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{ + .lazy => if (!union_type.flagsUnordered(ip).status.haveLayout()) return .{ .val = Value.fromInterned(try pt.intern(.{ .int = .{ .ty = .comptime_int_type, .storage = .{ .lazy_size = ty.toIntern() }, @@ -1474,7 +1472,7 @@ pub fn abiSizeAdvanced( } assert(union_type.haveLayout(ip)); - return .{ .scalar = union_type.size(ip).* }; + return .{ .scalar = union_type.sizeUnordered(ip) }; }, .opaque_type => unreachable, // no size available .enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(pt) }, @@ -1788,7 +1786,7 @@ pub fn bitSizeAdvanced( if (is_packed) try ty.resolveLayout(pt); } if (is_packed) { - return try Type.fromInterned(struct_type.backingIntType(ip).*).bitSizeAdvanced(pt, strat); + return try Type.fromInterned(struct_type.backingIntTypeUnordered(ip)).bitSizeAdvanced(pt, strat); } return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8; }, @@ -1808,7 +1806,7 @@ pub fn bitSizeAdvanced( if (!is_packed) { return (try ty.abiSizeAdvanced(pt, strat_lazy)).scalar * 8; } - assert(union_type.flagsPtr(ip).status.haveFieldTypes()); + assert(union_type.flagsUnordered(ip).status.haveFieldTypes()); var size: u64 = 0; for (0..union_type.field_types.len) |field_index| { @@ -2056,9 +2054,10 @@ pub fn unionTagType(ty: Type, mod: *Module) ?Type { else => return null, } const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).runtime_tag) { + const union_flags = union_type.flagsUnordered(ip); + switch (union_flags.runtime_tag) { .tagged => { - assert(union_type.flagsPtr(ip).status.haveFieldTypes()); + assert(union_flags.status.haveFieldTypes()); return Type.fromInterned(union_type.enum_tag_ty); }, else => return null, @@ -2135,7 +2134,7 @@ pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout return switch (ip.indexToKey(ty.toIntern())) { .struct_type => ip.loadStructType(ty.toIntern()).layout, .anon_struct_type => .auto, - .union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout, + .union_type => ip.loadUnionType(ty.toIntern()).flagsUnordered(ip).layout, else => unreachable, }; } @@ -2157,7 +2156,7 @@ pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { .anyerror_type, .adhoc_inferred_error_set_type => false, else => switch (ip.indexToKey(ty.toIntern())) { .error_set_type => |error_set_type| error_set_type.names.len == 0, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) { .none, .anyerror_type => false, else => |t| ip.indexToKey(t).error_set_type.names.len == 0, }, @@ -2175,7 +2174,7 @@ pub fn isAnyError(ty: Type, mod: *Module) bool { .anyerror_type => true, .adhoc_inferred_error_set_type => false, else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type, + .inferred_error_set_type => |i| ip.funcIesResolvedUnordered(i) == .anyerror_type, else => false, }, }; @@ -2200,7 +2199,7 @@ pub fn errorSetHasFieldIp( .anyerror_type => true, else => switch (ip.indexToKey(ty)) { .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + 
.inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) { .anyerror_type => true, .none => false, else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null, @@ -2336,7 +2335,7 @@ pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, else => switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| return int_type, - .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*), + .struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntTypeUnordered(ip)), .enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), .vector_type => |vector_type| ty = Type.fromInterned(vector_type.child), @@ -2826,17 +2825,18 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se return false; // A struct with no fields is not comptime-only. - return switch (struct_type.flagsPtr(ip).requires_comptime) { + return switch (struct_type.setRequiresComptimeWip(ip)) { .no, .wip => false, .yes => true, .unknown => { assert(strat == .sema); - if (struct_type.flagsPtr(ip).field_types_wip) + if (struct_type.flagsUnordered(ip).field_types_wip) { + struct_type.setRequiresComptime(ip, .unknown); return false; + } - struct_type.flagsPtr(ip).requires_comptime = .wip; - errdefer struct_type.flagsPtr(ip).requires_comptime = .unknown; + errdefer struct_type.setRequiresComptime(ip, .unknown); try ty.resolveFields(pt); @@ -2849,12 +2849,12 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se // be considered resolved. Comptime-only types // still maintain a layout of their // runtime-known fields. - struct_type.flagsPtr(ip).requires_comptime = .yes; + struct_type.setRequiresComptime(ip, .yes); return true; } } - struct_type.flagsPtr(ip).requires_comptime = .no; + struct_type.setRequiresComptime(ip, .no); return false; }, }; @@ -2870,29 +2870,30 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, strat: ResolveStrat) Se .union_type => { const union_type = ip.loadUnionType(ty.toIntern()); - switch (union_type.flagsPtr(ip).requires_comptime) { + switch (union_type.setRequiresComptimeWip(ip)) { .no, .wip => return false, .yes => return true, .unknown => { assert(strat == .sema); - if (union_type.flagsPtr(ip).status == .field_types_wip) + if (union_type.flagsUnordered(ip).status == .field_types_wip) { + union_type.setRequiresComptime(ip, .unknown); return false; + } - union_type.flagsPtr(ip).requires_comptime = .wip; - errdefer union_type.flagsPtr(ip).requires_comptime = .unknown; + errdefer union_type.setRequiresComptime(ip, .unknown); try ty.resolveFields(pt); for (0..union_type.field_types.len) |field_idx| { const field_ty = union_type.field_types.get(ip)[field_idx]; if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(pt, strat)) { - union_type.flagsPtr(ip).requires_comptime = .yes; + union_type.setRequiresComptime(ip, .yes); return true; } } - union_type.flagsPtr(ip).requires_comptime = .no; + union_type.setRequiresComptime(ip, .no); return false; }, } @@ -3117,7 +3118,7 @@ pub fn errorSetNames(ty: Type, mod: *Module) InternPool.NullTerminatedString.Sli const ip = &mod.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .error_set_type => |x| x.names, - .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) { .none => unreachable, // 
unresolved inferred error set .anyerror_type => unreachable, else => |t| ip.indexToKey(t).error_set_type.names, @@ -3374,7 +3375,7 @@ pub fn isTuple(ty: Type, mod: *Module) bool { const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.layout == .@"packed") return false; if (struct_type.decl == .none) return false; - return struct_type.flagsPtr(ip).is_tuple; + return struct_type.flagsUnordered(ip).is_tuple; }, .anon_struct_type => |anon_struct| anon_struct.names.len == 0, else => false, @@ -3396,7 +3397,7 @@ pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.layout == .@"packed") return false; if (struct_type.decl == .none) return false; - return struct_type.flagsPtr(ip).is_tuple; + return struct_type.flagsUnordered(ip).is_tuple; }, .anon_struct_type => true, else => false, diff --git a/src/Value.zig b/src/Value.zig index da1151139f..69b09a203e 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -558,7 +558,7 @@ pub fn writeToPackedMemory( }, .Union => { const union_obj = mod.typeToUnion(ty).?; - switch (union_obj.getLayout(ip)) { + switch (union_obj.flagsUnordered(ip).layout) { .auto, .@"extern" => unreachable, // Handled in non-packed writeToMemory .@"packed" => { if (val.unionTag(mod)) |union_tag| { diff --git a/src/Zcu.zig b/src/Zcu.zig index 6a9812d736..15f418c6fe 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2968,7 +2968,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) const is_outdated = mod.outdated.contains(func_as_depender) or mod.potentially_outdated.contains(func_as_depender); - switch (func.analysis(ip).state) { + switch (func.analysisUnordered(ip).state) { .none => {}, .queued => return, // As above, we don't need to forward errors here. @@ -2983,13 +2983,13 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) // Decl itself is safely analyzed, and body analysis is not yet queued - try mod.comp.work_queue.writeItem(.{ .analyze_func = func_index }); + try mod.comp.queueJob(.{ .analyze_func = func_index }); if (mod.emit_h != null) { // TODO: we ideally only want to do this if the function's type changed // since the last update - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); + try mod.comp.queueJob(.{ .emit_h_decl = decl_index }); } - func.analysis(ip).state = .queued; + func.setAnalysisState(ip, .queued); } pub const SemaDeclResult = packed struct { diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index f6a47f626b..59b6b6bf0b 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -641,8 +641,8 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter // We'll want to remember what the IES used to be before the update for // dependency invalidation purposes. 
- const old_resolved_ies = if (func.analysis(ip).inferred_error_set) - func.resolvedErrorSet(ip).* + const old_resolved_ies = if (func.analysisUnordered(ip).inferred_error_set) + func.resolvedErrorSetUnordered(ip) else .none; @@ -671,7 +671,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter zcu.deleteUnitReferences(func_as_depender); } - switch (func.analysis(ip).state) { + switch (func.analysisUnordered(ip).state) { .success => if (!was_outdated) return, .sema_failure, .dependency_failure, @@ -693,11 +693,11 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter var air = pt.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { error.AnalysisFail => { - if (func.analysis(ip).state == .in_progress) { + if (func.analysisUnordered(ip).state == .in_progress) { // If this decl caused the compile error, the analysis field would // be changed to indicate it was this Decl's fault. Because this // did not happen, we infer here that it was a dependency failure. - func.analysis(ip).state = .dependency_failure; + func.setAnalysisState(ip, .dependency_failure); } return error.AnalysisFail; }, @@ -707,8 +707,8 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter const invalidate_ies_deps = i: { if (!was_outdated) break :i false; - if (!func.analysis(ip).inferred_error_set) break :i true; - const new_resolved_ies = func.resolvedErrorSet(ip).*; + if (!func.analysisUnordered(ip).inferred_error_set) break :i true; + const new_resolved_ies = func.resolvedErrorSetUnordered(ip); break :i new_resolved_ies != old_resolved_ies; }; if (invalidate_ies_deps) { @@ -729,7 +729,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter return; } - try comp.work_queue.writeItem(.{ .codegen_func = .{ + try comp.queueJob(.{ .codegen_func = .{ .func = func_index, .air = air, } }); @@ -783,7 +783,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai .{@errorName(err)}, ), ); - func.analysis(ip).state = .codegen_failure; + func.setAnalysisState(ip, .codegen_failure); return; }, }; @@ -797,12 +797,12 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai // Correcting this failure will involve changing a type this function // depends on, hence triggering re-analysis of this function, so this // interacts correctly with incremental compilation. 
- func.analysis(ip).state = .codegen_failure; + func.setAnalysisState(ip, .codegen_failure); } else if (comp.bin_file) |lf| { lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - func.analysis(ip).state = .codegen_failure; + func.setAnalysisState(ip, .codegen_failure); }, else => { try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); @@ -812,7 +812,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai "unable to codegen: {s}", .{@errorName(err)}, )); - func.analysis(ip).state = .codegen_failure; + func.setAnalysisState(ip, .codegen_failure); try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); }, }; @@ -903,7 +903,7 @@ fn getFileRootStruct( decl.analysis = .complete; try pt.scanNamespace(namespace_index, decls, decl); - try zcu.comp.work_queue.writeItem(.{ .resolve_type_fully = wip_ty.index }); + try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); } @@ -1080,7 +1080,7 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { const old_linksection = decl.@"linksection"; const old_addrspace = decl.@"addrspace"; const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func| - prev_func.analysis(ip).state == .inline_only + prev_func.analysisUnordered(ip).state == .inline_only else false; @@ -1311,10 +1311,10 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { // codegen backend wants full access to the Decl Type. try decl_ty.resolveFully(pt); - try zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); + try zcu.comp.queueJob(.{ .codegen_decl = decl_index }); if (result.invalidate_decl_ref and zcu.emit_h != null) { - try zcu.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); + try zcu.comp.queueJob(.{ .emit_h_decl = decl_index }); } } @@ -1740,8 +1740,6 @@ pub fn scanNamespace( var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; defer seen_decls.deinit(gpa); - try zcu.comp.work_queue.ensureUnusedCapacity(decls.len); - namespace.decls.clearRetainingCapacity(); try namespace.decls.ensureTotalCapacity(gpa, decls.len); @@ -1967,7 +1965,7 @@ const ScanDeclIter = struct { log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{ namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index, }); - comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index }); + try comp.queueJob(.{ .analyze_decl = decl_index }); } } @@ -1976,7 +1974,7 @@ const ScanDeclIter = struct { // updated line numbers. Look into this! // TODO Look into detecting when this would be unnecessary by storing enough state // in `Decl` to notice that the line number did not change. - comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); + try comp.queueJob(.{ .update_line_number = decl_index }); } } }; @@ -1991,7 +1989,7 @@ pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void { /// Finalize the creation of an anon decl. 
pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void { if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) { - try pt.zcu.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); + try pt.zcu.comp.queueJob(.{ .codegen_decl = decl_index }); } } @@ -2037,7 +2035,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type), .fn_ret_ty_ies = null, .owner_func_index = func_index, - .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota), + .branch_quota = @max(func.branchQuotaUnordered(ip), Sema.default_branch_quota), .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -2047,14 +2045,14 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? }); try sema.declareDependency(.{ .decl_val = decl_index }); - if (func.analysis(ip).inferred_error_set) { + if (func.analysisUnordered(ip).inferred_error_set) { const ies = try arena.create(Sema.InferredErrorSet); ies.* = .{ .func = func_index }; sema.fn_ret_ty_ies = ies; } // reset in case calls to errorable functions are removed. - func.analysis(ip).calls_or_awaits_errorable_fn = false; + func.setCallsOrAwaitsErrorableFn(ip, false); // First few indexes of extra are reserved and set at the end. const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; @@ -2080,7 +2078,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All }; defer inner_block.instructions.deinit(gpa); - const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).resolve(ip)); + const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip)); // Here we are performing "runtime semantic analysis" for a function body, which means // we must map the parameter ZIR instructions to `arg` AIR instructions. @@ -2149,7 +2147,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All }); } - func.analysis(ip).state = .in_progress; + func.setAnalysisState(ip, .in_progress); const last_arg_index = inner_block.instructions.items.len; @@ -2176,7 +2174,7 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All } // If we don't get an error return trace from a caller, create our own. - if (func.analysis(ip).calls_or_awaits_errorable_fn and + if (func.analysisUnordered(ip).calls_or_awaits_errorable_fn and mod.comp.config.any_error_tracing and !sema.fn_ret_ty.isError(mod)) { @@ -2218,10 +2216,10 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All else => |e| return e, }; assert(ies.resolved != .none); - ip.funcIesResolved(func_index).* = ies.resolved; + ip.funcSetIesResolved(func_index, ies.resolved); } - func.analysis(ip).state = .success; + func.setAnalysisState(ip, .success); // Finally we must resolve the return type and parameter types so that backends // have full access to type information. 
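
Several of the new setters are merges rather than plain assignments: `maxBranchQuota` and `funcMaxStackAlignment` combine the caller's value with whatever is already stored, and `branchQuotaUnordered` above reads the result without locking. The following is a hedged, self-contained sketch of why the mutex matters for such merge-style updates; the `Counter` type and its method names are illustrative stand-ins, not the real InternPool fields.

const std = @import("std");

const Counter = struct {
    mutex: std.Thread.Mutex = .{},
    value: u32 = 0,

    /// Writer side: the mutex serializes the read-modify-write, so two
    /// threads merging different maxima cannot lose an update.
    fn bumpMax(c: *Counter, new: u32) void {
        c.mutex.lock();
        defer c.mutex.unlock();
        const merged = @max(c.value, new);
        @atomicStore(u32, &c.value, merged, .release);
    }

    /// Reader side: a single unordered atomic load; no lock needed.
    fn loadUnordered(c: *const Counter) u32 {
        return @atomicLoad(u32, &c.value, .unordered);
    }
};

test "concurrent max-merges never lose an update" {
    var counter: Counter = .{};
    const t1 = try std.Thread.spawn(.{}, Counter.bumpMax, .{ &counter, 1000 });
    const t2 = try std.Thread.spawn(.{}, Counter.bumpMax, .{ &counter, 2000 });
    t1.join();
    t2.join();
    try std.testing.expectEqual(@as(u32, 2000), counter.loadUnordered());
}

If the two bumpMax calls ran without the mutex, one thread could read the old value, be preempted, and then overwrite the other thread's larger maximum; serializing the read-modify-write makes the merge safe while readers still get by with a single unordered load.
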
@@ -2415,6 +2413,7 @@ fn processExportsInner( ) error{OutOfMemory}!void { const zcu = pt.zcu; const gpa = zcu.gpa; + const ip = &zcu.intern_pool; for (export_indices) |export_idx| { const new_export = &zcu.all_exports.items[export_idx]; @@ -2423,7 +2422,7 @@ fn processExportsInner( new_export.status = .failed_retryable; try zcu.failed_exports.ensureUnusedCapacity(gpa, 1); const msg = try Zcu.ErrorMsg.create(gpa, new_export.src, "exported symbol collision: {}", .{ - new_export.opts.name.fmt(&zcu.intern_pool), + new_export.opts.name.fmt(ip), }); errdefer msg.destroy(gpa); const other_export = zcu.all_exports.items[gop.value_ptr.*]; @@ -2443,8 +2442,7 @@ fn processExportsInner( if (!decl.owns_tv) break :failed false; if (decl.typeOf(zcu).zigTypeTag(zcu) != .Fn) break :failed false; // Check if owned function failed - const a = zcu.funcInfo(decl.val.toIntern()).analysis(&zcu.intern_pool); - break :failed a.state != .success; + break :failed zcu.funcInfo(decl.val.toIntern()).analysisUnordered(ip).state != .success; }) { // This `Decl` is failed, so was never sent to codegen. // TODO: we should probably tell the backend to delete any old exports of this `Decl`? @@ -3072,7 +3070,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp most_aligned_field_size = field_size; } } - const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag(); + const have_tag = loaded_union.flagsUnordered(ip).runtime_tag.hasTag(); if (!have_tag or !Type.fromInterned(loaded_union.enum_tag_ty).hasRuntimeBits(pt)) { return .{ .abi_size = payload_align.forward(payload_size), @@ -3091,7 +3089,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp const tag_size = Type.fromInterned(loaded_union.enum_tag_ty).abiSize(pt); const tag_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(pt).max(.@"1"); return .{ - .abi_size = loaded_union.size(ip).*, + .abi_size = loaded_union.sizeUnordered(ip), .abi_align = tag_align.max(payload_align), .most_aligned_field = most_aligned_field, .most_aligned_field_size = most_aligned_field_size, @@ -3100,7 +3098,7 @@ pub fn getUnionLayout(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionTyp .payload_align = payload_align, .tag_align = tag_align, .tag_size = tag_size, - .padding = loaded_union.padding(ip).*, + .padding = loaded_union.paddingUnordered(ip), }; } @@ -3142,7 +3140,7 @@ pub fn unionFieldNormalAlignmentAdvanced( strat: Type.ResolveStrat, ) Zcu.SemaError!InternPool.Alignment { const ip = &pt.zcu.intern_pool; - assert(loaded_union.flagsPtr(ip).layout != .@"packed"); + assert(loaded_union.flagsUnordered(ip).layout != .@"packed"); const field_align = loaded_union.fieldAlign(ip, field_index); if (field_align != .none) return field_align; const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index da474c1261..1400d835e0 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -56,7 +56,7 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread, ctx: Context) Class { .Union => { const bit_size = ty.bitSize(pt); const union_obj = pt.zcu.typeToUnion(ty).?; - if (union_obj.getLayout(ip) == .@"packed") { + if (union_obj.flagsUnordered(ip).layout == .@"packed") { if (bit_size > 64) return .memory; return .byval; } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 70876a298b..c5ae354f54 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -768,7 +768,7 @@ pub fn generate( 
         @intFromEnum(FrameIndex.stack_frame),
         FrameAlloc.init(.{
             .size = 0,
-            .alignment = func.analysis(ip).stack_alignment.max(.@"1"),
+            .alignment = func.analysisUnordered(ip).stack_alignment.max(.@"1"),
         }),
     );
     function.frame_allocs.set(
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 32b3b42389..a969294789 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1011,7 +1011,7 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype {
         },
         .Struct => {
             if (pt.zcu.typeToPackedStruct(ty)) |packed_struct| {
-                return typeToValtype(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
+                return typeToValtype(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
             } else {
                 return wasm.Valtype.i32;
             }
@@ -1746,7 +1746,7 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
         => return ty.hasRuntimeBitsIgnoreComptime(pt),
         .Union => {
             if (mod.typeToUnion(ty)) |union_obj| {
-                if (union_obj.getLayout(ip) == .@"packed") {
+                if (union_obj.flagsUnordered(ip).layout == .@"packed") {
                     return ty.abiSize(pt) > 8;
                 }
             }
@@ -1754,7 +1754,7 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool {
         },
         .Struct => {
             if (mod.typeToPackedStruct(ty)) |packed_struct| {
-                return isByRef(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
+                return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
             }
             return ty.hasRuntimeBitsIgnoreComptime(pt);
         },
@@ -3377,7 +3377,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
             assert(struct_type.layout == .@"packed");
             var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
             val.writeToPackedMemory(ty, pt, &buf, 0) catch unreachable;
-            const backing_int_ty = Type.fromInterned(struct_type.backingIntType(ip).*);
+            const backing_int_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip));
             const int_val = try pt.intValue(
                 backing_int_ty,
                 mem.readInt(u64, &buf, .little),
@@ -3443,7 +3443,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
         },
         .Struct => {
             const packed_struct = mod.typeToPackedStruct(ty).?;
-            return func.emitUndefined(Type.fromInterned(packed_struct.backingIntType(ip).*));
+            return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
         },
         else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
     }
@@ -3974,7 +3974,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         .Struct => result: {
             const packed_struct = mod.typeToPackedStruct(struct_ty).?;
             const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
-            const backing_ty = Type.fromInterned(packed_struct.backingIntType(ip).*);
+            const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
             const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
                 return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
             };
@@ -5377,7 +5377,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
             const packed_struct = mod.typeToPackedStruct(result_ty).?;
             const field_types = packed_struct.field_types;
-            const backing_type = Type.fromInterned(packed_struct.backingIntType(ip).*);
+            const backing_type = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));

             // ensure the result is zero'd
             const result = try func.allocLocal(backing_type);
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index 1b61be1e84..f4356275ab 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -71,7 +71,7 @@ pub fn classifyType(ty: Type, pt: Zcu.PerThread) [2]Class {
         },
         .Union => {
             const union_obj = pt.zcu.typeToUnion(ty).?;
-            if (union_obj.getLayout(ip) == .@"packed") {
+            if (union_obj.flagsUnordered(ip).layout == .@"packed") {
                 if (ty.bitSize(pt) <= 64) return direct;
                 return .{ .direct, .direct };
             }
@@ -107,7 +107,7 @@ pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
             if (mod.typeToPackedStruct(ty)) |packed_struct| {
-                return scalarType(Type.fromInterned(packed_struct.backingIntType(ip).*), pt);
+                return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt);
             } else {
                 assert(ty.structFieldCount(mod) == 1);
                 return scalarType(ty.structFieldType(0, mod), pt);
@@ -115,7 +115,7 @@ pub fn scalarType(ty: Type, pt: Zcu.PerThread) Type {
         },
         .Union => {
             const union_obj = mod.typeToUnion(ty).?;
-            if (union_obj.getLayout(ip) != .@"packed") {
+            if (union_obj.flagsUnordered(ip).layout != .@"packed") {
                 const layout = pt.getUnionLayout(union_obj);
                 if (layout.payload_size == 0 and layout.tag_size != 0) {
                     return scalarType(ty.unionTagTypeSafety(mod).?, pt);
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 92aac552d8..dfee2a3bf9 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -856,7 +856,7 @@ pub fn generate(
         @intFromEnum(FrameIndex.stack_frame),
         FrameAlloc.init(.{
             .size = 0,
-            .alignment = func.analysis(ip).stack_alignment.max(.@"1"),
+            .alignment = func.analysisUnordered(ip).stack_alignment.max(.@"1"),
         }),
     );
     function.frame_allocs.set(
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index 6f4bd6f356..1089b6db8a 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -349,7 +349,7 @@ fn classifySystemVStruct(
                     .@"packed" => {},
                 }
             } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
-                switch (field_loaded_union.getLayout(ip)) {
+                switch (field_loaded_union.flagsUnordered(ip).layout) {
                     .auto, .@"extern" => {
                         byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, pt, target);
                         continue;
@@ -362,11 +362,11 @@ fn classifySystemVStruct(
             result_class.* = result_class.combineSystemV(field_class);
         byte_offset += field_ty.abiSize(pt);
     }
-    const final_byte_offset = starting_byte_offset + loaded_struct.size(ip).*;
+    const final_byte_offset = starting_byte_offset + loaded_struct.sizeUnordered(ip);
     std.debug.assert(final_byte_offset == std.mem.alignForward(
         u64,
         byte_offset,
-        loaded_struct.flagsPtr(ip).alignment.toByteUnits().?,
+        loaded_struct.flagsUnordered(ip).alignment.toByteUnits().?,
     ));
     return final_byte_offset;
 }
@@ -390,7 +390,7 @@ fn classifySystemVUnion(
                     .@"packed" => {},
                 }
             } else if (pt.zcu.typeToUnion(field_ty)) |field_loaded_union| {
-                switch (field_loaded_union.getLayout(ip)) {
+                switch (field_loaded_union.flagsUnordered(ip).layout) {
                     .auto, .@"extern" => {
                         _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, pt, target);
                         continue;
@@ -402,7 +402,7 @@ fn classifySystemVUnion(
         for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
            result_class.* = result_class.combineSystemV(field_class);
     }
-    return starting_byte_offset + loaded_union.size(ip).*;
+    return starting_byte_offset + loaded_union.sizeUnordered(ip);
 }

 pub const SysV = struct {
diff --git a/src/codegen.zig b/src/codegen.zig
index d05cb42728..9e0f4db305 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -548,8 +548,8 @@ pub fn generateSymbol(
                 }
             }

-            const size = struct_type.size(ip).*;
-            const alignment = struct_type.flagsPtr(ip).alignment.toByteUnits().?;
+            const size = struct_type.sizeUnordered(ip);
+            const alignment = struct_type.flagsUnordered(ip).alignment.toByteUnits().?;

             const padding = math.cast(
                 usize,
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 60b07f0e3f..9f77e7327c 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1366,7 +1366,7 @@ pub const DeclGen = struct {
                 const loaded_union = ip.loadUnionType(ty.toIntern());
                 if (un.tag == .none) {
                     const backing_ty = try ty.unionBackingType(pt);
-                    switch (loaded_union.getLayout(ip)) {
+                    switch (loaded_union.flagsUnordered(ip).layout) {
                         .@"packed" => {
                             if (!location.isInitializer()) {
                                 try writer.writeByte('(');
@@ -1401,7 +1401,7 @@ pub const DeclGen = struct {
                 const field_index = zcu.unionTagFieldIndex(loaded_union, Value.fromInterned(un.tag)).?;
                 const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
                 const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
-                if (loaded_union.getLayout(ip) == .@"packed") {
+                if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
                     if (field_ty.hasRuntimeBits(pt)) {
                         if (field_ty.isPtrAtRuntime(zcu)) {
                             try writer.writeByte('(');
@@ -1629,7 +1629,7 @@ pub const DeclGen = struct {
             },
             .union_type => {
                 const loaded_union = ip.loadUnionType(ty.toIntern());
-                switch (loaded_union.getLayout(ip)) {
+                switch (loaded_union.flagsUnordered(ip).layout) {
                     .auto, .@"extern" => {
                         if (!location.isInitializer()) {
                             try writer.writeByte('(');
@@ -1792,7 +1792,7 @@ pub const DeclGen = struct {
            else => unreachable,
            }
        }
-        if (fn_val.getFunction(zcu)) |func| if (func.analysis(ip).is_cold)
+        if (fn_val.getFunction(zcu)) |func| if (func.analysisUnordered(ip).is_cold)
            try w.writeAll("zig_cold ");
        if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");

@@ -5527,7 +5527,7 @@ fn fieldLocation(
             .{ .field = field_index } },
         .union_type => {
             const loaded_union = ip.loadUnionType(container_ty.toIntern());
-            switch (loaded_union.getLayout(ip)) {
+            switch (loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
                     const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
                     if (!field_ty.hasRuntimeBitsIgnoreComptime(pt))
@@ -5763,7 +5763,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
             .{ .field = extra.field_index },
         .union_type => field_name: {
             const loaded_union = ip.loadUnionType(struct_ty.toIntern());
-            switch (loaded_union.getLayout(ip)) {
+            switch (loaded_union.flagsUnordered(ip).layout) {
                 .auto, .@"extern" => {
                     const name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
                     break :field_name if (loaded_union.hasTag(ip))
@@ -7267,7 +7267,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const writer = f.object.writer();

     const local = try f.allocLocal(inst, union_ty);
-    if (loaded_union.getLayout(ip) == .@"packed") return f.moveCValue(inst, union_ty, payload);
+    if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);

     const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
         const layout = union_ty.unionGetLayout(pt);
diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig
index 6d98aaafcb..ecd1b8c2f7 100644
--- a/src/codegen/c/Type.zig
+++ b/src/codegen/c/Type.zig
@@ -1744,7 +1744,7 @@ pub const Pool = struct {
             .@"packed" => return pool.fromType(
                 allocator,
                 scratch,
-                Type.fromInterned(loaded_struct.backingIntType(ip).*),
+                Type.fromInterned(loaded_struct.backingIntTypeUnordered(ip)),
                 pt,
                 mod,
                 kind,
@@ -1817,7 +1817,7 @@ pub const Pool = struct {
             },
             .union_type => {
                 const loaded_union = ip.loadUnionType(ip_index);
-                switch (loaded_union.getLayout(ip)) {
+                switch (loaded_union.flagsUnordered(ip).layout) {
                     .auto, .@"extern" => {
                         const has_tag = loaded_union.hasTag(ip);
                         const fwd_decl = try pool.getFwdDecl(allocator, .{
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index abc6a9dc9d..b2d32fb539 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1086,7 +1086,7 @@ pub const Object = struct {
         // If there is no such function in the module, it means the source code does not need it.
         const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return;
         const llvm_fn = o.builder.getGlobal(name) orelse return;
-        const errors_len = o.pt.zcu.intern_pool.global_error_set.mutate.list.len;
+        const errors_len = o.pt.zcu.intern_pool.global_error_set.getNamesFromMainThread().len;

         var wip = try Builder.WipFunction.init(&o.builder, .{
             .function = llvm_fn.ptrConst(&o.builder).kind.function,
@@ -1385,13 +1385,14 @@ pub const Object = struct {
         var attributes = try function_index.ptrConst(&o.builder).attributes.toWip(&o.builder);
         defer attributes.deinit(&o.builder);

-        if (func.analysis(ip).is_noinline) {
+        const func_analysis = func.analysisUnordered(ip);
+        if (func_analysis.is_noinline) {
             try attributes.addFnAttr(.@"noinline", &o.builder);
         } else {
             _ = try attributes.removeFnAttr(.@"noinline");
         }

-        const stack_alignment = func.analysis(ip).stack_alignment;
+        const stack_alignment = func.analysisUnordered(ip).stack_alignment;
         if (stack_alignment != .none) {
             try attributes.addFnAttr(.{ .alignstack = stack_alignment.toLlvm() }, &o.builder);
             try attributes.addFnAttr(.@"noinline", &o.builder);
@@ -1399,7 +1400,7 @@ pub const Object = struct {
             _ = try attributes.removeFnAttr(.alignstack);
         }

-        if (func.analysis(ip).is_cold) {
+        if (func_analysis.is_cold) {
             try attributes.addFnAttr(.cold, &o.builder);
         } else {
             _ = try attributes.removeFnAttr(.cold);
@@ -1624,7 +1625,7 @@ pub const Object = struct {
                     llvm_arg_i += 1;

                     const alignment = param_ty.abiAlignment(pt).toLlvm();
-                    const arg_ptr = try buildAllocaInner(&wip, param_llvm_ty, alignment, target);
+                    const arg_ptr = try buildAllocaInner(&wip, param.typeOfWip(&wip), alignment, target);
                     _ = try wip.store(.normal, param, arg_ptr, alignment);

                     args.appendAssumeCapacity(if (isByRef(param_ty, pt))
@@ -2403,7 +2404,7 @@ pub const Object = struct {
                 defer gpa.free(name);

                 if (zcu.typeToPackedStruct(ty)) |struct_type| {
-                    const backing_int_ty = struct_type.backingIntType(ip).*;
+                    const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
                     if (backing_int_ty != .none) {
                         const info = Type.fromInterned(backing_int_ty).intInfo(zcu);
                         const builder_name = try o.builder.metadataString(name);
@@ -2615,7 +2616,7 @@ pub const Object = struct {
                     if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(pt)) continue;

                     const field_size = Type.fromInterned(field_ty).abiSize(pt);
-                    const field_align: InternPool.Alignment = switch (union_type.flagsPtr(ip).layout) {
+                    const field_align: InternPool.Alignment = switch (union_type.flagsUnordered(ip).layout) {
                         .@"packed" => .none,
                         .auto, .@"extern" => pt.unionFieldNormalAlignment(union_type, @intCast(field_index)),
                     };
@@ -3303,7 +3304,7 @@ pub const Object = struct {
                 const struct_type = ip.loadStructType(t.toIntern());

                 if (struct_type.layout == .@"packed") {
-                    const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
+                    const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntTypeUnordered(ip)));
                     try o.type_map.put(o.gpa, t.toIntern(), int_ty);
                     return int_ty;
                 }
@@ -3346,7 +3347,7 @@ pub const Object = struct {
                         // This is a zero-bit field. If there are runtime bits after this field,
                         // map to the next LLVM field (which we know exists): otherwise, don't
                         // map the field, indicating it's at the end of the struct.
-                        if (offset != struct_type.size(ip).*) {
+                        if (offset != struct_type.sizeUnordered(ip)) {
                             try o.struct_field_map.put(o.gpa, .{
                                 .struct_ty = t.toIntern(),
                                 .field_index = field_index,
@@ -3450,7 +3451,7 @@ pub const Object = struct {
                 const union_obj = ip.loadUnionType(t.toIntern());
                 const layout = pt.getUnionLayout(union_obj);
-                if (union_obj.flagsPtr(ip).layout == .@"packed") {
+                if (union_obj.flagsUnordered(ip).layout == .@"packed") {
                     const int_ty = try o.builder.intType(@intCast(t.bitSize(pt)));
                     try o.type_map.put(o.gpa, t.toIntern(), int_ty);
                     return int_ty;
                 }
@@ -3697,7 +3698,7 @@ pub const Object = struct {
                 if (layout.payload_size == 0) return o.lowerValue(un.tag);

                 const union_obj = mod.typeToUnion(ty).?;
-                const container_layout = union_obj.getLayout(ip);
+                const container_layout = union_obj.flagsUnordered(ip).layout;

                 assert(container_layout == .@"packed");

@@ -4205,7 +4206,7 @@ pub const Object = struct {
                 if (layout.payload_size == 0) return o.lowerValue(un.tag);

                 const union_obj = mod.typeToUnion(ty).?;
-                const container_layout = union_obj.getLayout(ip);
+                const container_layout = union_obj.flagsUnordered(ip).layout;

                 var need_unnamed = false;
                 const payload = if (un.tag != .none) p: {
@@ -10045,7 +10046,7 @@ pub const FuncGen = struct {
             },
             .Struct => {
                 if (mod.typeToPackedStruct(result_ty)) |struct_type| {
-                    const backing_int_ty = struct_type.backingIntType(ip).*;
+                    const backing_int_ty = struct_type.backingIntTypeUnordered(ip);
                     assert(backing_int_ty != .none);
                     const big_bits = Type.fromInterned(backing_int_ty).bitSize(pt);
                     const int_ty = try o.builder.intType(@intCast(big_bits));
@@ -10155,7 +10156,7 @@ pub const FuncGen = struct {
         const layout = union_ty.unionGetLayout(pt);
         const union_obj = mod.typeToUnion(union_ty).?;

-        if (union_obj.getLayout(ip) == .@"packed") {
+        if (union_obj.flagsUnordered(ip).layout == .@"packed") {
             const big_bits = union_ty.bitSize(pt);
             const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
             const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
@@ -11281,7 +11282,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
         .struct_type => {
             const struct_type = ip.loadStructType(return_type.toIntern());
             assert(struct_type.haveLayout(ip));
-            const size: u64 = struct_type.size(ip).*;
+            const size: u64 = struct_type.sizeUnordered(ip);
             assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
             if (size % 8 > 0) {
                 types_buffer[types_index - 1] = try o.builder.intType(@intCast(size % 8 * 8));
@@ -11587,7 +11588,7 @@ const ParamTypeIterator = struct {
                     .struct_type => {
                         const struct_type = ip.loadStructType(ty.toIntern());
                         assert(struct_type.haveLayout(ip));
-                        const size: u64 = struct_type.size(ip).*;
+                        const size: u64 = struct_type.sizeUnordered(ip);
                         assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
                         if (size % 8 > 0) {
                             types_buffer[types_index - 1] =
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 7a45429da6..d9468cc8ea 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -1463,7 +1463,7 @@ const DeclGen = struct {
         const ip = &mod.intern_pool;
         const union_obj = mod.typeToUnion(ty).?;

-        if (union_obj.getLayout(ip) == .@"packed") {
+        if (union_obj.flagsUnordered(ip).layout == .@"packed") {
             return self.todo("packed union types", .{});
         }

@@ -1735,7 +1735,7 @@ const DeclGen = struct {
                 };

                 if (struct_type.layout == .@"packed") {
-                    return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct);
+                    return try self.resolveType(Type.fromInterned(struct_type.backingIntTypeUnordered(ip)), .direct);
                 }

                 var member_types = std.ArrayList(IdRef).init(self.gpa);
@@ -5081,7 +5081,7 @@ const DeclGen = struct {
         const union_ty = mod.typeToUnion(ty).?;
         const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);

-        if (union_ty.getLayout(ip) == .@"packed") {
+        if (union_ty.flagsUnordered(ip).layout == .@"packed") {
             unreachable; // TODO
         }

diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 84d26b7610..732ceb85a6 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1156,7 +1156,7 @@ pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index,
     const code = switch (res) {
         .ok => code_buffer.items,
         .fail => |em| {
-            func.analysis(&mod.intern_pool).state = .codegen_failure;
+            func.setAnalysisState(&mod.intern_pool, .codegen_failure);
             try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index 46c0fd23a3..e8ec5eb492 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -1093,7 +1093,7 @@ pub fn updateFunc(
     const code = switch (res) {
         .ok => code_buffer.items,
         .fail => |em| {
-            func.analysis(&mod.intern_pool).state = .codegen_failure;
+            func.setAnalysisState(&mod.intern_pool, .codegen_failure);
             try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig
index 79e1ae4e02..b59b6a6720 100644
--- a/src/link/MachO/ZigObject.zig
+++ b/src/link/MachO/ZigObject.zig
@@ -699,7 +699,7 @@ pub fn updateFunc(
     const code = switch (res) {
         .ok => code_buffer.items,
         .fail => |em| {
-            func.analysis(&mod.intern_pool).state = .codegen_failure;
+            func.setAnalysisState(&mod.intern_pool, .codegen_failure);
             try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index 091aee54c4..e954bf7004 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -449,7 +449,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index,
     const code = switch (res) {
         .ok => try code_buffer.toOwnedSlice(),
         .fail => |em| {
-            func.analysis(&mod.intern_pool).state = .codegen_failure;
+            func.setAnalysisState(&mod.intern_pool, .codegen_failure);
             try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em);
             return;
         },
diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig
index f74705e17c..993e1309c3 100644
--- a/src/link/Wasm/ZigObject.zig
+++ b/src/link/Wasm/ZigObject.zig
@@ -1051,7 +1051,7 @@ fn setupErrorsLen(zig_object: *ZigObject, wasm_file: *Wasm) !void {
     const gpa = wasm_file.base.comp.gpa;
     const sym_index = zig_object.findGlobalSymbol("__zig_errors_len") orelse return;

-    const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.mutate.list.len;
+    const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.getNamesFromMainThread().len;
     // overwrite existing atom if it already exists (maybe the error set has increased)
     // if not, allcoate a new atom.
     const atom_index = if (wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = sym_index })) |index| blk: {
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index 8189ab66aa..78302c6a84 100644
Binary files a/stage1/zig1.wasm and b/stage1/zig1.wasm differ
diff --git a/test/cases/compile_errors/bogus_method_call_on_slice.zig b/test/cases/compile_errors/bogus_method_call_on_slice.zig
index 694993074c..5139b7550d 100644
--- a/test/cases/compile_errors/bogus_method_call_on_slice.zig
+++ b/test/cases/compile_errors/bogus_method_call_on_slice.zig
@@ -16,6 +16,6 @@ pub export fn entry2() void {
 // backend=stage2
 // target=native
 //
+// :3:6: error: no field or member function named 'copy' in '[]const u8'
 // :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
 // :12:18: error: no field or member function named 'bar' in 'struct{comptime foo: comptime_int = 1}'
-// :3:6: error: no field or member function named 'copy' in '[]const u8'
diff --git a/test/cases/compile_errors/compile_log.zig b/test/cases/compile_errors/compile_log.zig
index ac89cfd1b3..bbc4b82657 100644
--- a/test/cases/compile_errors/compile_log.zig
+++ b/test/cases/compile_errors/compile_log.zig
@@ -17,14 +17,14 @@ export fn baz() void {
 // target=native
 //
 // :6:5: error: found compile log statement
-// :12:5: note: also here
 // :6:5: note: also here
+// :12:5: note: also here
 //
 // Compile Log Output:
 // @as(*const [5:0]u8, "begin")
 // @as(*const [1:0]u8, "a"), @as(i32, 12), @as(*const [1:0]u8, "b"), @as([]const u8, "hi"[0..2])
 // @as(*const [3:0]u8, "end")
-// @as(comptime_int, 4)
 // @as(*const [5:0]u8, "begin")
 // @as(*const [1:0]u8, "a"), @as(i32, [runtime value]), @as(*const [1:0]u8, "b"), @as([]const u8, [runtime value])
 // @as(*const [3:0]u8, "end")
+// @as(comptime_int, 4)
diff --git a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
index fac09cc265..07fe34ad7f 100644
--- a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
+++ b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
@@ -18,6 +18,6 @@ comptime {
 // backend=stage2
 // target=native
 //
+// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
 // :5:30: error: comptime parameters not allowed in function with calling convention 'C'
 // :6:30: error: generic parameters not allowed in function with calling convention 'C'
-// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
diff --git a/test/cases/compile_errors/invalid_store_to_comptime_field.zig b/test/cases/compile_errors/invalid_store_to_comptime_field.zig
index 6703ac14ca..f3082b06a9 100644
--- a/test/cases/compile_errors/invalid_store_to_comptime_field.zig
+++ b/test/cases/compile_errors/invalid_store_to_comptime_field.zig
@@ -82,6 +82,6 @@ pub export fn entry8() void {
 // :36:29: note: default value set here
 // :46:12: error: value stored in comptime field does not match the default value of the field
 // :55:25: error: value stored in comptime field does not match the default value of the field
-// :68:36: error: value stored in comptime field does not match the default value of the field
 // :61:30: error: value stored in comptime field does not match the default value of the field
 // :59:29: note: default value set here
+// :68:36: error: value stored in comptime field does not match the default value of the field
diff --git a/test/cases/compile_errors/invalid_variadic_function.zig b/test/cases/compile_errors/invalid_variadic_function.zig
index d0b6d2c8f5..ee8fc3439e 100644
--- a/test/cases/compile_errors/invalid_variadic_function.zig
+++ b/test/cases/compile_errors/invalid_variadic_function.zig
@@ -18,6 +18,6 @@ comptime {
 //
 // :1:1: error: variadic function does not support '.Unspecified' calling convention
 // :1:1: note: supported calling conventions: '.C'
-// :2:1: error: generic function cannot be variadic
 // :1:1: error: variadic function does not support '.Inline' calling convention
 // :1:1: note: supported calling conventions: '.C'
+// :2:1: error: generic function cannot be variadic
diff --git a/test/cases/error_in_nested_declaration.zig b/test/cases/error_in_nested_declaration.zig
index c2dd7b9fa8..2ef8bf13ec 100644
--- a/test/cases/error_in_nested_declaration.zig
+++ b/test/cases/error_in_nested_declaration.zig
@@ -26,6 +26,6 @@ pub export fn entry2() void {
 // backend=llvm
 // target=native
 //
-// :17:12: error: C pointers cannot point to opaque types
 // :6:20: error: cannot @bitCast to '[]i32'
 // :6:20: note: use @ptrCast to cast from '[]u32'
+// :17:12: error: C pointers cannot point to opaque types