diff --git a/src/Compilation.zig b/src/Compilation.zig
index 81d150b03f..69cb2c4d6f 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -848,17 +848,18 @@ pub const RcIncludes = enum {
 const Job = union(enum) {
     /// Corresponds to the task in `link.Task`.
     /// Only needed for backends that haven't yet been updated to not race against Sema.
-    codegen_nav: InternPool.Nav.Index,
+    link_nav: InternPool.Nav.Index,
+    /// Corresponds to the task in `link.Task`.
+    /// TODO: this is currently also responsible for performing codegen.
+    /// Only needed for backends that haven't yet been updated to not race against Sema.
+    link_func: link.Task.CodegenFunc,
     /// Corresponds to the task in `link.Task`.
     /// Only needed for backends that haven't yet been updated to not race against Sema.
-    codegen_func: link.Task.CodegenFunc,
-    /// Corresponds to the task in `link.Task`.
-    /// Only needed for backends that haven't yet been updated to not race against Sema.
-    codegen_type: InternPool.Index,
+    link_type: InternPool.Index,
     update_line_number: InternPool.TrackedInst.Index,
     /// The `AnalUnit`, which is *not* a `func`, must be semantically analyzed.
     /// This may be its first time being analyzed, or it may be outdated.
-    /// If the unit is a function, a `codegen_func` job will then be queued.
+    /// If the unit is a test function, an `analyze_func` job will then be queued.
     analyze_comptime_unit: InternPool.AnalUnit,
     /// This function must be semantically analyzed.
     /// This may be its first time being analyzed, or it may be outdated.
@@ -879,13 +880,13 @@ const Job = union(enum) {
         return switch (tag) {
             // Prioritize functions so that codegen can get to work on them on a
             // separate thread, while Sema goes back to its own work.
-            .resolve_type_fully, .analyze_func, .codegen_func => 0,
+            .resolve_type_fully, .analyze_func, .link_func => 0,
             else => 1,
         };
     }
 
     comptime {
         // Job dependencies
-        assert(stage(.resolve_type_fully) <= stage(.codegen_func));
+        assert(stage(.resolve_type_fully) <= stage(.link_func));
     }
 };
@@ -4552,7 +4553,7 @@ pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void {
 
 fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
     switch (job) {
-        .codegen_nav => |nav_index| {
+        .link_nav => |nav_index| {
             const zcu = comp.zcu.?;
             const nav = zcu.intern_pool.getNav(nav_index);
             if (nav.analysis != null) {
@@ -4562,16 +4563,16 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
                 }
             }
             assert(nav.status == .fully_resolved);
-            comp.dispatchCodegenTask(tid, .{ .codegen_nav = nav_index });
+            comp.dispatchLinkTask(tid, .{ .link_nav = nav_index });
         },
-        .codegen_func => |func| {
-            comp.dispatchCodegenTask(tid, .{ .codegen_func = func });
+        .link_func => |func| {
+            comp.dispatchLinkTask(tid, .{ .link_func = func });
        },
-        .codegen_type => |ty| {
-            comp.dispatchCodegenTask(tid, .{ .codegen_type = ty });
+        .link_type => |ty| {
+            comp.dispatchLinkTask(tid, .{ .link_type = ty });
         },
         .update_line_number => |ti| {
-            comp.dispatchCodegenTask(tid, .{ .update_line_number = ti });
+            comp.dispatchLinkTask(tid, .{ .update_line_number = ti });
         },
         .analyze_func => |func| {
             const named_frame = tracy.namedFrame("analyze_func");
@@ -4665,7 +4666,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
 
 /// The reason for the double-queue here is that the first queue ensures any
 /// resolve_type_fully tasks are complete before this dispatch function is called.
-fn dispatchCodegenTask(comp: *Compilation, tid: usize, link_task: link.Task) void {
+fn dispatchLinkTask(comp: *Compilation, tid: usize, link_task: link.Task) void {
     if (comp.separateCodegenThreadOk()) {
         comp.queueLinkTasks(&.{link_task});
     } else {
diff --git a/src/Sema.zig b/src/Sema.zig
index e20fb17f26..c9f307e624 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2991,7 +2991,7 @@ fn zirStructDecl(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
     try sema.declareDependency(.{ .interned = wip_ty.index });
     try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -3250,7 +3250,7 @@ fn zirEnumDecl(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
     return Air.internedToRef(wip_ty.index);
 }
@@ -3368,7 +3368,7 @@ fn zirUnionDecl(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
     try sema.declareDependency(.{ .interned = wip_ty.index });
     try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -3455,7 +3455,7 @@ fn zirOpaqueDecl(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
     try sema.addTypeReferenceEntry(src, wip_ty.index);
     if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
@@ -20086,7 +20086,7 @@ fn structInitAnon(
         codegen_type: {
             if (zcu.comp.config.use_llvm) break :codegen_type;
             if (block.ownerModule().strip) break :codegen_type;
-            try zcu.comp.queueJob(.{ .codegen_type = wip.index });
+            try zcu.comp.queueJob(.{ .link_type = wip.index });
         }
         if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index);
         break :ty wip.finish(ip, new_namespace_index);
@@ -21396,7 +21396,7 @@ fn reifyEnum(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
     return Air.internedToRef(wip_ty.index);
 }
@@ -21650,7 +21650,7 @@ fn reifyUnion(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
     try sema.declareDependency(.{ .interned = wip_ty.index });
     try sema.addTypeReferenceEntry(src, wip_ty.index);
@@ -22004,7 +22004,7 @@ fn reifyStruct(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (block.ownerModule().strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
     try sema.declareDependency(.{ .interned = wip_ty.index });
     try sema.addTypeReferenceEntry(src, wip_ty.index);
diff --git a/src/Sema/LowerZon.zig b/src/Sema/LowerZon.zig
index 77065a07e8..192c2e2d56 100644
--- a/src/Sema/LowerZon.zig
+++ b/src/Sema/LowerZon.zig
@@ -194,7 +194,7 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
             codegen_type: {
                 if (pt.zcu.comp.config.use_llvm) break :codegen_type;
                 if (self.block.ownerModule().strip) break :codegen_type;
-                try pt.zcu.comp.queueJob(.{ .codegen_type = wip.index });
+                try pt.zcu.comp.queueJob(.{ .link_type = wip.index });
             }
             break :ty wip.finish(ip, new_namespace_index);
         },
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index 8b35d8d799..4b4ae98cb4 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -1320,7 +1320,7 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
         }
 
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_nav = nav_id });
+        try zcu.comp.queueJob(.{ .link_nav = nav_id });
     }
 
     switch (old_nav.status) {
@@ -1716,7 +1716,7 @@ fn analyzeFuncBody(
     }
 
     // This job depends on any resolve_type_fully jobs queued up before it.
-    try comp.queueJob(.{ .codegen_func = .{
+    try comp.queueJob(.{ .link_func = .{
         .func = func_index,
         .air = air,
     } });
@@ -1880,7 +1880,7 @@ fn createFileRootStruct(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (file.mod.?.strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
     zcu.setFileRootType(file_index, wip_ty.index);
     if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
@@ -3457,73 +3457,8 @@ pub fn populateTestFunctions(
             zcu.codegen_prog_node = std.Progress.Node.none;
         }
 
-        try pt.linkerUpdateNav(nav_index);
-    }
-}
-
-pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{OutOfMemory}!void {
-    const zcu = pt.zcu;
-    const comp = zcu.comp;
-    const gpa = zcu.gpa;
-    const ip = &zcu.intern_pool;
-
-    const nav = zcu.intern_pool.getNav(nav_index);
-    const codegen_prog_node = zcu.codegen_prog_node.start(nav.fqn.toSlice(ip), 0);
-    defer codegen_prog_node.end();
-
-    if (!Air.valFullyResolved(zcu.navValue(nav_index), zcu)) {
-        // The value of this nav failed to resolve. This is a transitive failure.
-        // TODO: do we need to mark this failure anywhere? I don't think so, since compilation
-        // will fail due to the type error anyway.
-    } else if (comp.bin_file) |lf| {
-        lf.updateNav(pt, nav_index) catch |err| switch (err) {
-            error.OutOfMemory => return error.OutOfMemory,
-            error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
-            error.Overflow, error.RelocationNotByteAligned => {
-                try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create(
-                    gpa,
-                    zcu.navSrcLoc(nav_index),
-                    "unable to codegen: {s}",
-                    .{@errorName(err)},
-                ));
-                // Not a retryable failure.
-            },
-        };
-    } else if (zcu.llvm_object) |llvm_object| {
-        llvm_object.updateNav(pt, nav_index) catch |err| switch (err) {
-            error.OutOfMemory => return error.OutOfMemory,
-        };
-    }
-}
-
-pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) error{OutOfMemory}!void {
-    const zcu = pt.zcu;
-    const gpa = zcu.gpa;
-    const comp = zcu.comp;
-    const ip = &zcu.intern_pool;
-
-    const codegen_prog_node = zcu.codegen_prog_node.start(Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), 0);
-    defer codegen_prog_node.end();
-
-    if (zcu.failed_types.fetchSwapRemove(ty)) |*entry| entry.value.deinit(gpa);
-
-    if (!Air.typeFullyResolved(Type.fromInterned(ty), zcu)) {
-        // This type failed to resolve. This is a transitive failure.
-        return;
-    }
-
-    if (comp.bin_file) |lf| lf.updateContainerType(pt, ty) catch |err| switch (err) {
-        error.OutOfMemory => return error.OutOfMemory,
-        error.TypeFailureReported => assert(zcu.failed_types.contains(ty)),
-    };
-}
-
-pub fn linkerUpdateLineNumber(pt: Zcu.PerThread, ti: InternPool.TrackedInst.Index) !void {
-    if (pt.zcu.comp.bin_file) |lf| {
-        lf.updateLineNumber(pt, ti) catch |err| switch (err) {
-            error.OutOfMemory => return error.OutOfMemory,
-            else => |e| log.err("update line number failed: {s}", .{@errorName(e)}),
-        };
+        // The linker thread is not running, so we actually need to dispatch this task directly.
+        @import("../link.zig").doTask(zcu.comp, @intFromEnum(pt.tid), .{ .link_nav = nav_index });
     }
 }
 
@@ -3984,7 +3919,7 @@ pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!
     const result = try pt.zcu.intern_pool.getExtern(pt.zcu.gpa, pt.tid, key);
     if (result.new_nav.unwrap()) |nav| {
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try pt.zcu.comp.queueJob(.{ .codegen_nav = nav });
+        try pt.zcu.comp.queueJob(.{ .link_nav = nav });
         if (pt.zcu.comp.debugIncremental()) try pt.zcu.incremental_debug_state.newNav(pt.zcu, nav);
     }
     return result.index;
@@ -4132,7 +4067,7 @@ fn recreateStructType(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (file.mod.?.strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
 
     if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
@@ -4225,7 +4160,7 @@ fn recreateUnionType(
         if (zcu.comp.config.use_llvm) break :codegen_type;
         if (file.mod.?.strip) break :codegen_type;
         // This job depends on any resolve_type_fully jobs queued up before it.
-        try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
+        try zcu.comp.queueJob(.{ .link_type = wip_ty.index });
     }
 
     if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index);
diff --git a/src/link.zig b/src/link.zig
index 688210c355..7673b44e47 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -704,7 +704,7 @@ pub const File = struct {
     }
 
     /// May be called before or after updateExports for any given Nav.
-    pub fn updateNav(base: *File, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) UpdateNavError!void {
+    fn updateNav(base: *File, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) UpdateNavError!void {
         const nav = pt.zcu.intern_pool.getNav(nav_index);
         assert(nav.status == .fully_resolved);
         switch (base.tag) {
@@ -721,7 +721,7 @@ pub const File = struct {
         TypeFailureReported,
     };
 
-    pub fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateContainerTypeError!void {
+    fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateContainerTypeError!void {
         switch (base.tag) {
             else => {},
             inline .elf => |tag| {
@@ -732,6 +732,7 @@ pub const File = struct {
     }
 
     /// May be called before or after updateExports for any given Decl.
+    /// TODO: currently `pub` because `Zcu.PerThread` is calling this.
     pub fn updateFunc(
         base: *File,
         pt: Zcu.PerThread,
@@ -755,7 +756,7 @@ pub const File = struct {
 
     /// On an incremental update, fixup the line number of all `Nav`s at the given `TrackedInst`, because
     /// its line number has changed. The ZIR instruction `ti_id` has tag `.declaration`.
-    pub fn updateLineNumber(base: *File, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) UpdateLineNumberError!void {
+    fn updateLineNumber(base: *File, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) UpdateLineNumberError!void {
         {
             const ti = ti_id.resolveFull(&pt.zcu.intern_pool).?;
             const file = pt.zcu.fileByIndex(ti.file);
@@ -1435,10 +1436,10 @@ pub const Task = union(enum) {
     load_input: Input,
 
     /// Write the constant value for a Decl to the output file.
-    codegen_nav: InternPool.Nav.Index,
+    link_nav: InternPool.Nav.Index,
     /// Write the machine code for a function to the output file.
-    codegen_func: CodegenFunc,
-    codegen_type: InternPool.Index,
+    link_func: CodegenFunc,
+    link_type: InternPool.Index,
 
     update_line_number: InternPool.TrackedInst.Index,
 
@@ -1585,47 +1586,90 @@ pub fn doTask(comp: *Compilation, tid: usize, task: Task) void {
                 },
             };
         },
-        .codegen_nav => |nav_index| {
-            if (comp.remaining_prelink_tasks == 0) {
-                const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
-                defer pt.deactivate();
-                pt.linkerUpdateNav(nav_index) catch |err| switch (err) {
+        .link_nav => |nav_index| {
+            if (comp.remaining_prelink_tasks != 0) {
+                comp.link_task_queue_postponed.appendAssumeCapacity(task);
+                return;
+            }
+            const zcu = comp.zcu.?;
+            const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
+            defer pt.deactivate();
+            if (!Air.valFullyResolved(zcu.navValue(nav_index), zcu)) {
+                // Type resolution failed in a way which affects this `Nav`. This is a transitive
+                // failure, but it doesn't need recording, because this `Nav` semantically depends
+                // on the failed type, so when it is changed the `Nav` will be updated.
+                return;
+            }
+            if (comp.bin_file) |lf| {
+                lf.updateNav(pt, nav_index) catch |err| switch (err) {
+                    error.OutOfMemory => diags.setAllocFailure(),
+                    error.CodegenFail => assert(zcu.failed_codegen.contains(nav_index)),
+                    error.Overflow, error.RelocationNotByteAligned => {
+                        zcu.failed_codegen.ensureUnusedCapacity(zcu.gpa, 1) catch return diags.setAllocFailure();
+                        const msg = Zcu.ErrorMsg.create(
+                            zcu.gpa,
+                            zcu.navSrcLoc(nav_index),
+                            "unable to codegen: {s}",
+                            .{@errorName(err)},
+                        ) catch return diags.setAllocFailure();
+                        zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, msg);
+                        // Not a retryable failure.
+                    },
+                };
+            } else if (zcu.llvm_object) |llvm_object| {
+                llvm_object.updateNav(pt, nav_index) catch |err| switch (err) {
                     error.OutOfMemory => diags.setAllocFailure(),
                 };
-            } else {
-                comp.link_task_queue_postponed.appendAssumeCapacity(task);
             }
         },
-        .codegen_func => |func| {
-            if (comp.remaining_prelink_tasks == 0) {
-                const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
-                defer pt.deactivate();
-                var air = func.air;
-                defer air.deinit(comp.gpa);
-                pt.linkerUpdateFunc(func.func, &air) catch |err| switch (err) {
-                    error.OutOfMemory => diags.setAllocFailure(),
-                };
-            } else {
+        .link_func => |func| {
+            if (comp.remaining_prelink_tasks != 0) {
                 comp.link_task_queue_postponed.appendAssumeCapacity(task);
+                return;
             }
+            const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
+            defer pt.deactivate();
+            var air = func.air;
+            defer air.deinit(comp.gpa);
+            pt.linkerUpdateFunc(func.func, &air) catch |err| switch (err) {
+                error.OutOfMemory => diags.setAllocFailure(),
+            };
         },
-        .codegen_type => |ty| {
-            if (comp.remaining_prelink_tasks == 0) {
-                const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
-                defer pt.deactivate();
-                pt.linkerUpdateContainerType(ty) catch |err| switch (err) {
-                    error.OutOfMemory => diags.setAllocFailure(),
-                };
-            } else {
+        .link_type => |ty| {
+            if (comp.remaining_prelink_tasks != 0) {
                 comp.link_task_queue_postponed.appendAssumeCapacity(task);
+                return;
+            }
+            const zcu = comp.zcu.?;
+            const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
+            defer pt.deactivate();
+            if (zcu.failed_types.fetchSwapRemove(ty)) |*entry| entry.value.deinit(zcu.gpa);
+            if (!Air.typeFullyResolved(.fromInterned(ty), zcu)) {
+                // Type resolution failed in a way which affects this type. This is a transitive
+                // failure, but it doesn't need recording, because this type semantically depends
+                // on the failed type, so when that is changed, this type will be updated.
+                return;
+            }
+            if (comp.bin_file) |lf| {
+                lf.updateContainerType(pt, ty) catch |err| switch (err) {
+                    error.OutOfMemory => diags.setAllocFailure(),
+                    error.TypeFailureReported => assert(zcu.failed_types.contains(ty)),
+                };
             }
         },
         .update_line_number => |ti| {
+            if (comp.remaining_prelink_tasks != 0) {
+                comp.link_task_queue_postponed.appendAssumeCapacity(task);
+                return;
+            }
             const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
             defer pt.deactivate();
-            pt.linkerUpdateLineNumber(ti) catch |err| switch (err) {
-                error.OutOfMemory => diags.setAllocFailure(),
-            };
+            if (comp.bin_file) |lf| {
+                lf.updateLineNumber(pt, ti) catch |err| switch (err) {
+                    error.OutOfMemory => diags.setAllocFailure(),
+                    else => |e| log.err("update line number failed: {s}", .{@errorName(e)}),
+                };
+            }
         },
     }
 }
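As a companion to the diff above, here is a minimal, self-contained Zig sketch of the dispatch pattern that `link.doTask` follows after this change: while `remaining_prelink_tasks` is nonzero the ZCU-related task is postponed, otherwise it runs immediately on the calling thread, and postponed tasks are drained once prelink finishes. The names here (`LinkQueue`, the `u32` payloads, `main`) are hypothetical simplifications for illustration, not the compiler's real types.

const std = @import("std");

// Simplified stand-in for `link.Task`: only the two task kinds sketched here.
const Task = union(enum) {
    link_nav: u32,
    link_func: u32,
};

// Hypothetical queue modeling the postpone-or-run-directly behavior of `doTask`.
const LinkQueue = struct {
    remaining_prelink_tasks: u32,
    postponed_buf: [8]Task = undefined,
    postponed_len: usize = 0,

    fn doTask(q: *LinkQueue, task: Task) void {
        if (q.remaining_prelink_tasks != 0) {
            // Stand-in for `link_task_queue_postponed.appendAssumeCapacity(task)`:
            // capacity is assumed to have been reserved up front.
            q.postponed_buf[q.postponed_len] = task;
            q.postponed_len += 1;
            return;
        }
        // Stand-in for activating a `Zcu.PerThread` and performing the update.
        switch (task) {
            .link_nav => |nav| std.debug.print("updateNav({d})\n", .{nav}),
            .link_func => |func| std.debug.print("updateFunc({d})\n", .{func}),
        }
    }
};

pub fn main() void {
    var q: LinkQueue = .{ .remaining_prelink_tasks = 1 };

    q.doTask(.{ .link_func = 1 }); // prelink still running: postponed
    q.remaining_prelink_tasks = 0; // prelink finished
    q.doTask(.{ .link_nav = 2 }); // runs immediately

    // Drain whatever was postponed now that prelink is done.
    for (q.postponed_buf[0..q.postponed_len]) |t| q.doTask(t);
}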