diff --git a/src/Compilation.zig b/src/Compilation.zig index 8d276980ea..74da4b9fe2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -106,10 +106,7 @@ win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMa pub fn deinit(_: @This(), _: Allocator) void {} } = .{}, -link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .empty, -link_errors_mutex: std.Thread.Mutex = .{}, -link_error_flags: link.File.ErrorFlags = .{}, -lld_errors: std.ArrayListUnmanaged(LldError) = .empty, +link_diags: link.Diags, work_queues: [ len: { @@ -842,21 +839,6 @@ pub const MiscError = struct { } }; -pub const LldError = struct { - /// Allocated with gpa. - msg: []const u8, - context_lines: []const []const u8 = &.{}, - - pub fn deinit(self: *LldError, gpa: Allocator) void { - for (self.context_lines) |line| { - gpa.free(line); - } - - gpa.free(self.context_lines); - gpa.free(self.msg); - } -}; - pub const EmitLoc = struct { /// If this is `null` it means the file will be output to the cache directory. /// When provided, both the open file handle and the path name must outlive the `Compilation`. @@ -1558,6 +1540,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .global_cc_argv = options.global_cc_argv, .file_system_inputs = options.file_system_inputs, .parent_whole_cache = options.parent_whole_cache, + .link_diags = .init(gpa), }; // Prevent some footguns by making the "any" fields of config reflect @@ -1999,13 +1982,7 @@ pub fn destroy(comp: *Compilation) void { } comp.failed_win32_resources.deinit(gpa); - for (comp.link_errors.items) |*item| item.deinit(gpa); - comp.link_errors.deinit(gpa); - - for (comp.lld_errors.items) |*lld_error| { - lld_error.deinit(gpa); - } - comp.lld_errors.deinit(gpa); + comp.link_diags.deinit(); comp.clearMiscFailures(); @@ -2304,7 +2281,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { if (anyErrors(comp)) { // Skip flushing and keep source files loaded for error reporting. - comp.link_error_flags = .{}; + comp.link_diags.flags = .{}; return; } @@ -2451,7 +2428,7 @@ fn flush( if (comp.bin_file) |lf| { // This is needed before reading the error flags. 
lf.flush(arena, tid, prog_node) catch |err| switch (err) { - error.FlushFailure, error.LinkFailure => {}, // error reported through link_error_flags + error.FlushFailure, error.LinkFailure => {}, // error reported through link_diags.flags error.LLDReportedFailure => {}, // error reported via lockAndParseLldStderr else => |e| return e, }; @@ -3070,7 +3047,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { try bundle.addBundleAsRoots(error_bundle); } - for (comp.lld_errors.items) |lld_error| { + for (comp.link_diags.lld.items) |lld_error| { const notes_len = @as(u32, @intCast(lld_error.context_lines.len)); try bundle.addRootErrorMessage(.{ @@ -3091,7 +3068,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { }); if (value.children) |b| try bundle.addBundleAsNotes(b); } - if (comp.alloc_failure_occurred) { + if (comp.alloc_failure_occurred or comp.link_diags.flags.alloc_failure_occurred) { try bundle.addRootErrorMessage(.{ .msg = try bundle.addString("memory allocation failure"), }); @@ -3220,14 +3197,14 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } if (bundle.root_list.items.len == 0) { - if (comp.link_error_flags.no_entry_point_found) { + if (comp.link_diags.flags.no_entry_point_found) { try bundle.addRootErrorMessage(.{ .msg = try bundle.addString("no entry point found"), }); } } - if (comp.link_error_flags.missing_libc) { + if (comp.link_diags.flags.missing_libc) { try bundle.addRootErrorMessage(.{ .msg = try bundle.addString("libc not available"), .notes_len = 2, @@ -3241,7 +3218,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { })); } - for (comp.link_errors.items) |link_err| { + for (comp.link_diags.msgs.items) |link_err| { try bundle.addRootErrorMessage(.{ .msg = try bundle.addString(link_err.msg), .notes_len = @intCast(link_err.notes.len), @@ -6161,6 +6138,7 @@ fn wantBuildLibUnwindFromSource(comp: *Compilation) bool { } fn setAllocFailure(comp: *Compilation) void { + @branchHint(.cold); log.debug("memory allocation failure", .{}); comp.alloc_failure_occurred = true; } @@ -6195,54 +6173,6 @@ pub fn lockAndSetMiscFailure( return setMiscFailure(comp, tag, format, args); } -fn parseLldStderr(comp: *Compilation, prefix: []const u8, stderr: []const u8) Allocator.Error!void { - var context_lines = std.ArrayList([]const u8).init(comp.gpa); - defer context_lines.deinit(); - - var current_err: ?*LldError = null; - var lines = mem.splitSequence(u8, stderr, if (builtin.os.tag == .windows) "\r\n" else "\n"); - while (lines.next()) |line| { - if (line.len > prefix.len + ":".len and - mem.eql(u8, line[0..prefix.len], prefix) and line[prefix.len] == ':') - { - if (current_err) |err| { - err.context_lines = try context_lines.toOwnedSlice(); - } - - var split = mem.splitSequence(u8, line, "error: "); - _ = split.first(); - - const duped_msg = try std.fmt.allocPrint(comp.gpa, "{s}: {s}", .{ prefix, split.rest() }); - errdefer comp.gpa.free(duped_msg); - - current_err = try comp.lld_errors.addOne(comp.gpa); - current_err.?.* = .{ .msg = duped_msg }; - } else if (current_err != null) { - const context_prefix = ">>> "; - var trimmed = mem.trimRight(u8, line, &std.ascii.whitespace); - if (mem.startsWith(u8, trimmed, context_prefix)) { - trimmed = trimmed[context_prefix.len..]; - } - - if (trimmed.len > 0) { - const duped_line = try comp.gpa.dupe(u8, trimmed); - try context_lines.append(duped_line); - } - } - } - - if (current_err) |err| { - err.context_lines = try context_lines.toOwnedSlice(); - } -} - -pub fn lockAndParseLldStderr(comp: 
*Compilation, prefix: []const u8, stderr: []const u8) void { - comp.mutex.lock(); - defer comp.mutex.unlock(); - - comp.parseLldStderr(prefix, stderr) catch comp.setAllocFailure(); -} - pub fn dump_argv(argv: []const []const u8) void { std.debug.lockStdErr(); defer std.debug.unlockStdErr(); diff --git a/src/link.zig b/src/link.zig index 64115ab3ee..ebfee34c4b 100644 --- a/src/link.zig +++ b/src/link.zig @@ -37,6 +37,252 @@ pub const SystemLib = struct { path: ?Path, }; +pub const Diags = struct { + /// Stored here so that a function taking an allocator parameter can be + /// understood to need it for something besides error reporting. + gpa: Allocator, + mutex: std.Thread.Mutex, + msgs: std.ArrayListUnmanaged(Msg), + flags: Flags, + lld: std.ArrayListUnmanaged(Lld), + + pub const Flags = packed struct { + no_entry_point_found: bool = false, + missing_libc: bool = false, + alloc_failure_occurred: bool = false, + + const Int = blk: { + const bits = @typeInfo(@This()).@"struct".fields.len; + break :blk @Type(.{ .int = .{ + .signedness = .unsigned, + .bits = bits, + } }); + }; + + pub fn anySet(ef: Flags) bool { + return @as(Int, @bitCast(ef)) > 0; + } + }; + + pub const Lld = struct { + /// Allocated with gpa. + msg: []const u8, + context_lines: []const []const u8 = &.{}, + + pub fn deinit(self: *Lld, gpa: Allocator) void { + for (self.context_lines) |line| gpa.free(line); + gpa.free(self.context_lines); + gpa.free(self.msg); + self.* = undefined; + } + }; + + pub const Msg = struct { + msg: []const u8, + notes: []Msg = &.{}, + + pub fn deinit(self: *Msg, gpa: Allocator) void { + for (self.notes) |*note| note.deinit(gpa); + gpa.free(self.notes); + gpa.free(self.msg); + } + }; + + pub const ErrorWithNotes = struct { + diags: *Diags, + /// Allocated index in diags.msgs array. + index: usize, + /// Next available note slot.
+ note_slot: usize = 0, + + pub fn addMsg( + err: ErrorWithNotes, + comptime format: []const u8, + args: anytype, + ) error{OutOfMemory}!void { + const gpa = err.diags.gpa; + const err_msg = &err.diags.msgs.items[err.index]; + err_msg.msg = try std.fmt.allocPrint(gpa, format, args); + } + + pub fn addNote( + err: *ErrorWithNotes, + comptime format: []const u8, + args: anytype, + ) error{OutOfMemory}!void { + const gpa = err.diags.gpa; + const err_msg = &err.diags.msgs.items[err.index]; + assert(err.note_slot < err_msg.notes.len); + err_msg.notes[err.note_slot] = .{ .msg = try std.fmt.allocPrint(gpa, format, args) }; + err.note_slot += 1; + } + }; + + pub fn init(gpa: Allocator) Diags { + return .{ + .gpa = gpa, + .mutex = .{}, + .msgs = .empty, + .flags = .{}, + .lld = .empty, + }; + } + + pub fn deinit(diags: *Diags) void { + const gpa = diags.gpa; + + for (diags.msgs.items) |*item| item.deinit(gpa); + diags.msgs.deinit(gpa); + + for (diags.lld.items) |*item| item.deinit(gpa); + diags.lld.deinit(gpa); + + diags.* = undefined; + } + + pub fn hasErrors(diags: *Diags) bool { + return diags.msgs.items.len > 0 or diags.flags.anySet(); + } + + pub fn lockAndParseLldStderr(diags: *Diags, prefix: []const u8, stderr: []const u8) void { + diags.mutex.lock(); + defer diags.mutex.unlock(); + + diags.parseLldStderr(prefix, stderr) catch diags.setAllocFailure(); + } + + fn parseLldStderr( + diags: *Diags, + prefix: []const u8, + stderr: []const u8, + ) Allocator.Error!void { + const gpa = diags.gpa; + + var context_lines = std.ArrayList([]const u8).init(gpa); + defer context_lines.deinit(); + + var current_err: ?*Lld = null; + var lines = mem.splitSequence(u8, stderr, if (builtin.os.tag == .windows) "\r\n" else "\n"); + while (lines.next()) |line| { + if (line.len > prefix.len + ":".len and + mem.eql(u8, line[0..prefix.len], prefix) and line[prefix.len] == ':') + { + if (current_err) |err| { + err.context_lines = try context_lines.toOwnedSlice(); + } + + var split = mem.splitSequence(u8, line, "error: "); + _ = split.first(); + + const duped_msg = try std.fmt.allocPrint(gpa, "{s}: {s}", .{ prefix, split.rest() }); + errdefer gpa.free(duped_msg); + + current_err = try diags.lld.addOne(gpa); + current_err.?.* = .{ .msg = duped_msg }; + } else if (current_err != null) { + const context_prefix = ">>> "; + var trimmed = mem.trimRight(u8, line, &std.ascii.whitespace); + if (mem.startsWith(u8, trimmed, context_prefix)) { + trimmed = trimmed[context_prefix.len..]; + } + + if (trimmed.len > 0) { + const duped_line = try gpa.dupe(u8, trimmed); + try context_lines.append(duped_line); + } + } + } + + if (current_err) |err| { + err.context_lines = try context_lines.toOwnedSlice(); + } + } + + pub fn fail(diags: *Diags, comptime format: []const u8, args: anytype) error{LinkFailure} { + @branchHint(.cold); + addError(diags, format, args); + return error.LinkFailure; + } + + pub fn addError(diags: *Diags, comptime format: []const u8, args: anytype) void { + @branchHint(.cold); + const gpa = diags.gpa; + diags.mutex.lock(); + defer diags.mutex.unlock(); + diags.msgs.ensureUnusedCapacity(gpa, 1) catch |err| switch (err) { + error.OutOfMemory => { + diags.flags.alloc_failure_occurred = true; + return; + }, + }; + const err_msg: Msg = .{ + .msg = std.fmt.allocPrint(gpa, format, args) catch |err| switch (err) { + error.OutOfMemory => { + diags.flags.alloc_failure_occurred = true; + return; + }, + }, + }; + diags.msgs.appendAssumeCapacity(err_msg); + } + + pub fn addErrorWithNotes(diags: *Diags, note_count: usize) 
error{OutOfMemory}!ErrorWithNotes { + @branchHint(.cold); + const gpa = diags.gpa; + diags.mutex.lock(); + defer diags.mutex.unlock(); + try diags.msgs.ensureUnusedCapacity(gpa, 1); + return addErrorWithNotesAssumeCapacity(diags, note_count); + } + + pub fn addErrorWithNotesAssumeCapacity(diags: *Diags, note_count: usize) error{OutOfMemory}!ErrorWithNotes { + @branchHint(.cold); + const gpa = diags.gpa; + const index = diags.msgs.items.len; + const err = diags.msgs.addOneAssumeCapacity(); + err.* = .{ + .msg = undefined, + .notes = try gpa.alloc(Diags.Msg, note_count), + }; + return .{ + .diags = diags, + .index = index, + }; + } + + pub fn reportMissingLibraryError( + diags: *Diags, + checked_paths: []const []const u8, + comptime format: []const u8, + args: anytype, + ) error{OutOfMemory}!void { + @branchHint(.cold); + var err = try diags.addErrorWithNotes(checked_paths.len); + try err.addMsg(format, args); + for (checked_paths) |path| { + try err.addNote("tried {s}", .{path}); + } + } + + pub fn reportParseError( + diags: *Diags, + path: Path, + comptime format: []const u8, + args: anytype, + ) error{OutOfMemory}!void { + @branchHint(.cold); + var err = try diags.addErrorWithNotes(1); + try err.addMsg(format, args); + try err.addNote("while parsing {}", .{path}); + } + + pub fn setAllocFailure(diags: *Diags) void { + @branchHint(.cold); + log.debug("memory allocation failure", .{}); + diags.flags.alloc_failure_occurred = true; + } +}; + pub fn hashAddSystemLibs( man: *Cache.Manifest, hm: std.StringArrayHashMapUnmanaged(SystemLib), @@ -446,58 +692,6 @@ pub const File = struct { } } - pub const ErrorWithNotes = struct { - base: *const File, - - /// Allocated index in base.errors array. - index: usize, - - /// Next available note slot. - note_slot: usize = 0, - - pub fn addMsg( - err: ErrorWithNotes, - comptime format: []const u8, - args: anytype, - ) error{OutOfMemory}!void { - const gpa = err.base.comp.gpa; - const err_msg = &err.base.comp.link_errors.items[err.index]; - err_msg.msg = try std.fmt.allocPrint(gpa, format, args); - } - - pub fn addNote( - err: *ErrorWithNotes, - comptime format: []const u8, - args: anytype, - ) error{OutOfMemory}!void { - const gpa = err.base.comp.gpa; - const err_msg = &err.base.comp.link_errors.items[err.index]; - assert(err.note_slot < err_msg.notes.len); - err_msg.notes[err.note_slot] = .{ .msg = try std.fmt.allocPrint(gpa, format, args) }; - err.note_slot += 1; - } - }; - - pub fn addErrorWithNotes(base: *const File, note_count: usize) error{OutOfMemory}!ErrorWithNotes { - base.comp.link_errors_mutex.lock(); - defer base.comp.link_errors_mutex.unlock(); - const gpa = base.comp.gpa; - try base.comp.link_errors.ensureUnusedCapacity(gpa, 1); - return base.addErrorWithNotesAssumeCapacity(note_count); - } - - pub fn addErrorWithNotesAssumeCapacity(base: *const File, note_count: usize) error{OutOfMemory}!ErrorWithNotes { - const gpa = base.comp.gpa; - const index = base.comp.link_errors.items.len; - const err = base.comp.link_errors.addOneAssumeCapacity(); - err.* = .{ .msg = undefined, .notes = try gpa.alloc(ErrorMsg, note_count) }; - return .{ .base = base, .index = index }; - } - - pub fn hasErrors(base: *const File) bool { - return base.comp.link_errors.items.len > 0 or base.comp.link_error_flags.isSet(); - } - pub fn releaseLock(self: *File) void { if (self.lock) |*lock| { lock.release(); @@ -523,7 +717,7 @@ pub const File = struct { } /// TODO audit this error set. 
most of these should be collapsed into one error, - /// and ErrorFlags should be updated to convey the meaning to the user. + /// and Diags.Flags should be updated to convey the meaning to the user. pub const FlushError = error{ CacheUnavailable, CurrentWorkingDirectoryUnlinked, @@ -939,36 +1133,6 @@ pub const File = struct { } }; - pub const ErrorFlags = packed struct { - no_entry_point_found: bool = false, - missing_libc: bool = false, - - const Int = blk: { - const bits = @typeInfo(@This()).@"struct".fields.len; - break :blk @Type(.{ .int = .{ - .signedness = .unsigned, - .bits = bits, - } }); - }; - - fn isSet(ef: ErrorFlags) bool { - return @as(Int, @bitCast(ef)) > 0; - } - }; - - pub const ErrorMsg = struct { - msg: []const u8, - notes: []ErrorMsg = &.{}, - - pub fn deinit(self: *ErrorMsg, gpa: Allocator) void { - for (self.notes) |*note| { - note.deinit(gpa); - } - gpa.free(self.notes); - gpa.free(self.msg); - } - }; - pub const LazySymbol = struct { pub const Kind = enum { code, const_data }; @@ -1154,7 +1318,8 @@ pub fn spawnLld( switch (term) { .Exited => |code| if (code != 0) { if (comp.clang_passthrough_mode) std.process.exit(code); - comp.lockAndParseLldStderr(argv[1], stderr); + const diags = &comp.link_diags; + diags.lockAndParseLldStderr(argv[1], stderr); return error.LLDReportedFailure; }, else => { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index b104cd5840..1b25ee5e09 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1679,6 +1679,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no const comp = self.base.comp; const gpa = comp.gpa; + const diags = &comp.link_diags; if (self.llvm_object) |llvm_object| { try self.base.emitLlvmObject(arena, llvm_object, prog_node); @@ -1796,10 +1797,10 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no if (self.entry_addr == null and comp.config.output_mode == .Exe) { log.debug("flushing. no_entry_point_found = true\n", .{}); - comp.link_error_flags.no_entry_point_found = true; + diags.flags.no_entry_point_found = true; } else { log.debug("flushing. 
no_entry_point_found = false\n", .{}); - comp.link_error_flags.no_entry_point_found = false; + diags.flags.no_entry_point_found = false; try self.writeHeader(); } diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 47554d2200..526c2932d9 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -769,6 +769,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod const comp = self.base.comp; const gpa = comp.gpa; + const diags = &comp.link_diags; if (self.llvm_object) |llvm_object| { try self.base.emitLlvmObject(arena, llvm_object, prog_node); @@ -848,7 +849,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod } // libc dep - comp.link_error_flags.missing_libc = false; + diags.flags.missing_libc = false; if (comp.config.link_libc) { if (comp.libc_installation) |lc| { const flags = target_util.libcFullLinkFlags(target); @@ -868,7 +869,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .static)) break :success; - try self.reportMissingLibraryError( + try diags.reportMissingLibraryError( checked_paths.items, "missing system library: '{s}' was not found", .{lib_name}, @@ -901,7 +902,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod }); try self.parseLibraryReportingFailure(.{ .path = path }, false); } else { - comp.link_error_flags.missing_libc = true; + diags.flags.missing_libc = true; } } @@ -920,7 +921,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod if (csu.crtend) |path| try parseObjectReportingFailure(self, path); if (csu.crtn) |path| try parseObjectReportingFailure(self, path); - if (self.base.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.FlushFailure; // Dedup shared objects { @@ -1078,14 +1079,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod if (self.base.isExe() and self.linkerDefinedPtr().?.entry_index == null) { log.debug("flushing. no_entry_point_found = true", .{}); - comp.link_error_flags.no_entry_point_found = true; + diags.flags.no_entry_point_found = true; } else { log.debug("flushing. no_entry_point_found = false", .{}); - comp.link_error_flags.no_entry_point_found = false; + diags.flags.no_entry_point_found = false; try self.writeElfHeader(); } - if (self.base.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.FlushFailure; } /// --verbose-link output @@ -1358,7 +1359,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void { } pub const ParseError = error{ - /// Indicates the error is already reported on `Compilation.link_errors`. + /// Indicates the error is already reported on `Compilation.link_diags`. 
LinkFailure, OutOfMemory, @@ -1484,7 +1485,10 @@ fn parseLdScript(self: *Elf, lib: SystemLib) ParseError!void { const tracy = trace(@src()); defer tracy.end(); - const gpa = self.base.comp.gpa; + const comp = self.base.comp; + const gpa = comp.gpa; + const diags = &comp.link_diags; + const in_file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{}); defer in_file.close(); const data = try in_file.readToEndAlloc(gpa, std.math.maxInt(u32)); @@ -1533,7 +1537,7 @@ fn parseLdScript(self: *Elf, lib: SystemLib) ParseError!void { } } - try self.reportMissingLibraryError( + try diags.reportMissingLibraryError( checked_paths.items, "missing library dependency: GNU ld script '{}' requires '{s}', but file not found", .{ @as(Path, lib.path), script_arg.path }, @@ -1856,6 +1860,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s const comp = self.base.comp; const gpa = comp.gpa; + const diags = &comp.link_diags; const directory = self.base.emit.root_dir; // Just an alias to make it shorter to type. const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path}); @@ -2376,7 +2381,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s } // libc dep - comp.link_error_flags.missing_libc = false; + diags.flags.missing_libc = false; if (comp.config.link_libc) { if (comp.libc_installation != null) { const needs_grouping = link_mode == .static; @@ -2401,7 +2406,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s .dynamic => "libc.so", })); } else { - comp.link_error_flags.missing_libc = true; + diags.flags.missing_libc = true; } } } @@ -2546,7 +2551,8 @@ fn writePhdrTable(self: *Elf) !void { } pub fn writeElfHeader(self: *Elf) !void { - if (self.base.hasErrors()) return; // We had errors, so skip flushing to render the output unusable + const diags = &self.base.comp.link_diags; + if (diags.hasErrors()) return; // We had errors, so skip flushing to render the output unusable const comp = self.base.comp; var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined; @@ -3700,6 +3706,7 @@ fn addLoadPhdrs(self: *Elf) error{OutOfMemory}!void { /// Allocates PHDR table in virtual memory and in file. fn allocatePhdrTable(self: *Elf) error{OutOfMemory}!void { + const diags = &self.base.comp.link_diags; const phdr_table = &self.phdrs.items[self.phdr_indexes.table.int().?]; const phdr_table_load = &self.phdrs.items[self.phdr_indexes.table_load.int().?]; @@ -3720,7 +3727,7 @@ fn allocatePhdrTable(self: *Elf) error{OutOfMemory}!void { // (revisit getMaxNumberOfPhdrs()) // 2. 
shift everything in file to free more space for EHDR + PHDR table // TODO verify `getMaxNumberOfPhdrs()` is accurate and convert this into no-op - var err = try self.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("fatal linker error: not enough space reserved for EHDR and PHDR table", .{}); try err.addNote("required 0x{x}, available 0x{x}", .{ needed_size, available_space }); } @@ -4855,16 +4862,17 @@ pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 { fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void { const gpa = self.base.comp.gpa; + const diags = &self.base.comp.link_diags; const max_notes = 4; - try self.base.comp.link_errors.ensureUnusedCapacity(gpa, undefs.count()); + try diags.msgs.ensureUnusedCapacity(gpa, undefs.count()); for (undefs.keys(), undefs.values()) |key, refs| { const undef_sym = self.resolver.keys.items[key - 1]; const nrefs = @min(refs.items.len, max_notes); const nnotes = nrefs + @intFromBool(refs.items.len > max_notes); - var err = try self.base.addErrorWithNotesAssumeCapacity(nnotes); + var err = try diags.addErrorWithNotesAssumeCapacity(nnotes); try err.addMsg("undefined symbol: {s}", .{undef_sym.name(self)}); for (refs.items[0..nrefs]) |ref| { @@ -4882,6 +4890,7 @@ fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void { fn reportDuplicates(self: *Elf, dupes: anytype) error{ HasDuplicates, OutOfMemory }!void { if (dupes.keys().len == 0) return; // Nothing to do + const diags = &self.base.comp.link_diags; const max_notes = 3; @@ -4889,7 +4898,7 @@ fn reportDuplicates(self: *Elf, dupes: anytype) error{ HasDuplicates, OutOfMemor const sym = self.resolver.keys.items[key - 1]; const nnotes = @min(notes.items.len, max_notes) + @intFromBool(notes.items.len > max_notes); - var err = try self.base.addErrorWithNotes(nnotes + 1); + var err = try diags.addErrorWithNotes(nnotes + 1); try err.addMsg("duplicate symbol definition: {s}", .{sym.name(self)}); try err.addNote("defined by {}", .{sym.file(self).?.fmtPath()}); @@ -4908,21 +4917,9 @@ fn reportDuplicates(self: *Elf, dupes: anytype) error{ HasDuplicates, OutOfMemor return error.HasDuplicates; } -fn reportMissingLibraryError( - self: *Elf, - checked_paths: []const []const u8, - comptime format: []const u8, - args: anytype, -) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(checked_paths.len); - try err.addMsg(format, args); - for (checked_paths) |path| { - try err.addNote("tried {s}", .{path}); - } -} - fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(0); + const diags = &self.base.comp.link_diags; + var err = try diags.addErrorWithNotes(0); try err.addMsg("fatal linker error: unsupported CPU architecture {s}", .{ @tagName(self.getTarget().cpu.arch), }); @@ -4934,7 +4931,8 @@ pub fn addParseError( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(1); + const diags = &self.base.comp.link_diags; + var err = try diags.addErrorWithNotes(1); try err.addMsg(format, args); try err.addNote("while parsing {}", .{path}); } @@ -4945,7 +4943,8 @@ pub fn addFileError( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(1); + const diags = &self.base.comp.link_diags; + var err = try diags.addErrorWithNotes(1); try err.addMsg(format, args); try err.addNote("while parsing {}", .{self.file(file_index).?.fmtPath()}); } diff --git 
a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 20aa249ea5..8580f27d2d 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -519,7 +519,8 @@ fn dataType(symbol: *const Symbol, elf_file: *Elf) u2 { } fn reportUnhandledRelocError(self: Atom, rel: elf.Elf64_Rela, elf_file: *Elf) RelocError!void { - var err = try elf_file.base.addErrorWithNotes(1); + const diags = &elf_file.base.comp.link_diags; + var err = try diags.addErrorWithNotes(1); try err.addMsg("fatal linker error: unhandled relocation type {} at offset 0x{x}", .{ relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch), rel.r_offset, @@ -534,7 +535,8 @@ fn reportTextRelocError( rel: elf.Elf64_Rela, elf_file: *Elf, ) RelocError!void { - var err = try elf_file.base.addErrorWithNotes(1); + const diags = &elf_file.base.comp.link_diags; + var err = try diags.addErrorWithNotes(1); try err.addMsg("relocation at offset 0x{x} against symbol '{s}' cannot be used", .{ rel.r_offset, symbol.name(elf_file), @@ -549,7 +551,8 @@ fn reportPicError( rel: elf.Elf64_Rela, elf_file: *Elf, ) RelocError!void { - var err = try elf_file.base.addErrorWithNotes(2); + const diags = &elf_file.base.comp.link_diags; + var err = try diags.addErrorWithNotes(2); try err.addMsg("relocation at offset 0x{x} against symbol '{s}' cannot be used", .{ rel.r_offset, symbol.name(elf_file), @@ -565,7 +568,8 @@ fn reportNoPicError( rel: elf.Elf64_Rela, elf_file: *Elf, ) RelocError!void { - var err = try elf_file.base.addErrorWithNotes(2); + const diags = &elf_file.base.comp.link_diags; + var err = try diags.addErrorWithNotes(2); try err.addMsg("relocation at offset 0x{x} against symbol '{s}' cannot be used", .{ rel.r_offset, symbol.name(elf_file), @@ -1082,6 +1086,7 @@ const x86_64 = struct { stream: anytype, ) (error{ InvalidInstruction, CannotEncode } || RelocError)!void { dev.check(.x86_64_backend); + const diags = &elf_file.base.comp.link_diags; const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type()); const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow; @@ -1176,7 +1181,7 @@ const x86_64 = struct { try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little); } else { x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]) catch { - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("could not relax {s}", .{@tagName(r_type)}); try err.addNote("in {}:{s} at offset 0x{x}", .{ atom.file(elf_file).?.fmtPath(), @@ -1301,6 +1306,7 @@ const x86_64 = struct { ) !void { dev.check(.x86_64_backend); assert(rels.len == 2); + const diags = &elf_file.base.comp.link_diags; const writer = stream.writer(); const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type()); switch (rel) { @@ -1317,7 +1323,7 @@ const x86_64 = struct { }, else => { - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("TODO: rewrite {} when followed by {}", .{ relocation.fmtRelocType(rels[0].r_type(), .x86_64), relocation.fmtRelocType(rels[1].r_type(), .x86_64), @@ -1341,6 +1347,7 @@ const x86_64 = struct { ) !void { dev.check(.x86_64_backend); assert(rels.len == 2); + const diags = &elf_file.base.comp.link_diags; const writer = stream.writer(); const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type()); switch (rel) { @@ -1372,7 +1379,7 @@ const x86_64 = struct { }, else => { - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("TODO: rewrite {} when followed by {}", .{ 
relocation.fmtRelocType(rels[0].r_type(), .x86_64), relocation.fmtRelocType(rels[1].r_type(), .x86_64), @@ -1446,6 +1453,7 @@ const x86_64 = struct { ) !void { dev.check(.x86_64_backend); assert(rels.len == 2); + const diags = &elf_file.base.comp.link_diags; const writer = stream.writer(); const rel: elf.R_X86_64 = @enumFromInt(rels[1].r_type()); switch (rel) { @@ -1468,7 +1476,7 @@ const x86_64 = struct { }, else => { - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("fatal linker error: rewrite {} when followed by {}", .{ relocation.fmtRelocType(rels[0].r_type(), .x86_64), relocation.fmtRelocType(rels[1].r_type(), .x86_64), @@ -1603,6 +1611,7 @@ const aarch64 = struct { ) (error{ UnexpectedRemainder, DivisionByZero } || RelocError)!void { _ = it; + const diags = &elf_file.base.comp.link_diags; const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type()); const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow; const cwriter = stream.writer(); @@ -1657,7 +1666,7 @@ const aarch64 = struct { aarch64_util.writeAdrpInst(pages, code); } else { // TODO: relax - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("TODO: relax ADR_GOT_PAGE", .{}); try err.addNote("in {}:{s} at offset 0x{x}", .{ atom.file(elf_file).?.fmtPath(), @@ -1882,6 +1891,7 @@ const riscv = struct { code: []u8, stream: anytype, ) !void { + const diags = &elf_file.base.comp.link_diags; const r_type: elf.R_RISCV = @enumFromInt(rel.r_type()); const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow; const cwriter = stream.writer(); @@ -1943,7 +1953,7 @@ const riscv = struct { if (S == atom_addr + @as(i64, @intCast(pair.r_offset))) break pair; } else { // TODO: implement searching forward - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("TODO: find HI20 paired reloc scanning forward", .{}); try err.addNote("in {}:{s} at offset 0x{x}", .{ atom.file(elf_file).?.fmtPath(), diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index d1f1870036..2c0313609f 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -644,6 +644,7 @@ pub fn checkDuplicates(self: *Object, dupes: anytype, elf_file: *Elf) error{OutO pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void { const gpa = elf_file.base.comp.gpa; + const diags = &elf_file.base.comp.link_diags; try self.input_merge_sections.ensureUnusedCapacity(gpa, self.shdrs.items.len); try self.input_merge_sections_indexes.resize(gpa, self.shdrs.items.len); @@ -685,7 +686,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void { var end = start; while (end < data.len - sh_entsize and !isNull(data[end .. end + sh_entsize])) : (end += sh_entsize) {} if (!isNull(data[end .. 
end + sh_entsize])) { - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("string not null terminated", .{}); try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); return error.LinkFailure; @@ -700,7 +701,7 @@ pub fn initInputMergeSections(self: *Object, elf_file: *Elf) !void { const sh_entsize: u32 = @intCast(shdr.sh_entsize); if (sh_entsize == 0) continue; // Malformed, don't split but don't error out if (shdr.sh_size % sh_entsize != 0) { - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("size not a multiple of sh_entsize", .{}); try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); return error.LinkFailure; @@ -738,6 +739,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{ Overflow, }!void { const gpa = elf_file.base.comp.gpa; + const diags = &elf_file.base.comp.link_diags; for (self.input_merge_sections_indexes.items) |index| { const imsec = self.inputMergeSection(index) orelse continue; @@ -776,7 +778,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{ const imsec = self.inputMergeSection(imsec_index) orelse continue; if (imsec.offsets.items.len == 0) continue; const res = imsec.findSubsection(@intCast(esym.st_value)) orelse { - var err = try elf_file.base.addErrorWithNotes(2); + var err = try diags.addErrorWithNotes(2); try err.addMsg("invalid symbol value: {x}", .{esym.st_value}); try err.addNote("for symbol {s}", .{sym.name(elf_file)}); try err.addNote("in {}", .{self.fmtPath()}); @@ -802,7 +804,7 @@ pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) error{ if (imsec.offsets.items.len == 0) continue; const msec = elf_file.mergeSection(imsec.merge_section_index); const res = imsec.findSubsection(@intCast(@as(i64, @intCast(esym.st_value)) + rel.r_addend)) orelse { - var err = try elf_file.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("invalid relocation at offset 0x{x}", .{rel.r_offset}); try err.addNote("in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) }); return error.LinkFailure; diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig index 0e08677b80..81913cb33c 100644 --- a/src/link/Elf/eh_frame.zig +++ b/src/link/Elf/eh_frame.zig @@ -611,7 +611,8 @@ const riscv = struct { }; fn reportInvalidReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela) !void { - var err = try elf_file.base.addErrorWithNotes(1); + const diags = &elf_file.base.comp.link_diags; + var err = try diags.addErrorWithNotes(1); try err.addMsg("invalid relocation type {} at offset 0x{x}", .{ relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch), rel.r_offset, diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 944a6e0e46..3af2d919d6 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -1,5 +1,6 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void { const gpa = comp.gpa; + const diags = &comp.link_diags; for (comp.objects) |obj| { switch (Compilation.classifyFileExt(obj.path.sub_path)) { @@ -21,7 +22,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path try parseObjectStaticLibReportingFailure(elf_file, comp.compiler_rt_obj.?.full_object_path); } - if (elf_file.base.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.FlushFailure; // First, we flush 
relocatable object file generated with our backends. if (elf_file.zigObjectPtr()) |zig_object| { @@ -146,10 +147,12 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path try elf_file.base.file.?.setEndPos(total_size); try elf_file.base.file.?.pwriteAll(buffer.items, 0); - if (elf_file.base.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.FlushFailure; } pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void { + const diags = &comp.link_diags; + for (comp.objects) |obj| { if (obj.isObject()) { try elf_file.parseObjectReportingFailure(obj.path); @@ -167,7 +170,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) l if (module_obj_path) |path| try elf_file.parseObjectReportingFailure(path); - if (elf_file.base.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.FlushFailure; // Now, we are ready to resolve the symbols across all input files. // We will first resolve the files in the ZigObject, next in the parsed @@ -213,7 +216,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) l try elf_file.writeShdrTable(); try elf_file.writeElfHeader(); - if (elf_file.base.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.FlushFailure; } fn parseObjectStaticLibReportingFailure(elf_file: *Elf, path: Path) error{OutOfMemory}!void { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 38684e4d6a..10bae046da 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -100,7 +100,6 @@ debug_rnglists_sect_index: ?u8 = null, has_tlv: AtomicBool = AtomicBool.init(false), binds_to_weak: AtomicBool = AtomicBool.init(false), weak_defines: AtomicBool = AtomicBool.init(false), -has_errors: AtomicBool = AtomicBool.init(false), /// Options /// SDK layout @@ -347,6 +346,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n const comp = self.base.comp; const gpa = comp.gpa; + const diags = &self.base.comp.link_diags; if (self.llvm_object) |llvm_object| { try self.base.emitLlvmObject(arena, llvm_object, prog_node); @@ -397,8 +397,8 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n for (positionals.items) |obj| { self.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err| switch (err) { - error.UnknownFileType => try self.reportParseError(obj.path, "unknown file type for an input file", .{}), - else => |e| try self.reportParseError( + error.UnknownFileType => try diags.reportParseError(obj.path, "unknown file type for an input file", .{}), + else => |e| try diags.reportParseError( obj.path, "unexpected error: reading input file failed with error {s}", .{@errorName(e)}, @@ -444,8 +444,8 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n for (system_libs.items) |lib| { self.classifyInputFile(lib.path, lib, false) catch |err| switch (err) { - error.UnknownFileType => try self.reportParseError(lib.path, "unknown file type for an input file", .{}), - else => |e| try self.reportParseError( + error.UnknownFileType => try diags.reportParseError(lib.path, "unknown file type for an input file", .{}), + else => |e| try diags.reportParseError( lib.path, "unexpected error: parsing input file failed with error {s}", .{@errorName(e)}, @@ -461,8 +461,8 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n }; if (compiler_rt_path) |path| { 
self.classifyInputFile(path, .{ .path = path }, false) catch |err| switch (err) { - error.UnknownFileType => try self.reportParseError(path, "unknown file type for an input file", .{}), - else => |e| try self.reportParseError( + error.UnknownFileType => try diags.reportParseError(path, "unknown file type for an input file", .{}), + else => |e| try diags.reportParseError( path, "unexpected error: parsing input file failed with error {s}", .{@errorName(e)}, @@ -474,14 +474,11 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n self.parseDependentDylibs() catch |err| { switch (err) { error.MissingLibraryDependencies => {}, - else => |e| try self.reportUnexpectedError( - "unexpected error while parsing dependent libraries: {s}", - .{@errorName(e)}, - ), + else => |e| return diags.fail("failed to parse dependent libraries: {s}", .{@errorName(e)}), } }; - if (self.base.hasErrors()) return error.FlushFailure; + if (diags.hasErrors()) return error.FlushFailure; { const index = @as(File.Index, @intCast(try self.files.addOne(gpa))); @@ -502,10 +499,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n self.checkDuplicates() catch |err| switch (err) { error.HasDuplicates => return error.FlushFailure, - else => |e| { - try self.reportUnexpectedError("unexpected error while checking for duplicate symbol definitions", .{}); - return e; - }, + else => |e| return diags.fail("failed to check for duplicate symbol definitions: {s}", .{@errorName(e)}), }; self.markImportsAndExports(); @@ -520,10 +514,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n self.scanRelocs() catch |err| switch (err) { error.HasUndefinedSymbols => return error.FlushFailure, - else => |e| { - try self.reportUnexpectedError("unexpected error while scanning relocations", .{}); - return e; - }, + else => |e| return diags.fail("failed to scan relocations: {s}", .{@errorName(e)}), }; try self.initOutputSections(); @@ -784,6 +775,8 @@ pub fn resolveLibSystem( comp: *Compilation, out_libs: anytype, ) !void { + const diags = &self.base.comp.link_diags; + var test_path = std.ArrayList(u8).init(arena); var checked_paths = std.ArrayList([]const u8).init(arena); @@ -803,7 +796,7 @@ pub fn resolveLibSystem( if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success; } - try self.reportMissingLibraryError(checked_paths.items, "unable to find libSystem system library", .{}); + try diags.reportMissingLibraryError(checked_paths.items, "unable to find libSystem system library", .{}); return error.MissingLibSystem; } @@ -845,6 +838,7 @@ pub fn classifyInputFile(self: *MachO, path: Path, lib: SystemLib, must_link: bo } fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch { + const diags = &self.base.comp.link_diags; const fat_h = fat.readFatHeader(file) catch return null; if (fat_h.magic != macho.FAT_MAGIC and fat_h.magic != macho.FAT_MAGIC_64) return null; var fat_archs_buffer: [2]fat.Arch = undefined; @@ -853,7 +847,7 @@ fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch { for (fat_archs) |arch| { if (arch.tag == cpu_arch) return arch; } - try self.reportParseError(path, "missing arch in universal file: expected {s}", .{ + try diags.reportParseError(path, "missing arch in universal file: expected {s}", .{ @tagName(cpu_arch), }); return error.MissingCpuArch; @@ -901,6 +895,7 @@ pub fn parseInputFiles(self: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); + const diags = 
&self.base.comp.link_diags; const tp = self.base.comp.thread_pool; var wg: WaitGroup = .{}; @@ -916,7 +911,7 @@ pub fn parseInputFiles(self: *MachO) !void { } } - if (self.has_errors.swap(false, .seq_cst)) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; } fn parseInputFileWorker(self: *MachO, file: File) void { @@ -928,9 +923,9 @@ fn parseInputFileWorker(self: *MachO, file: File) void { error.InvalidMachineType, error.InvalidTarget, => {}, // already reported + else => |e| self.reportParseError2(file.getIndex(), "unexpected error: parsing input file failed with error {s}", .{@errorName(e)}) catch {}, } - _ = self.has_errors.swap(true, .seq_cst); }; } @@ -1296,6 +1291,7 @@ fn markLive(self: *MachO) void { fn convertTentativeDefsAndResolveSpecialSymbols(self: *MachO) !void { const tp = self.base.comp.thread_pool; + const diags = &self.base.comp.link_diags; var wg: WaitGroup = .{}; { wg.reset(); @@ -1307,7 +1303,7 @@ fn convertTentativeDefsAndResolveSpecialSymbols(self: *MachO) !void { tp.spawnWg(&wg, resolveSpecialSymbolsWorker, .{ self, obj }); } } - if (self.has_errors.swap(false, .seq_cst)) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; } fn convertTentativeDefinitionsWorker(self: *MachO, object: *Object) void { @@ -1319,26 +1315,19 @@ fn convertTentativeDefinitionsWorker(self: *MachO, object: *Object) void { "unexpected error occurred while converting tentative symbols into defined symbols: {s}", .{@errorName(err)}, ) catch {}; - _ = self.has_errors.swap(true, .seq_cst); }; } fn resolveSpecialSymbolsWorker(self: *MachO, obj: *InternalObject) void { const tracy = trace(@src()); defer tracy.end(); - obj.resolveBoundarySymbols(self) catch |err| { - self.reportUnexpectedError("unexpected error occurred while resolving boundary symbols: {s}", .{ - @errorName(err), - }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); - return; - }; - obj.resolveObjcMsgSendSymbols(self) catch |err| { - self.reportUnexpectedError("unexpected error occurred while resolving ObjC msgsend stubs: {s}", .{ - @errorName(err), - }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); - }; + + const diags = &self.base.comp.link_diags; + + obj.resolveBoundarySymbols(self) catch |err| + return diags.addError("failed to resolve boundary symbols: {s}", .{@errorName(err)}); + obj.resolveObjcMsgSendSymbols(self) catch |err| + return diags.addError("failed to resolve ObjC msgsend stubs: {s}", .{@errorName(err)}); } pub fn dedupLiterals(self: *MachO) !void { @@ -1390,6 +1379,8 @@ fn checkDuplicates(self: *MachO) !void { defer tracy.end(); const tp = self.base.comp.thread_pool; + const diags = &self.base.comp.link_diags; + var wg: WaitGroup = .{}; { wg.reset(); @@ -1405,7 +1396,7 @@ fn checkDuplicates(self: *MachO) !void { } } - if (self.has_errors.swap(false, .seq_cst)) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; try self.reportDuplicates(); } @@ -1417,7 +1408,6 @@ fn checkDuplicatesWorker(self: *MachO, file: File) void { self.reportParseError2(file.getIndex(), "failed to check for duplicate definitions: {s}", .{ @errorName(err), }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); }; } @@ -1460,6 +1450,8 @@ fn scanRelocs(self: *MachO) !void { defer tracy.end(); const tp = self.base.comp.thread_pool; + const diags = &self.base.comp.link_diags; + var wg: WaitGroup = .{}; { @@ -1477,7 +1469,7 @@ fn scanRelocs(self: *MachO) !void { } } - if (self.has_errors.swap(false, .seq_cst)) return error.FlushFailure; + if 
(diags.hasErrors()) return error.LinkFailure; if (self.getInternalObject()) |obj| { try obj.checkUndefs(self); @@ -1503,7 +1495,6 @@ fn scanRelocsWorker(self: *MachO, file: File) void { self.reportParseError2(file.getIndex(), "failed to scan relocations: {s}", .{ @errorName(err), }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); }; } @@ -1527,6 +1518,7 @@ fn reportUndefs(self: *MachO) !void { if (self.undefs.keys().len == 0) return; // Nothing to do const gpa = self.base.comp.gpa; + const diags = &self.base.comp.link_diags; const max_notes = 4; // We will sort by name, and then by file to ensure deterministic output. @@ -1558,7 +1550,7 @@ break :nnotes @min(nnotes, max_notes) + @intFromBool(nnotes > max_notes); }; - var err = try self.base.addErrorWithNotes(nnotes); + var err = try diags.addErrorWithNotesAssumeCapacity(nnotes); try err.addMsg("undefined symbol: {s}", .{undef_sym.getName(self)}); switch (notes) { @@ -1908,6 +1900,7 @@ fn calcSectionSizes(self: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); + const diags = &self.base.comp.link_diags; const cpu_arch = self.getTarget().cpu.arch; if (self.data_sect_index) |idx| { @@ -1951,7 +1944,7 @@ } } - if (self.has_errors.swap(false, .seq_cst)) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; try self.calcSymtabSize(); @@ -2003,6 +1996,9 @@ fn calcSectionSizeWorker(self: *MachO, sect_id: u8) void { const tracy = trace(@src()); defer tracy.end(); + + const diags = &self.base.comp.link_diags; + const doWork = struct { fn doWork(macho_file: *MachO, header: *macho.section_64, atoms: []const Ref) !void { for (atoms) |ref| { @@ -2020,26 +2016,21 @@ const header = &slice.items(.header)[sect_id]; const atoms = slice.items(.atoms)[sect_id].items; doWork(self, header, atoms) catch |err| { - self.reportUnexpectedError("failed to calculate size of section '{s},{s}': {s}", .{ - header.segName(), - header.sectName(), - @errorName(err), - }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); + diags.addError("failed to calculate size of section '{s},{s}': {s}", .{ + header.segName(), header.sectName(), @errorName(err), + }); }; } fn createThunksWorker(self: *MachO, sect_id: u8) void { const tracy = trace(@src()); defer tracy.end(); + const diags = &self.base.comp.link_diags; self.createThunks(sect_id) catch |err| { const header = self.sections.items(.header)[sect_id]; - self.reportUnexpectedError("failed to create thunks and calculate size of section '{s},{s}': {s}", .{ - header.segName(), - header.sectName(), - @errorName(err), - }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); + diags.addError("failed to create thunks and calculate size of section '{s},{s}': {s}", .{ + header.segName(), header.sectName(), @errorName(err), + }); }; } @@ -2047,6 +2038,8 @@ fn generateUnwindInfo(self: *MachO) !void { const tracy = trace(@src()); defer tracy.end(); + const diags = &self.base.comp.link_diags; + if (self.eh_frame_sect_index) |index| { const sect = &self.sections.items(.header)[index]; sect.size = try eh_frame.calcSize(self); @@ -2055,10 +2048,7 @@ if (self.unwind_info_sect_index) |index| { const sect = &self.sections.items(.header)[index]; self.unwind_info.generate(self) catch |err| switch (err) { - error.TooManyPersonalities => return self.reportUnexpectedError( - "too many
personalities in unwind info", - .{}, - ), + error.TooManyPersonalities => return diags.fail("too many personalities in unwind info", .{}), else => |e| return e, }; sect.size = self.unwind_info.calcSize(); @@ -2427,6 +2417,7 @@ fn writeSectionsAndUpdateLinkeditSizes(self: *MachO) !void { defer tracy.end(); const gpa = self.base.comp.gpa; + const diags = &self.base.comp.link_diags; const cmd = self.symtab_cmd; try self.symtab.resize(gpa, cmd.nsyms); @@ -2495,7 +2486,7 @@ fn writeSectionsAndUpdateLinkeditSizes(self: *MachO) !void { }; } - if (self.has_errors.swap(false, .seq_cst)) return error.FlushFailure; + if (diags.hasErrors()) return error.LinkFailure; } fn writeAtomsWorker(self: *MachO, file: File) void { @@ -2505,13 +2496,15 @@ fn writeAtomsWorker(self: *MachO, file: File) void { self.reportParseError2(file.getIndex(), "failed to resolve relocations and write atoms: {s}", .{ @errorName(err), }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); }; } fn writeThunkWorker(self: *MachO, thunk: Thunk) void { const tracy = trace(@src()); defer tracy.end(); + + const diags = &self.base.comp.link_diags; + const doWork = struct { fn doWork(th: Thunk, buffer: []u8, macho_file: *MachO) !void { const off = math.cast(usize, th.value) orelse return error.Overflow; @@ -2522,8 +2515,7 @@ fn writeThunkWorker(self: *MachO, thunk: Thunk) void { }.doWork; const out = self.sections.items(.out)[thunk.out_n_sect].items; doWork(thunk, out, self) catch |err| { - self.reportUnexpectedError("failed to write contents of thunk: {s}", .{@errorName(err)}) catch {}; - _ = self.has_errors.swap(true, .seq_cst); + diags.addError("failed to write contents of thunk: {s}", .{@errorName(err)}); }; } @@ -2531,6 +2523,8 @@ fn writeSyntheticSectionWorker(self: *MachO, sect_id: u8, out: []u8) void { const tracy = trace(@src()); defer tracy.end(); + const diags = &self.base.comp.link_diags; + const Tag = enum { eh_frame, unwind_info, @@ -2575,18 +2569,18 @@ fn writeSyntheticSectionWorker(self: *MachO, sect_id: u8, out: []u8) void { unreachable; }; doWork(self, tag, out) catch |err| { - self.reportUnexpectedError("could not write section '{s},{s}': {s}", .{ - header.segName(), - header.sectName(), - @errorName(err), - }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); + diags.addError("could not write section '{s},{s}': {s}", .{ + header.segName(), header.sectName(), @errorName(err), + }); }; } fn updateLazyBindSizeWorker(self: *MachO) void { const tracy = trace(@src()); defer tracy.end(); + + const diags = &self.base.comp.link_diags; + const doWork = struct { fn doWork(macho_file: *MachO) !void { try macho_file.lazy_bind_section.updateSize(macho_file); @@ -2596,12 +2590,8 @@ fn updateLazyBindSizeWorker(self: *MachO) void { try macho_file.stubs_helper.write(macho_file, stream.writer()); } }.doWork; - doWork(self) catch |err| { - self.reportUnexpectedError("could not calculate size of lazy binding section: {s}", .{ - @errorName(err), - }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); - }; + doWork(self) catch |err| + diags.addError("could not calculate size of lazy binding section: {s}", .{@errorName(err)}); } pub fn updateLinkeditSizeWorker(self: *MachO, tag: enum { @@ -2611,6 +2601,7 @@ pub fn updateLinkeditSizeWorker(self: *MachO, tag: enum { export_trie, data_in_code, }) void { + const diags = &self.base.comp.link_diags; const res = switch (tag) { .rebase => self.rebase_section.updateSize(self), .bind => self.bind_section.updateSize(self), @@ -2618,13 +2609,8 @@ pub fn updateLinkeditSizeWorker(self: *MachO, 
tag: enum { .export_trie => self.export_trie.updateSize(self), .data_in_code => self.data_in_code.updateSize(self), }; - res catch |err| { - self.reportUnexpectedError("could not calculate size of {s} section: {s}", .{ - @tagName(tag), - @errorName(err), - }) catch {}; - _ = self.has_errors.swap(true, .seq_cst); - }; + res catch |err| + diags.addError("could not calculate size of {s} section: {s}", .{ @tagName(tag), @errorName(err) }); } fn writeSectionsToFile(self: *MachO) !void { @@ -3432,6 +3418,7 @@ pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void { } fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void { + const diags = &self.base.comp.link_diags; const sect = &self.sections.items(.header)[sect_index]; const seg_id = self.sections.items(.segment_id)[sect_index]; @@ -3467,7 +3454,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo const mem_capacity = self.allocatedSizeVirtual(seg.vmaddr); if (needed_size > mem_capacity) { - var err = try self.base.addErrorWithNotes(2); + var err = try diags.addErrorWithNotes(2); try err.addMsg("fatal linker error: cannot expand segment seg({d})({s}) in virtual memory", .{ seg_id, seg.segName(), @@ -3766,41 +3753,18 @@ pub fn eatPrefix(path: []const u8, prefix: []const u8) ?[]const u8 { return null; } -pub fn reportParseError( - self: *MachO, - path: Path, - comptime format: []const u8, - args: anytype, -) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(1); - try err.addMsg(format, args); - try err.addNote("while parsing {}", .{path}); -} - pub fn reportParseError2( self: *MachO, file_index: File.Index, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(1); + const diags = &self.base.comp.link_diags; + var err = try diags.addErrorWithNotes(1); try err.addMsg(format, args); try err.addNote("while parsing {}", .{self.getFile(file_index).?.fmtPath()}); } -fn reportMissingLibraryError( - self: *MachO, - checked_paths: []const []const u8, - comptime format: []const u8, - args: anytype, -) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(checked_paths.len); - try err.addMsg(format, args); - for (checked_paths) |path| { - try err.addNote("tried {s}", .{path}); - } -} - fn reportMissingDependencyError( self: *MachO, parent: File.Index, @@ -3809,7 +3773,8 @@ fn reportMissingDependencyError( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(2 + checked_paths.len); + const diags = &self.base.comp.link_diags; + var err = try diags.addErrorWithNotes(2 + checked_paths.len); try err.addMsg(format, args); try err.addNote("while resolving {s}", .{path}); try err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()}); @@ -3825,18 +3790,13 @@ fn reportDependencyError( comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(2); + const diags = &self.base.comp.link_diags; + var err = try diags.addErrorWithNotes(2); try err.addMsg(format, args); try err.addNote("while parsing {s}", .{path}); try err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()}); } -pub fn reportUnexpectedError(self: *MachO, comptime format: []const u8, args: anytype) error{OutOfMemory}!void { - var err = try self.base.addErrorWithNotes(1); - try err.addMsg(format, args); - try err.addNote("please report this as a linker bug on 
@@ -3766,41 +3753,18 @@ pub fn eatPrefix(path: []const u8, prefix: []const u8) ?[]const u8 {
     return null;
 }
 
-pub fn reportParseError(
-    self: *MachO,
-    path: Path,
-    comptime format: []const u8,
-    args: anytype,
-) error{OutOfMemory}!void {
-    var err = try self.base.addErrorWithNotes(1);
-    try err.addMsg(format, args);
-    try err.addNote("while parsing {}", .{path});
-}
-
 pub fn reportParseError2(
     self: *MachO,
     file_index: File.Index,
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
-    var err = try self.base.addErrorWithNotes(1);
+    const diags = &self.base.comp.link_diags;
+    var err = try diags.addErrorWithNotes(1);
     try err.addMsg(format, args);
     try err.addNote("while parsing {}", .{self.getFile(file_index).?.fmtPath()});
 }
 
-fn reportMissingLibraryError(
-    self: *MachO,
-    checked_paths: []const []const u8,
-    comptime format: []const u8,
-    args: anytype,
-) error{OutOfMemory}!void {
-    var err = try self.base.addErrorWithNotes(checked_paths.len);
-    try err.addMsg(format, args);
-    for (checked_paths) |path| {
-        try err.addNote("tried {s}", .{path});
-    }
-}
-
 fn reportMissingDependencyError(
     self: *MachO,
     parent: File.Index,
@@ -3809,7 +3773,8 @@ fn reportMissingDependencyError(
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
-    var err = try self.base.addErrorWithNotes(2 + checked_paths.len);
+    const diags = &self.base.comp.link_diags;
+    var err = try diags.addErrorWithNotes(2 + checked_paths.len);
     try err.addMsg(format, args);
     try err.addNote("while resolving {s}", .{path});
     try err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()});
@@ -3825,18 +3790,13 @@ fn reportDependencyError(
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
-    var err = try self.base.addErrorWithNotes(2);
+    const diags = &self.base.comp.link_diags;
+    var err = try diags.addErrorWithNotes(2);
     try err.addMsg(format, args);
     try err.addNote("while parsing {s}", .{path});
     try err.addNote("a dependency of {}", .{self.getFile(parent).?.fmtPath()});
 }
 
-pub fn reportUnexpectedError(self: *MachO, comptime format: []const u8, args: anytype) error{OutOfMemory}!void {
-    var err = try self.base.addErrorWithNotes(1);
-    try err.addMsg(format, args);
-    try err.addNote("please report this as a linker bug on https://github.com/ziglang/zig/issues/new/choose", .{});
-}
-
 fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
     const tracy = trace(@src());
     defer tracy.end();
@@ -3844,6 +3804,7 @@ fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
     if (self.dupes.keys().len == 0) return; // Nothing to do
 
     const gpa = self.base.comp.gpa;
+    const diags = &self.base.comp.link_diags;
     const max_notes = 3;
 
     // We will sort by name, and then by file to ensure deterministic output.
@@ -3861,7 +3822,7 @@ fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
         const notes = self.dupes.get(key).?;
         const nnotes = @min(notes.items.len, max_notes) + @intFromBool(notes.items.len > max_notes);
 
-        var err = try self.base.addErrorWithNotes(nnotes + 1);
+        var err = try diags.addErrorWithNotes(nnotes + 1);
         try err.addMsg("duplicate symbol definition: {s}", .{sym.getName(self)});
         try err.addNote("defined by {}", .{sym.getFile(self).?.fmtPath()});
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index ba3c66689b..8375abbc16 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -6,6 +6,7 @@ pub fn deinit(self: *Archive, allocator: Allocator) void {
 
 pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File.HandleIndex, fat_arch: ?fat.Arch) !void {
     const gpa = macho_file.base.comp.gpa;
+    const diags = &macho_file.base.comp.link_diags;
 
     var arena = std.heap.ArenaAllocator.init(gpa);
     defer arena.deinit();
@@ -28,7 +29,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
         pos += @sizeOf(ar_hdr);
 
         if (!mem.eql(u8, &hdr.ar_fmag, ARFMAG)) {
-            try macho_file.reportParseError(path, "invalid header delimiter: expected '{s}', found '{s}'", .{
+            try diags.reportParseError(path, "invalid header delimiter: expected '{s}', found '{s}'", .{
                 std.fmt.fmtSliceEscapeLower(ARFMAG), std.fmt.fmtSliceEscapeLower(&hdr.ar_fmag),
             });
             return error.MalformedArchive;
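With the path-keyed `reportParseError` deleted from MachO.zig and Archive.zig now calling it through `diags`, the helper has evidently moved onto `link.Diags`, where it no longer depends on a MachO-specific `File.Index`. A hedged sketch of the assumed replacement, mirroring the deleted body:

pub fn reportParseError(
    diags: *Diags,
    path: Path, // the same Path type the deleted MachO method took
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    var err = try diags.addErrorWithNotes(1);
    try err.addMsg(format, args);
    try err.addNote("while parsing {}", .{path});
}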
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index 6e3b4311eb..f8bf9c37e7 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -893,6 +893,7 @@ fn resolveRelocInner(
 const x86_64 = struct {
     fn relaxGotLoad(self: Atom, code: []u8, rel: Relocation, macho_file: *MachO) ResolveError!void {
         dev.check(.x86_64_backend);
+        const diags = &macho_file.base.comp.link_diags;
         const old_inst = disassemble(code) orelse return error.RelaxFail;
         switch (old_inst.encoding.mnemonic) {
             .mov => {
@@ -901,7 +902,7 @@ const x86_64 = struct {
                 encode(&.{inst}, code) catch return error.RelaxFail;
             },
             else => |x| {
-                var err = try macho_file.base.addErrorWithNotes(2);
+                var err = try diags.addErrorWithNotes(2);
                 try err.addMsg("{s}: 0x{x}: 0x{x}: failed to relax relocation of type {}", .{
                     self.getName(macho_file),
                     self.getAddress(macho_file),
diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig
index 2a89e5da56..666a73f600 100644
--- a/src/link/MachO/ZigObject.zig
+++ b/src/link/MachO/ZigObject.zig
@@ -364,6 +364,8 @@ pub fn scanRelocs(self: *ZigObject, macho_file: *MachO) !void {
 
 pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void {
     const gpa = macho_file.base.comp.gpa;
+    const diags = &macho_file.base.comp.link_diags;
+
     var has_error = false;
     for (self.getAtoms()) |atom_index| {
         const atom = self.getAtom(atom_index) orelse continue;
@@ -379,17 +381,12 @@ pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void {
         defer gpa.free(code);
         self.getAtomData(macho_file, atom.*, code) catch |err| {
             switch (err) {
-                error.InputOutput => {
-                    try macho_file.reportUnexpectedError("fetching code for '{s}' failed", .{
-                        atom.getName(macho_file),
-                    });
-                },
-                else => |e| {
-                    try macho_file.reportUnexpectedError("unexpected error while fetching code for '{s}': {s}", .{
-                        atom.getName(macho_file),
-                        @errorName(e),
-                    });
-                },
+                error.InputOutput => return diags.fail("fetching code for '{s}' failed", .{
+                    atom.getName(macho_file),
+                }),
+                else => |e| return diags.fail("failed to fetch code for '{s}': {s}", .{
+                    atom.getName(macho_file), @errorName(e),
+                }),
             }
             has_error = true;
             continue;
@@ -398,9 +395,7 @@ pub fn resolveRelocs(self: *ZigObject, macho_file: *MachO) !void {
         atom.resolveRelocs(macho_file, code) catch |err| {
             switch (err) {
                 error.ResolveFailed => {},
-                else => |e| {
-                    try macho_file.reportUnexpectedError("unexpected error while resolving relocations: {s}", .{@errorName(e)});
-                },
+                else => |e| return diags.fail("failed to resolve relocations: {s}", .{@errorName(e)}),
             }
             has_error = true;
             continue;
@@ -426,6 +421,7 @@ pub fn calcNumRelocs(self: *ZigObject, macho_file: *MachO) void {
 
 pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) !void {
     const gpa = macho_file.base.comp.gpa;
+    const diags = &macho_file.base.comp.link_diags;
 
     for (self.getAtoms()) |atom_index| {
         const atom = self.getAtom(atom_index) orelse continue;
@@ -439,21 +435,8 @@ pub fn writeRelocs(self: *ZigObject, macho_file: *MachO) !void {
         const atom_size = std.math.cast(usize, atom.size) orelse return error.Overflow;
         const code = try gpa.alloc(u8, atom_size);
         defer gpa.free(code);
-        self.getAtomData(macho_file, atom.*, code) catch |err| switch (err) {
-            error.InputOutput => {
-                try macho_file.reportUnexpectedError("fetching code for '{s}' failed", .{
-                    atom.getName(macho_file),
-                });
-                return error.FlushFailure;
-            },
-            else => |e| {
-                try macho_file.reportUnexpectedError("unexpected error while fetching code for '{s}': {s}", .{
-                    atom.getName(macho_file),
-                    @errorName(e),
-                });
-                return error.FlushFailure;
-            },
-        };
+        self.getAtomData(macho_file, atom.*, code) catch |err|
+            return diags.fail("failed to fetch code for '{s}': {s}", .{ atom.getName(macho_file), @errorName(err) });
         const file_offset = header.offset + atom.value;
         try atom.writeRelocs(macho_file, code, relocs[extra.rel_out_index..][0..extra.rel_out_count]);
         try macho_file.base.file.?.pwriteAll(code, file_offset);
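Worth noting in the ZigObject hunks: where `reportUnexpectedError` recorded a message and fell through to `has_error = true; continue`, `return diags.fail(...)` records the message and aborts the function in one step, so the first fetch failure now ends relocation processing for the whole object. A hedged, self-contained illustration of the pattern (`copySection` is hypothetical; `Diags` refers to the sketch above):

const std = @import("std");

// Any I/O failure is recorded once and surfaced as error.LinkFailure,
// with no separate has_error bookkeeping at the call site.
fn copySection(diags: *Diags, in: std.fs.File, out: []u8) error{LinkFailure}!void {
    const n = in.readAll(out) catch |err|
        return diags.fail("failed to read section: {s}", .{@errorName(err)});
    if (n != out.len)
        return diags.fail("truncated section: read {d} of {d} bytes", .{ n, out.len });
}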
diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig
index 91bb295ed6..e45ff0f3c2 100644
--- a/src/link/MachO/relocatable.zig
+++ b/src/link/MachO/relocatable.zig
@@ -1,5 +1,6 @@
 pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
     const gpa = macho_file.base.comp.gpa;
+    const diags = &macho_file.base.comp.link_diags;
 
     // TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
     var positionals = std.ArrayList(Compilation.LinkObject).init(gpa);
@@ -29,8 +30,8 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
 
     for (positionals.items) |obj| {
         macho_file.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err| switch (err) {
-            error.UnknownFileType => try macho_file.reportParseError(obj.path, "unknown file type for an input file", .{}),
-            else => |e| try macho_file.reportParseError(
+            error.UnknownFileType => try diags.reportParseError(obj.path, "unknown file type for an input file", .{}),
+            else => |e| try diags.reportParseError(
                 obj.path,
                 "unexpected error: reading input file failed with error {s}",
                 .{@errorName(e)},
@@ -38,11 +39,11 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
         };
     }
 
-    if (macho_file.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
 
     try macho_file.parseInputFiles();
 
-    if (macho_file.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
 
     try macho_file.resolveSymbols();
     try macho_file.dedupLiterals();
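`flushObject` now consults `diags.hasErrors()` between phases instead of a method on the linker base. The shape of that gating, distilled into a hedged sketch (the phase signature is invented):

// Run each phase to completion, then stop at the first phase boundary
// with recorded diagnostics: a single phase may accumulate many errors
// before the link aborts, which keeps error reporting batched.
fn runGated(diags: *Diags, phases: []const *const fn (*Diags) void) error{FlushFailure}!void {
    for (phases) |phase| {
        phase(diags);
        if (diags.hasErrors()) return error.FlushFailure;
    }
}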
@@ -75,6 +76,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
 
 pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
     const gpa = comp.gpa;
+    const diags = &macho_file.base.comp.link_diags;
 
     var positionals = std.ArrayList(Compilation.LinkObject).init(gpa);
     defer positionals.deinit();
@@ -94,8 +96,8 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
 
     for (positionals.items) |obj| {
         macho_file.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err| switch (err) {
-            error.UnknownFileType => try macho_file.reportParseError(obj.path, "unknown file type for an input file", .{}),
-            else => |e| try macho_file.reportParseError(
+            error.UnknownFileType => try diags.reportParseError(obj.path, "unknown file type for an input file", .{}),
+            else => |e| try diags.reportParseError(
                 obj.path,
                 "unexpected error: reading input file failed with error {s}",
                 .{@errorName(e)},
@@ -103,11 +105,11 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
         };
     }
 
-    if (macho_file.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
 
     try parseInputFilesAr(macho_file);
 
-    if (macho_file.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
 
     // First, we flush relocatable object file generated with our backends.
     if (macho_file.getZigObject()) |zo| {
@@ -228,7 +230,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
     try macho_file.base.file.?.setEndPos(total_size);
     try macho_file.base.file.?.pwriteAll(buffer.items, 0);
 
-    if (macho_file.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
 }
 
 fn parseInputFilesAr(macho_file: *MachO) !void {
@@ -293,6 +295,8 @@ fn calcSectionSizes(macho_file: *MachO) !void {
     const tracy = trace(@src());
     defer tracy.end();
 
+    const diags = &macho_file.base.comp.link_diags;
+
     if (macho_file.getZigObject()) |zo| {
         // TODO this will create a race as we need to track merging of debug sections which we currently don't
         zo.calcNumRelocs(macho_file);
@@ -337,7 +341,7 @@ fn calcSectionSizes(macho_file: *MachO) !void {
     }
 
     try calcSymtabSize(macho_file);
-    if (macho_file.has_errors.swap(false, .seq_cst)) return error.FlushFailure;
+    if (diags.hasErrors()) return error.LinkFailure;
 }
 
 fn calcSectionSizeWorker(macho_file: *MachO, sect_id: u8) void {
@@ -365,6 +369,8 @@ fn calcEhFrameSizeWorker(macho_file: *MachO) void {
     const tracy = trace(@src());
     defer tracy.end();
 
+    const diags = &macho_file.base.comp.link_diags;
+
     const doWork = struct {
         fn doWork(mfile: *MachO, header: *macho.section_64) !void {
             header.size = try eh_frame.calcSize(mfile);
@@ -374,12 +380,8 @@ fn calcEhFrameSizeWorker(macho_file: *MachO) void {
     }.doWork;
 
     const header = &macho_file.sections.items(.header)[macho_file.eh_frame_sect_index.?];
-    doWork(macho_file, header) catch |err| {
-        macho_file.reportUnexpectedError("failed to calculate size of section '__TEXT,__eh_frame': {s}", .{
-            @errorName(err),
-        }) catch {};
-        _ = macho_file.has_errors.swap(true, .seq_cst);
-    };
+    doWork(macho_file, header) catch |err|
+        diags.addError("failed to calculate size of section '__TEXT,__eh_frame': {s}", .{@errorName(err)});
 }
 
 fn calcCompactUnwindSize(macho_file: *MachO) void {
@@ -592,6 +594,7 @@ fn writeSections(macho_file: *MachO) !void {
     defer tracy.end();
 
     const gpa = macho_file.base.comp.gpa;
+    const diags = &macho_file.base.comp.link_diags;
     const cpu_arch = macho_file.getTarget().cpu.arch;
     const slice = macho_file.sections.slice();
     for (slice.items(.header), slice.items(.out), slice.items(.relocs), 0..) |header, *out, *relocs, n_sect| {
@@ -637,7 +640,7 @@ fn writeSections(macho_file: *MachO) !void {
         }
     }
 
-    if (macho_file.has_errors.swap(false, .seq_cst)) return error.FlushFailure;
+    if (diags.hasErrors()) return error.LinkFailure;
 
     if (macho_file.getZigObject()) |zo| {
         try zo.writeRelocs(macho_file);
@@ -651,33 +654,28 @@ fn writeAtomsWorker(macho_file: *MachO, file: File) void {
         macho_file.reportParseError2(file.getIndex(), "failed to write atoms: {s}", .{
             @errorName(err),
         }) catch {};
-        _ = macho_file.has_errors.swap(true, .seq_cst);
     };
 }
 
 fn writeEhFrameWorker(macho_file: *MachO) void {
     const tracy = trace(@src());
     defer tracy.end();
+
+    const diags = &macho_file.base.comp.link_diags;
     const sect_index = macho_file.eh_frame_sect_index.?;
     const buffer = macho_file.sections.items(.out)[sect_index];
     const relocs = macho_file.sections.items(.relocs)[sect_index];
-    eh_frame.writeRelocs(macho_file, buffer.items, relocs.items) catch |err| {
-        macho_file.reportUnexpectedError("failed to write '__LD,__eh_frame' section: {s}", .{
-            @errorName(err),
-        }) catch {};
-        _ = macho_file.has_errors.swap(true, .seq_cst);
-    };
+    eh_frame.writeRelocs(macho_file, buffer.items, relocs.items) catch |err|
+        diags.addError("failed to write '__LD,__eh_frame' section: {s}", .{@errorName(err)});
 }
 
 fn writeCompactUnwindWorker(macho_file: *MachO, object: *Object) void {
     const tracy = trace(@src());
     defer tracy.end();
-    object.writeCompactUnwindRelocatable(macho_file) catch |err| {
-        macho_file.reportUnexpectedError("failed to write '__LD,__eh_frame' section: {s}", .{
-            @errorName(err),
-        }) catch {};
-        _ = macho_file.has_errors.swap(true, .seq_cst);
-    };
+
+    const diags = &macho_file.base.comp.link_diags;
+    object.writeCompactUnwindRelocatable(macho_file) catch |err|
+        diags.addError("failed to write '__LD,__eh_frame' section: {s}", .{@errorName(err)});
 }
 
 fn writeSectionsToFile(macho_file: *MachO) !void {
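From here the same mechanical migration repeats in the wasm linker. One contract worth spelling out in `parseObjectFile` below: an input that simply is not a wasm object is a normal `false`, while a real parse failure becomes a recorded diagnostic plus an error. A hedged, self-contained sketch of that tri-state (helper names are invented):

const std = @import("std");

fn checkWasmMagic(bytes: []const u8) error{ InvalidMagicByte, Truncated }!void {
    if (bytes.len < 4) return error.Truncated;
    if (!std.mem.eql(u8, bytes[0..4], "\x00asm")) return error.InvalidMagicByte;
}

// Returns false for non-object inputs (e.g. an archive), true on success,
// and records a diagnostic before erroring out on genuine failures.
fn tryParseObject(diags: *Diags, path: []const u8, bytes: []const u8) !bool {
    checkWasmMagic(bytes) catch |err| switch (err) {
        error.InvalidMagicByte => return false,
        else => |e| return diags.fail("Failed parsing object file '{s}': {s}", .{ path, @errorName(e) }),
    };
    return true;
}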
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 6a27909b31..df4131b7fe 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -649,6 +649,8 @@ fn parseInputFiles(wasm: *Wasm, files: []const []const u8) !void {
 /// file and parsed successfully. Returns false when file is not an object file.
 /// May return an error instead when parsing failed.
 fn parseObjectFile(wasm: *Wasm, path: []const u8) !bool {
+    const diags = &wasm.base.comp.link_diags;
+
     const obj_file = try fs.cwd().openFile(path, .{});
     errdefer obj_file.close();
 
@@ -656,7 +658,7 @@ fn parseObjectFile(wasm: *Wasm, path: []const u8) !bool {
     var object = Object.create(wasm, obj_file, path, null) catch |err| switch (err) {
         error.InvalidMagicByte, error.NotObjectFile => return false,
         else => |e| {
-            var err_note = try wasm.base.addErrorWithNotes(1);
+            var err_note = try diags.addErrorWithNotes(1);
             try err_note.addMsg("Failed parsing object file: {s}", .{@errorName(e)});
             try err_note.addNote("while parsing '{s}'", .{path});
             return error.FlushFailure;
@@ -698,6 +700,7 @@ pub inline fn getAtomPtr(wasm: *Wasm, index: Atom.Index) *Atom {
 /// are referenced by other object files or Zig code.
 fn parseArchive(wasm: *Wasm, path: []const u8, force_load: bool) !bool {
     const gpa = wasm.base.comp.gpa;
+    const diags = &wasm.base.comp.link_diags;
 
     const archive_file = try fs.cwd().openFile(path, .{});
     errdefer archive_file.close();
@@ -712,7 +715,7 @@ fn parseArchive(wasm: *Wasm, path: []const u8, force_load: bool) !bool {
             return false;
         },
         else => |e| {
-            var err_note = try wasm.base.addErrorWithNotes(1);
+            var err_note = try diags.addErrorWithNotes(1);
             try err_note.addMsg("Failed parsing archive: {s}", .{@errorName(e)});
             try err_note.addNote("while parsing archive {s}", .{path});
             return error.FlushFailure;
@@ -739,7 +742,7 @@ fn parseArchive(wasm: *Wasm, path: []const u8, force_load: bool) !bool {
 
     for (offsets.keys()) |file_offset| {
         var object = archive.parseObject(wasm, file_offset) catch |e| {
-            var err_note = try wasm.base.addErrorWithNotes(1);
+            var err_note = try diags.addErrorWithNotes(1);
             try err_note.addMsg("Failed parsing object: {s}", .{@errorName(e)});
             try err_note.addNote("while parsing object in archive {s}", .{path});
             return error.FlushFailure;
@@ -763,6 +766,7 @@ fn requiresTLSReloc(wasm: *const Wasm) bool {
 
 fn resolveSymbolsInObject(wasm: *Wasm, file_index: File.Index) !void {
     const gpa = wasm.base.comp.gpa;
+    const diags = &wasm.base.comp.link_diags;
     const obj_file = wasm.file(file_index).?;
     log.debug("Resolving symbols in object: '{s}'", .{obj_file.path()});
 
@@ -777,7 +781,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, file_index: File.Index) !void {
 
         if (symbol.isLocal()) {
             if (symbol.isUndefined()) {
-                var err = try wasm.base.addErrorWithNotes(1);
+                var err = try diags.addErrorWithNotes(1);
                 try err.addMsg("Local symbols are not allowed to reference imports", .{});
                 try err.addNote("symbol '{s}' defined in '{s}'", .{ sym_name, obj_file.path() });
             }
@@ -814,7 +818,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, file_index: File.Index) !void {
                     break :outer; // existing is weak, while new one isn't. Replace it.
                 }
                 // both are defined and weak, we have a symbol collision.
-                var err = try wasm.base.addErrorWithNotes(2);
+                var err = try diags.addErrorWithNotes(2);
                 try err.addMsg("symbol '{s}' defined multiple times", .{sym_name});
                 try err.addNote("first definition in '{s}'", .{existing_file_path});
                 try err.addNote("next definition in '{s}'", .{obj_file.path()});
@@ -825,7 +829,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, file_index: File.Index) !void {
             }
 
             if (symbol.tag != existing_sym.tag) {
-                var err = try wasm.base.addErrorWithNotes(2);
+                var err = try diags.addErrorWithNotes(2);
                 try err.addMsg("symbol '{s}' mismatching types '{s}' and '{s}'", .{ sym_name, @tagName(symbol.tag), @tagName(existing_sym.tag) });
                 try err.addNote("first definition in '{s}'", .{existing_file_path});
                 try err.addNote("next definition in '{s}'", .{obj_file.path()});
@@ -845,7 +849,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, file_index: File.Index) !void {
                 const imp = obj_file.import(sym_index);
                 const module_name = obj_file.string(imp.module_name);
                 if (!mem.eql(u8, existing_name, module_name)) {
-                    var err = try wasm.base.addErrorWithNotes(2);
+                    var err = try diags.addErrorWithNotes(2);
                     try err.addMsg("symbol '{s}' module name mismatch. Expected '{s}', but found '{s}'", .{
                         sym_name,
                         existing_name,
@@ -865,7 +869,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, file_index: File.Index) !void {
                 const existing_ty = wasm.getGlobalType(existing_loc);
                 const new_ty = wasm.getGlobalType(location);
                 if (existing_ty.mutable != new_ty.mutable or existing_ty.valtype != new_ty.valtype) {
-                    var err = try wasm.base.addErrorWithNotes(2);
+                    var err = try diags.addErrorWithNotes(2);
                     try err.addMsg("symbol '{s}' mismatching global types", .{sym_name});
                     try err.addNote("first definition in '{s}'", .{existing_file_path});
                     try err.addNote("next definition in '{s}'", .{obj_file.path()});
@@ -876,7 +880,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, file_index: File.Index) !void {
                 const existing_ty = wasm.getFunctionSignature(existing_loc);
                 const new_ty = wasm.getFunctionSignature(location);
                 if (!existing_ty.eql(new_ty)) {
-                    var err = try wasm.base.addErrorWithNotes(3);
+                    var err = try diags.addErrorWithNotes(3);
                     try err.addMsg("symbol '{s}' mismatching function signatures.", .{sym_name});
                     try err.addNote("expected signature {}, but found signature {}", .{ existing_ty, new_ty });
                     try err.addNote("first definition in '{s}'", .{existing_file_path});
@@ -909,6 +913,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, file_index: File.Index) !void {
 
 fn resolveSymbolsInArchives(wasm: *Wasm) !void {
     const gpa = wasm.base.comp.gpa;
+    const diags = &wasm.base.comp.link_diags;
     if (wasm.archives.items.len == 0) return;
 
     log.debug("Resolving symbols in archives", .{});
@@ -928,7 +933,7 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
             // Parse object and and resolve symbols again before we check remaining
             // undefined symbols.
             var object = archive.parseObject(wasm, offset.items[0]) catch |e| {
-                var err_note = try wasm.base.addErrorWithNotes(1);
+                var err_note = try diags.addErrorWithNotes(1);
                 try err_note.addMsg("Failed parsing object: {s}", .{@errorName(e)});
                 try err_note.addNote("while parsing object in archive {s}", .{archive.name});
                 return error.FlushFailure;
@@ -1172,6 +1177,7 @@ fn validateFeatures(
     emit_features_count: *u32,
 ) !void {
     const comp = wasm.base.comp;
+    const diags = &wasm.base.comp.link_diags;
     const target = comp.root_mod.resolved_target.result;
     const shared_memory = comp.config.shared_memory;
     const cpu_features = target.cpu.features;
@@ -1235,7 +1241,7 @@ fn validateFeatures(
             allowed[used_index] = is_enabled;
             emit_features_count.* += @intFromBool(is_enabled);
         } else if (is_enabled and !allowed[used_index]) {
-            var err = try wasm.base.addErrorWithNotes(1);
+            var err = try diags.addErrorWithNotes(1);
             try err.addMsg("feature '{}' not allowed, but used by linked object", .{@as(types.Feature.Tag, @enumFromInt(used_index))});
             try err.addNote("defined in '{s}'", .{wasm.files.items(.data)[used_set >> 1].object.path});
             valid_feature_set = false;
@@ -1249,7 +1255,7 @@ fn validateFeatures(
     if (shared_memory) {
         const disallowed_feature = disallowed[@intFromEnum(types.Feature.Tag.shared_mem)];
         if (@as(u1, @truncate(disallowed_feature)) != 0) {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg(
                 "shared-memory is disallowed by '{s}' because it wasn't compiled with 'atomics' and 'bulk-memory' features enabled",
                 .{wasm.files.items(.data)[disallowed_feature >> 1].object.path},
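The `used_set >> 1` and `@truncate` pairs in `validateFeatures` decode a packed per-feature slot: bit 0 holds the flag itself and the remaining bits hold the index of the object file that set it, which is how the notes can name the offending file. A hedged sketch of the assumed encoding:

const std = @import("std");

const FeatureSlot = struct {
    fn pack(file_index: usize, set: bool) usize {
        return (file_index << 1) | @intFromBool(set);
    }
    fn isSet(slot: usize) bool {
        return @as(u1, @truncate(slot)) != 0;
    }
    fn fileIndex(slot: usize) usize {
        return slot >> 1;
    }
};

test "feature slots round-trip" {
    const slot = FeatureSlot.pack(7, true);
    try std.testing.expect(FeatureSlot.isSet(slot));
    try std.testing.expectEqual(@as(usize, 7), FeatureSlot.fileIndex(slot));
}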
try err.addMsg("feature '{}' is not used but is required for shared-memory", .{feature}); } } @@ -1268,7 +1274,7 @@ fn validateFeatures( if (has_tls) { for ([_]types.Feature.Tag{ .atomics, .bulk_memory }) |feature| { if (!allowed[@intFromEnum(feature)]) { - var err = try wasm.base.addErrorWithNotes(0); + var err = try diags.addErrorWithNotes(0); try err.addMsg("feature '{}' is not used but is required for thread-local storage", .{feature}); } } @@ -1282,7 +1288,7 @@ fn validateFeatures( // from here a feature is always used const disallowed_feature = disallowed[@intFromEnum(feature.tag)]; if (@as(u1, @truncate(disallowed_feature)) != 0) { - var err = try wasm.base.addErrorWithNotes(2); + var err = try diags.addErrorWithNotes(2); try err.addMsg("feature '{}' is disallowed, but used by linked object", .{feature.tag}); try err.addNote("disallowed by '{s}'", .{wasm.files.items(.data)[disallowed_feature >> 1].object.path}); try err.addNote("used in '{s}'", .{object.path}); @@ -1296,7 +1302,7 @@ fn validateFeatures( for (required, 0..) |required_feature, feature_index| { const is_required = @as(u1, @truncate(required_feature)) != 0; if (is_required and !object_used_features[feature_index]) { - var err = try wasm.base.addErrorWithNotes(2); + var err = try diags.addErrorWithNotes(2); try err.addMsg("feature '{}' is required but not used in linked object", .{@as(types.Feature.Tag, @enumFromInt(feature_index))}); try err.addNote("required by '{s}'", .{wasm.files.items(.data)[required_feature >> 1].object.path}); try err.addNote("missing in '{s}'", .{object.path}); @@ -1364,6 +1370,7 @@ pub fn findGlobalSymbol(wasm: *Wasm, name: []const u8) ?SymbolLoc { fn checkUndefinedSymbols(wasm: *const Wasm) !void { const comp = wasm.base.comp; + const diags = &wasm.base.comp.link_diags; if (comp.config.output_mode == .Obj) return; if (wasm.import_symbols) return; @@ -1377,7 +1384,7 @@ fn checkUndefinedSymbols(wasm: *const Wasm) !void { else wasm.name; const symbol_name = undef.getName(wasm); - var err = try wasm.base.addErrorWithNotes(1); + var err = try diags.addErrorWithNotes(1); try err.addMsg("could not resolve undefined symbol '{s}'", .{symbol_name}); try err.addNote("defined in '{s}'", .{file_name}); } @@ -1736,6 +1743,7 @@ fn sortDataSegments(wasm: *Wasm) !void { /// contain any parameters. fn setupInitFunctions(wasm: *Wasm) !void { const gpa = wasm.base.comp.gpa; + const diags = &wasm.base.comp.link_diags; // There's no constructors for Zig so we can simply search through linked object files only. 
@@ -1736,6 +1743,7 @@ fn sortDataSegments(wasm: *Wasm) !void {
 /// contain any parameters.
 fn setupInitFunctions(wasm: *Wasm) !void {
     const gpa = wasm.base.comp.gpa;
+    const diags = &wasm.base.comp.link_diags;
     // There's no constructors for Zig so we can simply search through linked object files only.
     for (wasm.objects.items) |file_index| {
         const object: Object = wasm.files.items(.data)[@intFromEnum(file_index)].object;
@@ -1751,7 +1759,7 @@ fn setupInitFunctions(wasm: *Wasm) !void {
             break :ty object.func_types[func.type_index];
         };
         if (ty.params.len != 0) {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg("constructor functions cannot take arguments: '{s}'", .{object.string_table.get(symbol.name)});
         }
         log.debug("appended init func '{s}'\n", .{object.string_table.get(symbol.name)});
@@ -2130,12 +2138,13 @@ fn mergeTypes(wasm: *Wasm) !void {
 
 fn checkExportNames(wasm: *Wasm) !void {
     const force_exp_names = wasm.export_symbol_names;
+    const diags = &wasm.base.comp.link_diags;
     if (force_exp_names.len > 0) {
         var failed_exports = false;
 
         for (force_exp_names) |exp_name| {
            const loc = wasm.findGlobalSymbol(exp_name) orelse {
-                var err = try wasm.base.addErrorWithNotes(0);
+                var err = try diags.addErrorWithNotes(0);
                 try err.addMsg("could not export '{s}', symbol not found", .{exp_name});
                 failed_exports = true;
                 continue;
@@ -2195,18 +2204,19 @@ fn setupExports(wasm: *Wasm) !void {
 
 fn setupStart(wasm: *Wasm) !void {
     const comp = wasm.base.comp;
+    const diags = &wasm.base.comp.link_diags;
     // do not export entry point if user set none or no default was set.
     const entry_name = wasm.entry_name orelse return;
 
     const symbol_loc = wasm.findGlobalSymbol(entry_name) orelse {
-        var err = try wasm.base.addErrorWithNotes(0);
+        var err = try diags.addErrorWithNotes(0);
         try err.addMsg("Entry symbol '{s}' missing, use '-fno-entry' to suppress", .{entry_name});
         return error.FlushFailure;
     };
 
     const symbol = symbol_loc.getSymbol(wasm);
     if (symbol.tag != .function) {
-        var err = try wasm.base.addErrorWithNotes(0);
+        var err = try diags.addErrorWithNotes(0);
         try err.addMsg("Entry symbol '{s}' is not a function", .{entry_name});
         return error.FlushFailure;
     }
@@ -2220,6 +2230,7 @@ fn setupStart(wasm: *Wasm) !void {
 /// Sets up the memory section of the wasm module, as well as the stack.
 fn setupMemory(wasm: *Wasm) !void {
     const comp = wasm.base.comp;
+    const diags = &wasm.base.comp.link_diags;
     const shared_memory = comp.config.shared_memory;
     log.debug("Setting up memory layout", .{});
     const page_size = std.wasm.page_size; // 64kb
@@ -2312,15 +2323,15 @@ fn setupMemory(wasm: *Wasm) !void {
 
     if (wasm.initial_memory) |initial_memory| {
         if (!std.mem.isAlignedGeneric(u64, initial_memory, page_size)) {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg("Initial memory must be {d}-byte aligned", .{page_size});
         }
         if (memory_ptr > initial_memory) {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg("Initial memory too small, must be at least {d} bytes", .{memory_ptr});
         }
         if (initial_memory > max_memory_allowed) {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg("Initial memory exceeds maximum memory {d}", .{max_memory_allowed});
         }
         memory_ptr = initial_memory;
@@ -2338,15 +2349,15 @@ fn setupMemory(wasm: *Wasm) !void {
 
     if (wasm.max_memory) |max_memory| {
         if (!std.mem.isAlignedGeneric(u64, max_memory, page_size)) {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg("Maximum memory must be {d}-byte aligned", .{page_size});
         }
        if (memory_ptr > max_memory) {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg("Maximum memory too small, must be at least {d} bytes", .{memory_ptr});
         }
         if (max_memory > max_memory_allowed) {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg("Maximum memory exceeds maximum amount {d}", .{max_memory_allowed});
         }
         wasm.memories.limits.max = @as(u32, @intCast(max_memory / page_size));
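The checks above require user-specified initial and maximum memory to be multiples of the 64 KiB wasm page size, at least as large as the laid-out data, and under the linker's `max_memory_allowed` before being encoded as page counts. A hedged arithmetic illustration (the 4 GiB bound is an assumption standing in for `max_memory_allowed`):

const std = @import("std");

test "wasm memory limits are page-granular" {
    const page_size = std.wasm.page_size; // 64 KiB
    const max_memory_allowed: u64 = 1 << 32; // assumed wasm32 ceiling
    const initial: u64 = 17 * page_size;

    try std.testing.expect(std.mem.isAlignedGeneric(u64, initial, page_size));
    try std.testing.expect(initial <= max_memory_allowed);
    // What lands in the memory section limits is a page count:
    try std.testing.expectEqual(@as(u32, 17), @as(u32, @intCast(initial / page_size)));
}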
@@ -2364,6 +2375,7 @@ fn setupMemory(wasm: *Wasm) !void {
 pub fn getMatchingSegment(wasm: *Wasm, file_index: File.Index, symbol_index: Symbol.Index) !u32 {
     const comp = wasm.base.comp;
     const gpa = comp.gpa;
+    const diags = &wasm.base.comp.link_diags;
     const obj_file = wasm.file(file_index).?;
     const symbol = obj_file.symbols()[@intFromEnum(symbol_index)];
     const index: u32 = @intCast(wasm.segments.items.len);
@@ -2450,7 +2462,7 @@ pub fn getMatchingSegment(wasm: *Wasm, file_index: File.Index, symbol_index: Sym
                 break :blk index;
             };
         } else {
-            var err = try wasm.base.addErrorWithNotes(1);
+            var err = try diags.addErrorWithNotes(1);
             try err.addMsg("found unknown section '{s}'", .{section_name});
             try err.addNote("defined in '{s}'", .{obj_file.path()});
             return error.UnexpectedValue;
@@ -2487,6 +2499,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
     defer tracy.end();
 
     const comp = wasm.base.comp;
+    const diags = &comp.link_diags;
     if (wasm.llvm_object) |llvm_object| {
         try wasm.base.emitLlvmObject(arena, llvm_object, prog_node);
         const use_lld = build_options.have_llvm and comp.config.use_lld;
@@ -2569,23 +2582,23 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
     if (wasm.zig_object_index != .null) {
         try wasm.resolveSymbolsInObject(wasm.zig_object_index);
     }
-    if (wasm.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
 
     for (wasm.objects.items) |object_index| {
         try wasm.resolveSymbolsInObject(object_index);
     }
-    if (wasm.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
 
     var emit_features_count: u32 = 0;
     var enabled_features: [@typeInfo(types.Feature.Tag).@"enum".fields.len]bool = undefined;
     try wasm.validateFeatures(&enabled_features, &emit_features_count);
 
     try wasm.resolveSymbolsInArchives();
-    if (wasm.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
     try wasm.resolveLazySymbols();
     try wasm.checkUndefinedSymbols();
     try wasm.checkExportNames();
 
     try wasm.setupInitFunctions();
-    if (wasm.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
     try wasm.setupStart();
 
     try wasm.markReferences();
@@ -2594,7 +2607,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
     try wasm.mergeTypes();
     try wasm.allocateAtoms();
     try wasm.setupMemory();
-    if (wasm.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
     wasm.allocateVirtualAddresses();
     wasm.mapFunctionTable();
     try wasm.initializeCallCtorsFunction();
@@ -2604,7 +2617,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
     try wasm.setupStartSection();
     try wasm.setupExports();
     try wasm.writeToFile(enabled_features, emit_features_count, arena);
-    if (wasm.base.hasErrors()) return error.FlushFailure;
+    if (diags.hasErrors()) return error.FlushFailure;
 }
 
 /// Writes the WebAssembly in-memory module to the file
@@ -2615,6 +2628,7 @@ fn writeToFile(
     arena: Allocator,
 ) !void {
     const comp = wasm.base.comp;
+    const diags = &comp.link_diags;
     const gpa = comp.gpa;
     const use_llvm = comp.config.use_llvm;
     const use_lld = build_options.have_llvm and comp.config.use_lld;
@@ -3003,7 +3017,7 @@ fn writeToFile(
             try emitBuildIdSection(&binary_bytes, str);
         },
         else => |mode| {
-            var err = try wasm.base.addErrorWithNotes(0);
+            var err = try diags.addErrorWithNotes(0);
             try err.addMsg("build-id '{s}' is not supported for WebAssembly", .{@tagName(mode)});
         },
     }
@@ -3684,7 +3698,8 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
         switch (term) {
             .Exited => |code| {
                 if (code != 0) {
-                    comp.lockAndParseLldStderr(linker_command, stderr);
+                    const diags = &comp.link_diags;
+                    diags.lockAndParseLldStderr(linker_command, stderr);
                     return error.LLDReportedFailure;
                 }
            },
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 81a3cac737..b4fc552025 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -226,6 +226,8 @@ pub fn findImport(object: *const Object, sym: Symbol) types.Import {
 ///
 /// When the object file is *NOT* MVP, we return `null`.
 fn checkLegacyIndirectFunctionTable(object: *Object, wasm_file: *const Wasm) !?Symbol {
+    const diags = &wasm_file.base.comp.link_diags;
+
     var table_count: usize = 0;
     for (object.symtable) |sym| {
         if (sym.tag == .table) table_count += 1;
@@ -235,7 +237,7 @@ fn checkLegacyIndirectFunctionTable(object: *Object, wasm_file: *const Wasm) !?S
     if (object.imported_tables_count == table_count) return null;
 
     if (table_count != 0) {
-        var err = try wasm_file.base.addErrorWithNotes(1);
+        var err = try diags.addErrorWithNotes(1);
         try err.addMsg("Expected a table entry symbol for each of the {d} table(s), but instead got {d} symbols.", .{
             object.imported_tables_count,
             table_count,
@@ -246,14 +248,14 @@ fn checkLegacyIndirectFunctionTable(object: *Object, wasm_file: *const Wasm) !?S
 
     // MVP object files cannot have any table definitions, only imports (for the indirect function table).
     if (object.tables.len > 0) {
-        var err = try wasm_file.base.addErrorWithNotes(1);
+        var err = try diags.addErrorWithNotes(1);
         try err.addMsg("Unexpected table definition without representing table symbols.", .{});
         try err.addNote("defined in '{s}'", .{object.path});
         return error.UnexpectedTable;
     }
 
     if (object.imported_tables_count != 1) {
-        var err = try wasm_file.base.addErrorWithNotes(1);
+        var err = try diags.addErrorWithNotes(1);
         try err.addMsg("Found more than one table import, but no representing table symbols", .{});
         try err.addNote("defined in '{s}'", .{object.path});
         return error.MissingTableSymbols;
@@ -266,7 +268,7 @@ fn checkLegacyIndirectFunctionTable(object: *Object, wasm_file: *const Wasm) !?S
     } else unreachable;
 
     if (!std.mem.eql(u8, object.string_table.get(table_import.name), "__indirect_function_table")) {
-        var err = try wasm_file.base.addErrorWithNotes(1);
+        var err = try diags.addErrorWithNotes(1);
         try err.addMsg("Non-indirect function table import '{s}' is missing a corresponding symbol", .{object.string_table.get(table_import.name)});
         try err.addNote("defined in '{s}'", .{object.path});
         return error.MissingTableSymbols;
@@ -587,6 +589,7 @@ fn Parser(comptime ReaderType: type) type {
         /// to be able to link.
         /// Logs an info message when an undefined feature is detected.
         fn parseFeatures(parser: *ObjectParser, gpa: Allocator) !void {
+            const diags = &parser.wasm_file.base.comp.link_diags;
             const reader = parser.reader.reader();
             for (try readVec(&parser.object.features, reader, gpa)) |*feature| {
                 const prefix = try readEnum(types.Feature.Prefix, reader);
@@ -596,7 +599,7 @@ fn Parser(comptime ReaderType: type) type {
                 try reader.readNoEof(name);
 
                 const tag = types.known_features.get(name) orelse {
-                    var err = try parser.wasm_file.base.addErrorWithNotes(1);
+                    var err = try diags.addErrorWithNotes(1);
                     try err.addMsg("Object file contains unknown feature: {s}", .{name});
                     try err.addNote("defined in '{s}'", .{parser.object.path});
                     return error.UnknownFeature;