From 5a6a1f8a8ad1475d328c998824981c7b310987d2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 11 Dec 2023 22:10:39 -0700
Subject: [PATCH] linker: update target references

---
 src/arch/wasm/Emit.zig              |   3 +-
 src/link/Coff.zig                   |  11 ++-
 src/link/Coff/Relocation.zig        |   3 +-
 src/link/Coff/lld.zig               |   2 +-
 src/link/Elf/Object.zig             |   3 +-
 src/link/Elf/SharedObject.zig       |   3 +-
 src/link/Elf/synthetic_sections.zig |   6 +-
 src/link/MachO.zig                  | 126 +++++++++++++++++-----------
 src/link/MachO/Atom.zig             |  28 ++++---
 src/link/MachO/DebugSymbols.zig     |  14 +++-
 src/link/MachO/Object.zig           |  33 ++++----
 src/link/MachO/Relocation.zig       |   6 +-
 src/link/MachO/UnwindInfo.zig       |  29 ++++---
 src/link/MachO/dead_strip.zig       |  62 +++++++-------
 src/link/MachO/eh_frame.zig         |  35 ++++----
 src/link/MachO/load_commands.zig    |   6 +-
 src/link/MachO/zld.zig              |  29 ++++---
 src/link/Plan9.zig                  |  55 ++++++------
 src/link/Wasm.zig                   |  11 ++-
 19 files changed, 274 insertions(+), 191 deletions(-)

diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index dbd85116a0..990eb359e8 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -406,7 +406,8 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
     const extra_index = emit.mir.instructions.items(.data)[inst].payload;
     const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
     const mem_offset = emit.offset() + 1;
-    const is_wasm32 = emit.bin_file.base.options.target.cpu.arch == .wasm32;
+    const target = emit.bin_file.comp.root_mod.resolved_target.result;
+    const is_wasm32 = target.cpu.arch == .wasm32;
     if (is_wasm32) {
         try emit.code.append(std.wasm.opcode(.i32_const));
         var buf: [5]u8 = undefined;
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 789b9fe773..d99460d333 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1467,6 +1467,7 @@ pub fn updateExports(
     }
 
     const ip = &mod.intern_pool;
+    const target = self.base.comp.root_mod.resolved_target.result;
 
     if (self.base.options.use_llvm) {
         // Even in the case of LLVM, we need to notice certain exported symbols in order to
@@ -1478,7 +1479,7 @@ pub fn updateExports(
             };
             const exported_decl = mod.declPtr(exported_decl_index);
             if (exported_decl.getOwnedFunction(mod) == null) continue;
-            const winapi_cc = switch (self.base.options.target.cpu.arch) {
+            const winapi_cc = switch (target.cpu.arch) {
                 .x86 => std.builtin.CallingConvention.Stdcall,
                 else => std.builtin.CallingConvention.C,
             };
@@ -1487,7 +1488,7 @@
                 self.base.options.link_libc)
             {
                 mod.stage1_flags.have_c_main = true;
-            } else if (decl_cc == winapi_cc and self.base.options.target.os.tag == .windows) {
+            } else if (decl_cc == winapi_cc and target.os.tag == .windows) {
                 if (ip.stringEqlSlice(exp.opts.name, "WinMain")) {
                     mod.stage1_flags.have_winmain = true;
                 } else if (ip.stringEqlSlice(exp.opts.name, "wWinMain")) {
@@ -2200,6 +2201,7 @@ fn writeDataDirectoriesHeaders(self: *Coff) !void {
 }
 
 fn writeHeader(self: *Coff) !void {
+    const target = self.base.comp.root_mod.resolved_target.result;
     const gpa = self.base.comp.gpa;
     var buffer = std.ArrayList(u8).init(gpa);
     defer buffer.deinit();
@@ -2225,7 +2227,7 @@ fn writeHeader(self: *Coff) !void {
     const timestamp = std.time.timestamp();
     const size_of_optional_header = @as(u16, @intCast(self.getOptionalHeaderSize() + self.getDataDirectoryHeadersSize()));
     var coff_header = coff.CoffHeader{
-        .machine = coff.MachineType.fromTargetCpuArch(self.base.options.target.cpu.arch),
+        .machine = coff.MachineType.fromTargetCpuArch(target.cpu.arch),
         .number_of_sections = @as(u16,
@intCast(self.sections.slice().len)), // TODO what if we prune a section .time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))), .pointer_to_symbol_table = self.strtab_offset orelse 0, @@ -2451,8 +2453,9 @@ pub fn getEntryPoint(self: Coff) ?SymbolWithLoc { } pub fn getImageBase(self: Coff) u64 { + const target = self.base.comp.root_mod.resolved_target.result; const image_base: u64 = self.base.options.image_base_override orelse switch (self.base.comp.config.output_mode) { - .Exe => switch (self.base.options.target.cpu.arch) { + .Exe => switch (target.cpu.arch) { .aarch64 => @as(u64, 0x140000000), .x86_64, .x86 => 0x400000, else => unreachable, // unsupported target architecture diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig index 84cfcdc18a..b25427feda 100644 --- a/src/link/Coff/Relocation.zig +++ b/src/link/Coff/Relocation.zig @@ -107,7 +107,8 @@ pub fn resolve(self: Relocation, atom_index: Atom.Index, code: []u8, image_base: .ptr_width = coff_file.ptr_width, }; - switch (coff_file.base.options.target.cpu.arch) { + const target = coff_file.base.comp.root_mod.resolved_target.result; + switch (target.cpu.arch) { .aarch64 => self.resolveAarch64(ctx), .x86, .x86_64 => self.resolveX86(ctx), else => unreachable, // unhandled target architecture diff --git a/src/link/Coff/lld.zig b/src/link/Coff/lld.zig index 297900fae5..918a7004c1 100644 --- a/src/link/Coff/lld.zig +++ b/src/link/Coff/lld.zig @@ -49,7 +49,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod const is_dyn_lib = self.base.comp.config.link_mode == .Dynamic and is_lib; const is_exe_or_dyn_lib = is_dyn_lib or self.base.comp.config.output_mode == .Exe; const link_in_crt = self.base.options.link_libc and is_exe_or_dyn_lib; - const target = self.base.options.target; + const target = self.base.comp.root_mod.resolved_target.result; const optimize_mode = self.base.comp.root_mod.optimize_mode; // See link/Elf.zig for comments on how this mechanism works. diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 6b0cc66c33..347f96d451 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -54,7 +54,8 @@ pub fn parse(self: *Object, elf_file: *Elf) !void { self.header = try reader.readStruct(elf.Elf64_Ehdr); - if (elf_file.base.options.target.cpu.arch != self.header.?.e_machine.toTargetCpuArch().?) { + const target = elf_file.base.comp.root_mod.resolved_target.result; + if (target.cpu.arch != self.header.?.e_machine.toTargetCpuArch().?) { try elf_file.reportParseError2( self.index, "invalid cpu architecture: {s}", diff --git a/src/link/Elf/SharedObject.zig b/src/link/Elf/SharedObject.zig index 2a39477805..6675f49941 100644 --- a/src/link/Elf/SharedObject.zig +++ b/src/link/Elf/SharedObject.zig @@ -53,7 +53,8 @@ pub fn parse(self: *SharedObject, elf_file: *Elf) !void { self.header = try reader.readStruct(elf.Elf64_Ehdr); - if (elf_file.base.options.target.cpu.arch != self.header.?.e_machine.toTargetCpuArch().?) { + const target = elf_file.base.comp.root_mod.resolved_target.result; + if (target.cpu.arch != self.header.?.e_machine.toTargetCpuArch().?) 
{ try elf_file.reportParseError2( self.index, "invalid cpu architecture: {s}", diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig index 25a9975f01..1e7f2f4f9f 100644 --- a/src/link/Elf/synthetic_sections.zig +++ b/src/link/Elf/synthetic_sections.zig @@ -287,7 +287,8 @@ pub const ZigGotSection = struct { zig_got.flags.dirty = false; } const entry_size: u16 = elf_file.archPtrWidthBytes(); - const endian = elf_file.base.options.target.cpu.arch.endian(); + const target = elf_file.base.comp.root_mod.resolved_target.result; + const endian = target.cpu.arch.endian(); const off = zig_got.entryOffset(index, elf_file); const vaddr = zig_got.entryAddress(index, elf_file); const entry = zig_got.entries.items[index]; @@ -1575,7 +1576,8 @@ pub const ComdatGroupSection = struct { fn writeInt(value: anytype, elf_file: *Elf, writer: anytype) !void { const entry_size = elf_file.archPtrWidthBytes(); - const endian = elf_file.base.options.target.cpu.arch.endian(); + const target = elf_file.base.comp.root_mod.resolved_target.result; + const endian = target.cpu.arch.endian(); switch (entry_size) { 2 => try writer.writeInt(u16, @intCast(value), endian), 4 => try writer.writeInt(u32, @intCast(value), endian), diff --git a/src/link/MachO.zig b/src/link/MachO.zig index eb5b5bce0d..c1d7eef652 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -143,7 +143,7 @@ tlv_table: TlvSymbolTable = .{}, /// Hot-code swapping state. hot_state: if (is_hot_update_compatible) HotUpdateState else struct {} = .{}, -darwin_sdk_layout: ?SdkLayout, +sdk_layout: ?SdkLayout, /// Size of the __PAGEZERO segment. pagezero_vmsize: u64, /// Minimum space for future expansion of the load commands. @@ -184,20 +184,21 @@ pub const SdkLayout = enum { pub fn open(arena: Allocator, options: link.File.OpenOptions) !*MachO { if (build_options.only_c) unreachable; - const target = options.comp.root_mod.resolved_target.result; - const use_lld = build_options.have_llvm and options.comp.config.use_lld; - const use_llvm = options.comp.config.use_llvm; + const comp = options.comp; + const target = comp.root_mod.resolved_target.result; + const use_lld = build_options.have_llvm and comp.config.use_lld; + const use_llvm = comp.config.use_llvm; assert(target.ofmt == .macho); - const gpa = options.comp.gpa; + const gpa = comp.gpa; const emit = options.emit; const mode: Mode = mode: { - if (use_llvm or options.module == null or options.cache_mode == .whole) + if (use_llvm or comp.module == null or comp.cache_mode == .whole) break :mode .zld; break :mode .incremental; }; const sub_path = if (mode == .zld) blk: { - if (options.module == null) { + if (comp.module == null) { // No point in opening a file, we would not write anything to it. // Initialize with empty. return createEmpty(arena, options); @@ -225,13 +226,13 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*MachO { .read = true, .mode = link.File.determineMode( use_lld, - options.comp.config.output_mode, - options.comp.config.link_mode, + comp.config.output_mode, + comp.config.link_mode, ), }); self.base.file = file; - if (!options.strip and options.module != null) { + if (self.base.debug_format != .strip and comp.module != null) { // Create dSYM bundle. 
log.debug("creating {s}.dSYM bundle", .{sub_path}); @@ -276,14 +277,15 @@ pub fn open(arena: Allocator, options: link.File.OpenOptions) !*MachO { } pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*MachO { - const self = try arena.create(MachO); - const optimize_mode = options.comp.root_mod.optimize_mode; - const use_llvm = options.comp.config.use_llvm; + const comp = options.comp; + const optimize_mode = comp.root_mod.optimize_mode; + const use_llvm = comp.config.use_llvm; + const self = try arena.create(MachO); self.* = .{ .base = .{ .tag = .macho, - .comp = options.comp, + .comp = comp, .emit = options.emit, .gc_sections = options.gc_sections orelse (optimize_mode != .Debug), .stack_size = options.stack_size orelse 16777216, @@ -297,7 +299,7 @@ pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*MachO { .function_sections = options.function_sections, .data_sections = options.data_sections, }, - .mode = if (use_llvm or options.module == null or options.cache_mode == .whole) + .mode = if (use_llvm or comp.module == null or comp.cache_mode == .whole) .zld else .incremental, @@ -305,9 +307,13 @@ pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*MachO { .headerpad_size = options.headerpad_size orelse default_headerpad_size, .headerpad_max_install_names = options.headerpad_max_install_names, .dead_strip_dylibs = options.dead_strip_dylibs, + .sdk_layout = options.darwin_sdk_layout, + .frameworks = options.frameworks, + .install_name = options.install_name, + .entitlements = options.entitlements, }; - if (use_llvm and options.module != null) { + if (use_llvm and comp.module != null) { self.llvm_object = try LlvmObject.create(arena, options); } @@ -357,6 +363,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No const output_mode = self.base.comp.config.output_mode; const module = self.base.comp.module orelse return error.LinkingWithoutZigSourceUnimplemented; + const target = self.base.comp.root_mod.resolved_target.result; if (self.lazy_syms.getPtr(.none)) |metadata| { // Most lazy symbols can be updated on first use, but @@ -569,7 +576,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No // written out to the file. // The most important here is to have the correct vm and filesize of the __LINKEDIT segment // where the code signature goes into. 
- var codesig = CodeSignature.init(getPageSize(self.base.options.target.cpu.arch)); + var codesig = CodeSignature.init(getPageSize(target.cpu.arch)); codesig.code_directory.ident = self.base.emit.sub_path; if (self.entitlements) |path| { try codesig.addEntitlements(gpa, path); @@ -619,7 +626,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No .version = 0, }); { - const platform = Platform.fromTarget(self.base.options.target); + const platform = Platform.fromTarget(target); const sdk_version: ?std.SemanticVersion = load_commands.inferSdkVersion(arena, comp); if (platform.isBuildVersionCompatible()) { try load_commands.writeBuildVersionLC(platform, sdk_version, lc_writer); @@ -701,7 +708,7 @@ pub fn resolveLibSystem( var checked_paths = std.ArrayList([]const u8).init(arena); success: { - if (self.base.options.darwin_sdk_layout) |sdk_layout| switch (sdk_layout) { + if (self.sdk_layout) |sdk_layout| switch (sdk_layout) { .sdk => { const dir = try fs.path.join(arena, &[_][]const u8{ self.base.options.sysroot.?, "usr", "lib" }); if (try accessLibPath(arena, &test_path, &checked_paths, dir, "libSystem")) break :success; @@ -817,6 +824,7 @@ fn parseObject( defer tracy.end(); const gpa = self.base.comp.gpa; + const target = self.base.comp.root_mod.resolved_target.result; const mtime: u64 = mtime: { const stat = file.stat() catch break :mtime 0; break :mtime @as(u64, @intCast(@divFloor(stat.mtime, 1_000_000_000))); @@ -839,8 +847,8 @@ fn parseObject( else => unreachable, }; const detected_platform = object.getPlatform(); - const this_cpu_arch = self.base.options.target.cpu.arch; - const this_platform = Platform.fromTarget(self.base.options.target); + const this_cpu_arch = target.cpu.arch; + const this_platform = Platform.fromTarget(target); if (this_cpu_arch != detected_cpu_arch or (detected_platform != null and !detected_platform.?.eqlTarget(this_platform))) @@ -867,8 +875,10 @@ pub fn parseLibrary( const tracy = trace(@src()); defer tracy.end(); + const target = self.base.comp.root_mod.resolved_target.result; + if (fat.isFatLibrary(file)) { - const offset = try self.parseFatLibrary(file, self.base.options.target.cpu.arch, ctx); + const offset = try self.parseFatLibrary(file, target.cpu.arch, ctx); try file.seekTo(offset); if (Archive.isArchive(file, offset)) { @@ -934,6 +944,7 @@ fn parseArchive( ctx: *ParseErrorCtx, ) ParseError!void { const gpa = self.base.comp.gpa; + const target = self.base.comp.root_mod.resolved_target.result; // We take ownership of the file so that we can store it for the duration of symbol resolution. // TODO we shouldn't need to do that and could pre-parse the archive like we do for zld/ELF? 
@@ -963,8 +974,8 @@ fn parseArchive( else => unreachable, }; const detected_platform = object.getPlatform(); - const this_cpu_arch = self.base.options.target.cpu.arch; - const this_platform = Platform.fromTarget(self.base.options.target); + const this_cpu_arch = target.cpu.arch; + const this_platform = Platform.fromTarget(target); if (this_cpu_arch != detected_cpu_arch or (detected_platform != null and !detected_platform.?.eqlTarget(this_platform))) @@ -1015,6 +1026,7 @@ fn parseDylib( ctx: *ParseErrorCtx, ) ParseError!void { const gpa = self.base.comp.gpa; + const target = self.base.comp.root_mod.resolved_target.result; const file_stat = try file.stat(); const file_size = math.cast(usize, file_stat.size - offset) orelse return error.Overflow; @@ -1038,8 +1050,8 @@ fn parseDylib( else => unreachable, }; const detected_platform = dylib.getPlatform(contents); - const this_cpu_arch = self.base.options.target.cpu.arch; - const this_platform = Platform.fromTarget(self.base.options.target); + const this_cpu_arch = target.cpu.arch; + const this_platform = Platform.fromTarget(target); if (this_cpu_arch != detected_cpu_arch or (detected_platform != null and !detected_platform.?.eqlTarget(this_platform))) @@ -1061,6 +1073,8 @@ fn parseLibStub( ctx: *ParseErrorCtx, ) ParseError!void { const gpa = self.base.comp.gpa; + const target = self.base.comp.root_mod.resolved_target.result; + var lib_stub = try LibStub.loadFromFile(gpa, file); defer lib_stub.deinit(); @@ -1068,7 +1082,7 @@ fn parseLibStub( // Verify target { - var matcher = try Dylib.TargetMatcher.init(gpa, self.base.options.target); + var matcher = try Dylib.TargetMatcher.init(gpa, target); defer matcher.deinit(); const first_tbd = lib_stub.inner[0]; @@ -1091,7 +1105,7 @@ fn parseLibStub( try dylib.parseFromStub( gpa, - self.base.options.target, + target, lib_stub, @intCast(self.dylibs.items.len), // TODO defer it till later dependent_libs, @@ -1236,7 +1250,8 @@ pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []u8) !void { fn writeToMemory(self: *MachO, task: std.os.darwin.MachTask, segment_index: u8, addr: u64, code: []const u8) !void { const segment = self.segments.items[segment_index]; - const cpu_arch = self.base.options.target.cpu.arch; + const target = self.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const nwritten = if (!segment.isWriteable()) try task.writeMemProtected(addr, code, cpu_arch) else @@ -1280,7 +1295,8 @@ fn writeStubHelperPreamble(self: *MachO) !void { if (self.stub_helper_preamble_allocated) return; const gpa = self.base.comp.gpa; - const cpu_arch = self.base.options.target.cpu.arch; + const target = self.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const size = stubs.stubHelperPreambleSize(cpu_arch); var buf = try std.ArrayList(u8).initCapacity(gpa, size); @@ -1306,11 +1322,12 @@ fn writeStubHelperPreamble(self: *MachO) !void { } fn writeStubTableEntry(self: *MachO, index: usize) !void { + const target = self.base.comp.root_mod.resolved_target.result; const stubs_sect_id = self.stubs_section_index.?; const stub_helper_sect_id = self.stub_helper_section_index.?; const laptr_sect_id = self.la_symbol_ptr_section_index.?; - const cpu_arch = self.base.options.target.cpu.arch; + const cpu_arch = target.cpu.arch; const stub_entry_size = stubs.stubSize(cpu_arch); const stub_helper_entry_size = stubs.stubHelperSize(cpu_arch); const stub_helper_preamble_size = stubs.stubHelperPreambleSize(cpu_arch); @@ -2243,7 +2260,7 @@ pub fn addStubEntry(self: 
*MachO, target: SymbolWithLoc) !void { .flags = macho.S_SYMBOL_STUBS | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS, - .reserved2 = stubs.stubSize(self.base.options.target.cpu.arch), + .reserved2 = stubs.stubSize(target.cpu.arch), }); self.stub_helper_section_index = try self.initSection("__TEXT", "__stub_helper", .{ .flags = macho.S_REGULAR | @@ -3116,7 +3133,8 @@ fn populateMissingMetadata(self: *MachO) !void { assert(self.mode == .incremental); const gpa = self.base.comp.gpa; - const cpu_arch = self.base.options.target.cpu.arch; + const target = self.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const pagezero_vmsize = self.calcPagezeroSize(); if (self.pagezero_segment_cmd_index == null) { @@ -3263,7 +3281,8 @@ fn populateMissingMetadata(self: *MachO) !void { fn calcPagezeroSize(self: *MachO) u64 { const output_mode = self.base.comp.config.output_mode; - const page_size = getPageSize(self.base.options.target.cpu.arch); + const target = self.base.comp.root_mod.resolved_target.result; + const page_size = getPageSize(target.cpu.arch); const aligned_pagezero_vmsize = mem.alignBackward(u64, self.pagezero_vmsize, page_size); if (output_mode == .Lib) return 0; if (aligned_pagezero_vmsize == 0) return 0; @@ -3305,7 +3324,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts reserved2: u32 = 0, }) !u8 { const gpa = self.base.comp.gpa; - const page_size = getPageSize(self.base.options.target.cpu.arch); + const target = self.base.comp.root_mod.resolved_target.result; + const page_size = getPageSize(target.cpu.arch); // In incremental context, we create one section per segment pairing. This way, // we can move the segment in raw file as we please. const segment_id = @as(u8, @intCast(self.segments.items.len)); @@ -3360,7 +3380,8 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void { const segment = &self.segments.items[segment_index]; const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id]; const sect_capacity = self.allocatedSize(header.offset); - const page_size = getPageSize(self.base.options.target.cpu.arch); + const target = self.base.comp.root_mod.resolved_target.result; + const page_size = getPageSize(target.cpu.arch); if (needed_size > sect_capacity) { const new_offset = self.findFreeSpace(needed_size, page_size); @@ -3400,7 +3421,8 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void { } fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { - const page_size = getPageSize(self.base.options.target.cpu.arch); + const target = self.base.comp.root_mod.resolved_target.result; + const page_size = getPageSize(target.cpu.arch); const header = &self.sections.items(.header)[sect_id]; const segment = self.getSegmentPtr(sect_id); const increased_size = padToIdeal(needed_size); @@ -3611,7 +3633,8 @@ pub fn writeSegmentHeaders(self: *MachO, writer: anytype) !void { } pub fn writeLinkeditSegmentData(self: *MachO) !void { - const page_size = getPageSize(self.base.options.target.cpu.arch); + const target = self.base.comp.root_mod.resolved_target.result; + const page_size = getPageSize(target.cpu.arch); const seg = self.getLinkeditSegmentPtr(); seg.filesize = 0; seg.vmsize = 0; @@ -3698,7 +3721,8 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void { } // Finally, unpack the rest. 
- const cpu_arch = self.base.options.target.cpu.arch; + const target = self.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; for (self.objects.items) |*object| { for (object.atoms.items) |atom_index| { const atom = self.getAtom(atom_index); @@ -3744,14 +3768,14 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void { }, else => unreachable, } - const target = Atom.parseRelocTarget(self, .{ + const reloc_target = Atom.parseRelocTarget(self, .{ .object_id = atom.getFile().?, .rel = rel, .code = code, .base_offset = ctx.base_offset, .base_addr = ctx.base_addr, }); - const target_sym = self.getSymbol(target); + const target_sym = self.getSymbol(reloc_target); if (target_sym.undf()) continue; const base_offset = @as(i32, @intCast(sym.n_value - segment.vmaddr)); @@ -3853,7 +3877,8 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void { } // Finally, unpack the rest. - const cpu_arch = self.base.options.target.cpu.arch; + const target = self.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; for (self.objects.items) |*object| { for (object.atoms.items) |atom_index| { const atom = self.getAtom(atom_index); @@ -4072,7 +4097,8 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: anytype) !void { const header = self.sections.items(.header)[stub_helper_section_index]; - const cpu_arch = self.base.options.target.cpu.arch; + const target = self.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const preamble_size = stubs.stubHelperPreambleSize(cpu_arch); const stub_size = stubs.stubHelperSize(cpu_arch); const stub_offset = stubs.stubOffsetInStubHelper(cpu_arch); @@ -4708,13 +4734,14 @@ pub fn writeUuid(self: *MachO, comp: *const Compilation, uuid_cmd_offset: u32, h } pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { + const target = self.base.comp.root_mod.resolved_target.result; const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForward(u64, seg.filesize, getPageSize(self.base.options.target.cpu.arch)); + seg.vmsize = mem.alignForward(u64, seg.filesize, getPageSize(target.cpu.arch)); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. 
@@ -4758,7 +4785,8 @@ pub fn writeHeader(self: *MachO, ncmds: u32, sizeofcmds: u32) !void { var header: macho.mach_header_64 = .{}; header.flags = macho.MH_NOUNDEFS | macho.MH_DYLDLINK | macho.MH_PIE | macho.MH_TWOLEVEL; - switch (self.base.options.target.cpu.arch) { + const target = self.base.comp.root_mod.resolved_target.result; + switch (target.cpu.arch) { .aarch64 => { header.cputype = macho.CPU_TYPE_ARM64; header.cpusubtype = macho.CPU_SUBTYPE_ARM_ALL; @@ -5123,9 +5151,10 @@ pub fn getTlvPtrEntryAddress(self: *MachO, sym_with_loc: SymbolWithLoc) ?u64 { } pub fn getStubsEntryAddress(self: *MachO, sym_with_loc: SymbolWithLoc) ?u64 { + const target = self.base.comp.root_mod.resolved_target.result; const index = self.stub_table.lookup.get(sym_with_loc) orelse return null; const header = self.sections.items(.header)[self.stubs_section_index.?]; - return header.addr + stubs.stubSize(self.base.options.target.cpu.arch) * index; + return header.addr + stubs.stubSize(target.cpu.arch) * index; } /// Returns symbol location corresponding to the set entrypoint if any. @@ -5234,8 +5263,9 @@ pub fn handleAndReportParseError( err: ParseError, ctx: *const ParseErrorCtx, ) error{OutOfMemory}!void { + const target = self.base.comp.root_mod.resolved_target.result; const gpa = self.base.comp.gpa; - const cpu_arch = self.base.options.target.cpu.arch; + const cpu_arch = target.cpu.arch; switch (err) { error.DylibAlreadyExists => {}, error.IncompatibleDylibVersion => { @@ -5270,7 +5300,7 @@ pub fn handleAndReportParseError( error.InvalidTarget => try self.reportParseError( path, "invalid target: expected '{}', but found '{s}'", - .{ Platform.fromTarget(self.base.options.target).fmtTarget(cpu_arch), targets_string.items }, + .{ Platform.fromTarget(target).fmtTarget(cpu_arch), targets_string.items }, ), error.InvalidTargetFatLibrary => try self.reportParseError( path, diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig index 290c67c45e..0263995359 100644 --- a/src/link/MachO/Atom.zig +++ b/src/link/MachO/Atom.zig @@ -387,7 +387,8 @@ pub fn calcInnerSymbolOffset(macho_file: *MachO, atom_index: Index, sym_index: u } pub fn scanAtomRelocs(macho_file: *MachO, atom_index: Index, relocs: []align(1) const macho.relocation_info) !void { - const arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const arch = target.cpu.arch; const atom = macho_file.getAtom(atom_index); assert(atom.getFile() != null); // synthetic atoms do not have relocs @@ -434,6 +435,7 @@ pub fn parseRelocTarget(macho_file: *MachO, ctx: struct { const tracy = trace(@src()); defer tracy.end(); + const target = macho_file.base.comp.root_mod.resolved_target.result; const object = &macho_file.objects.items[ctx.object_id]; log.debug("parsing reloc target in object({d}) '{s}' ", .{ ctx.object_id, object.name }); @@ -447,7 +449,7 @@ pub fn parseRelocTarget(macho_file: *MachO, ctx: struct { else mem.readInt(u32, ctx.code[rel_offset..][0..4], .little); } else blk: { - assert(macho_file.base.options.target.cpu.arch == .x86_64); + assert(target.cpu.arch == .x86_64); const correction: u3 = switch (@as(macho.reloc_type_x86_64, @enumFromInt(ctx.rel.r_type))) { .X86_64_RELOC_SIGNED => 0, .X86_64_RELOC_SIGNED_1 => 1, @@ -467,18 +469,18 @@ pub fn parseRelocTarget(macho_file: *MachO, ctx: struct { const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = ctx.object_id + 1 }; const sym = macho_file.getSymbol(sym_loc); - const target = if (sym.sect() and !sym.ext()) + const 
reloc_target = if (sym.sect() and !sym.ext()) sym_loc else if (object.getGlobal(sym_index)) |global_index| macho_file.globals.items[global_index] else sym_loc; log.debug(" | target %{d} ('{s}') in object({?d})", .{ - target.sym_index, - macho_file.getSymbolName(target), - target.getFile(), + reloc_target.sym_index, + macho_file.getSymbolName(reloc_target), + reloc_target.getFile(), }); - return target; + return reloc_target; } pub fn getRelocTargetAtomIndex(macho_file: *MachO, target: SymbolWithLoc) ?Index { @@ -599,7 +601,8 @@ pub fn resolveRelocs( atom_code: []u8, atom_relocs: []align(1) const macho.relocation_info, ) !void { - const arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const arch = target.cpu.arch; const atom = macho_file.getAtom(atom_index); assert(atom.getFile() != null); // synthetic atoms do not have relocs @@ -1192,7 +1195,8 @@ pub fn getAtomRelocs(macho_file: *MachO, atom_index: Index) []const macho.reloca } pub fn relocRequiresGot(macho_file: *MachO, rel: macho.relocation_info) bool { - switch (macho_file.base.options.target.cpu.arch) { + const target = macho_file.base.comp.root_mod.resolved_target.result; + switch (target.cpu.arch) { .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_GOT_LOAD_PAGEOFF12, @@ -1211,7 +1215,8 @@ pub fn relocRequiresGot(macho_file: *MachO, rel: macho.relocation_info) bool { } pub fn relocIsTlv(macho_file: *MachO, rel: macho.relocation_info) bool { - switch (macho_file.base.options.target.cpu.arch) { + const target = macho_file.base.comp.root_mod.resolved_target.result; + switch (target.cpu.arch) { .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_TLVP_LOAD_PAGE21, .ARM64_RELOC_TLVP_LOAD_PAGEOFF12, @@ -1227,7 +1232,8 @@ pub fn relocIsTlv(macho_file: *MachO, rel: macho.relocation_info) bool { } pub fn relocIsStub(macho_file: *MachO, rel: macho.relocation_info) bool { - switch (macho_file.base.options.target.cpu.arch) { + const target = macho_file.base.comp.root_mod.resolved_target.result; + switch (target.cpu.arch) { .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_BRANCH26 => return true, else => return false, diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index f204093290..2e7f8e49af 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -39,10 +39,12 @@ pub const Reloc = struct { /// You must call this function *after* `MachO.populateMissingMetadata()` /// has been called to get a viable debug symbols output. 
pub fn populateMissingMetadata(self: *DebugSymbols, macho_file: *MachO) !void { + const target = macho_file.base.comp.root_mod.resolved_target.result; + if (self.dwarf_segment_cmd_index == null) { self.dwarf_segment_cmd_index = @as(u8, @intCast(self.segments.items.len)); - const page_size = MachO.getPageSize(macho_file.base.options.target.cpu.arch); + const page_size = MachO.getPageSize(target.cpu.arch); const off = @as(u64, @intCast(page_size)); const ideal_size: u16 = 200 + 128 + 160 + 250; const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), page_size); @@ -332,7 +334,8 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void { file_size = @max(file_size, header.offset + header.size); } - const page_size = MachO.getPageSize(macho_file.base.options.target.cpu.arch); + const target = macho_file.base.comp.root_mod.resolved_target.result; + const page_size = MachO.getPageSize(target.cpu.arch); const aligned_size = mem.alignForward(u64, file_size, page_size); dwarf_segment.vmaddr = base_vmaddr; dwarf_segment.filesize = aligned_size; @@ -394,10 +397,12 @@ fn writeSegmentHeaders(self: *DebugSymbols, macho_file: *MachO, writer: anytype) } fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: u32, sizeofcmds: u32) !void { + const target = macho_file.base.comp.root_mod.resolved_target.result; + var header: macho.mach_header_64 = .{}; header.filetype = macho.MH_DSYM; - switch (macho_file.base.options.target.cpu.arch) { + switch (target.cpu.arch) { .aarch64 => { header.cputype = macho.CPU_TYPE_ARM64; header.cpusubtype = macho.CPU_SUBTYPE_ARM_ALL; @@ -435,7 +440,8 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void { try self.writeSymtab(macho_file); try self.writeStrtab(); - const page_size = MachO.getPageSize(macho_file.base.options.target.cpu.arch); + const target = macho_file.base.comp.root_mod.resolved_target.result; + const page_size = MachO.getPageSize(target.cpu.arch); const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; const aligned_size = mem.alignForward(u64, seg.filesize, page_size); seg.vmsize = aligned_size; diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 0c68d70644..5185b93386 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -355,6 +355,7 @@ pub fn splitIntoAtoms(self: *Object, macho_file: *MachO, object_id: u32) SplitIn /// into subsections where each subsection then represents an Atom. pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !void { const gpa = macho_file.base.allocator; + const target = macho_file.base.comp.root_mod.resolved_target.result; const sections = self.getSourceSections(); for (sections, 0..) |sect, id| { @@ -448,7 +449,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) ! 
try self.parseRelocs(gpa, section.id); - const cpu_arch = macho_file.base.options.target.cpu.arch; + const cpu_arch = target.cpu.arch; const sect_loc = filterSymbolsBySection(symtab[sect_sym_index..], sect_id + 1); const sect_start_index = sect_sym_index + sect_loc.index; @@ -676,7 +677,8 @@ fn parseEhFrameSection(self: *Object, macho_file: *MachO, object_id: u32) !void macho_file.eh_frame_section_index = try macho_file.initSection("__TEXT", "__eh_frame", .{}); } - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; try self.parseRelocs(gpa, sect_id); const relocs = self.getRelocs(sect_id); @@ -704,7 +706,7 @@ fn parseEhFrameSection(self: *Object, macho_file: *MachO, object_id: u32) !void }); if (record.tag == .fde) { - const target = blk: { + const reloc_target = blk: { switch (cpu_arch) { .aarch64 => { assert(rel_pos.len > 0); // TODO convert to an error as the FDE eh frame is malformed @@ -714,13 +716,13 @@ fn parseEhFrameSection(self: *Object, macho_file: *MachO, object_id: u32) !void @as(macho.reloc_type_arm64, @enumFromInt(rel.r_type)) == .ARM64_RELOC_UNSIGNED) break rel; } else unreachable; - const target = Atom.parseRelocTarget(macho_file, .{ + const reloc_target = Atom.parseRelocTarget(macho_file, .{ .object_id = object_id, .rel = rel, .code = it.data[offset..], .base_offset = @as(i32, @intCast(offset)), }); - break :blk target; + break :blk reloc_target; }, .x86_64 => { const target_address = record.getTargetSymbolAddress(.{ @@ -728,16 +730,16 @@ fn parseEhFrameSection(self: *Object, macho_file: *MachO, object_id: u32) !void .base_offset = offset, }); const target_sym_index = self.getSymbolByAddress(target_address, null); - const target = if (self.getGlobal(target_sym_index)) |global_index| + const reloc_target = if (self.getGlobal(target_sym_index)) |global_index| macho_file.globals.items[global_index] else SymbolWithLoc{ .sym_index = target_sym_index, .file = object_id + 1 }; - break :blk target; + break :blk reloc_target; }, else => unreachable, } }; - if (target.getFile() != object_id) { + if (reloc_target.getFile() != object_id) { log.debug("FDE at offset {x} marked DEAD", .{offset}); self.eh_frame_relocs_lookup.getPtr(offset).?.dead = true; } else { @@ -746,12 +748,12 @@ fn parseEhFrameSection(self: *Object, macho_file: *MachO, object_id: u32) !void // very problematic when using Zig's @export feature to re-export symbols under // additional names. For that reason, we need to ensure we record aliases here // too so that we can tie them with their matching unwind records and vice versa. 
- const aliases = self.getSymbolAliases(target.sym_index); + const aliases = self.getSymbolAliases(reloc_target.sym_index); var i: u32 = 0; while (i < aliases.len) : (i += 1) { const actual_target = SymbolWithLoc{ .sym_index = i + aliases.start, - .file = target.file, + .file = reloc_target.file, }; log.debug("FDE at offset {x} tracks {s}", .{ offset, @@ -766,7 +768,8 @@ fn parseEhFrameSection(self: *Object, macho_file: *MachO, object_id: u32) !void fn parseUnwindInfo(self: *Object, macho_file: *MachO, object_id: u32) !void { const gpa = macho_file.base.allocator; - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const sect_id = self.unwind_info_sect_id orelse { // If it so happens that the object had `__eh_frame` section defined but no `__compact_unwind`, // we will try fully synthesising unwind info records to somewhat match Apple ld's @@ -818,13 +821,13 @@ fn parseUnwindInfo(self: *Object, macho_file: *MachO, object_id: u32) !void { // Find function symbol that this record describes const rel = relocs[rel_pos.start..][rel_pos.len - 1]; - const target = Atom.parseRelocTarget(macho_file, .{ + const reloc_target = Atom.parseRelocTarget(macho_file, .{ .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), .base_offset = @as(i32, @intCast(offset)), }); - if (target.getFile() != object_id) { + if (reloc_target.getFile() != object_id) { log.debug("unwind record {d} marked DEAD", .{record_id}); self.unwind_relocs_lookup[record_id].dead = true; } else { @@ -833,12 +836,12 @@ fn parseUnwindInfo(self: *Object, macho_file: *MachO, object_id: u32) !void { // very problematic when using Zig's @export feature to re-export symbols under // additional names. For that reason, we need to ensure we record aliases here // too so that we can tie them with their matching unwind records and vice versa. 
- const aliases = self.getSymbolAliases(target.sym_index); + const aliases = self.getSymbolAliases(reloc_target.sym_index); var i: u32 = 0; while (i < aliases.len) : (i += 1) { const actual_target = SymbolWithLoc{ .sym_index = i + aliases.start, - .file = target.file, + .file = reloc_target.file, }; log.debug("unwind record {d} tracks {s}", .{ record_id, diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig index 74be9eb0d5..85c19c7608 100644 --- a/src/link/MachO/Relocation.zig +++ b/src/link/MachO/Relocation.zig @@ -58,11 +58,12 @@ pub fn isStubTrampoline(self: Relocation, macho_file: *MachO) bool { } pub fn getTargetBaseAddress(self: Relocation, macho_file: *MachO) ?u64 { + const target = macho_file.base.comp.root_mod.resolved_target.result; if (self.isStubTrampoline(macho_file)) { const index = macho_file.stub_table.lookup.get(self.target) orelse return null; const header = macho_file.sections.items(.header)[macho_file.stubs_section_index.?]; return header.addr + - index * @import("stubs.zig").stubSize(macho_file.base.options.target.cpu.arch); + index * @import("stubs.zig").stubSize(target.cpu.arch); } switch (self.type) { .got, .got_page, .got_pageoff => { @@ -84,7 +85,8 @@ pub fn getTargetBaseAddress(self: Relocation, macho_file: *MachO) ?u64 { } pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, code: []u8) void { - const arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const arch = target.cpu.arch; const atom = macho_file.getAtom(atom_index); const source_sym = atom.getSymbol(macho_file); const source_addr = source_sym.n_value + self.offset; diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig index be6c9dbb34..7223b5555f 100644 --- a/src/link/MachO/UnwindInfo.zig +++ b/src/link/MachO/UnwindInfo.zig @@ -184,7 +184,8 @@ pub fn deinit(info: *UnwindInfo) void { pub fn scanRelocs(macho_file: *MachO) !void { if (macho_file.unwind_info_section_index == null) return; - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; for (macho_file.objects.items, 0..) |*object, object_id| { const unwind_records = object.getUnwindRecords(); for (object.exec_atoms.items) |atom_index| { @@ -196,13 +197,13 @@ pub fn scanRelocs(macho_file: *MachO) !void { if (!UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) { if (getPersonalityFunctionReloc(macho_file, @as(u32, @intCast(object_id)), record_id)) |rel| { // Personality function; add GOT pointer. 
- const target = Atom.parseRelocTarget(macho_file, .{ + const reloc_target = Atom.parseRelocTarget(macho_file, .{ .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); - try macho_file.addGotEntry(target); + try macho_file.addGotEntry(reloc_target); } } } @@ -213,7 +214,8 @@ pub fn scanRelocs(macho_file: *MachO) !void { pub fn collect(info: *UnwindInfo, macho_file: *MachO) !void { if (macho_file.unwind_info_section_index == null) return; - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; var records = std.ArrayList(macho.compact_unwind_entry).init(info.gpa); defer records.deinit(); @@ -247,15 +249,15 @@ pub fn collect(info: *UnwindInfo, macho_file: *MachO) !void { @as(u32, @intCast(object_id)), record_id, )) |rel| { - const target = Atom.parseRelocTarget(macho_file, .{ + const reloc_target = Atom.parseRelocTarget(macho_file, .{ .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); - const personality_index = info.getPersonalityFunction(target) orelse inner: { + const personality_index = info.getPersonalityFunction(reloc_target) orelse inner: { const personality_index = info.personalities_count; - info.personalities[personality_index] = target; + info.personalities[personality_index] = reloc_target; info.personalities_count += 1; break :inner personality_index; }; @@ -265,13 +267,13 @@ pub fn collect(info: *UnwindInfo, macho_file: *MachO) !void { } if (getLsdaReloc(macho_file, @as(u32, @intCast(object_id)), record_id)) |rel| { - const target = Atom.parseRelocTarget(macho_file, .{ + const reloc_target = Atom.parseRelocTarget(macho_file, .{ .object_id = @as(u32, @intCast(object_id)), .rel = rel, .code = mem.asBytes(&record), .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); - record.lsda = @as(u64, @bitCast(target)); + record.lsda = @as(u64, @bitCast(reloc_target)); } } break :blk record; @@ -557,13 +559,14 @@ pub fn write(info: *UnwindInfo, macho_file: *MachO) !void { const text_sect = macho_file.sections.items(.header)[text_sect_id]; var personalities: [max_personalities]u32 = undefined; - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; log.debug("Personalities:", .{}); - for (info.personalities[0..info.personalities_count], 0..) |target, i| { - const addr = macho_file.getGotEntryAddress(target).?; + for (info.personalities[0..info.personalities_count], 0..) 
|reloc_target, i| { + const addr = macho_file.getGotEntryAddress(reloc_target).?; personalities[i] = @as(u32, @intCast(addr - seg.vmaddr)); - log.debug(" {d}: 0x{x} ({s})", .{ i, personalities[i], macho_file.getSymbolName(target) }); + log.debug(" {d}: 0x{x} ({s})", .{ i, personalities[i], macho_file.getSymbolName(reloc_target) }); } for (info.records.items) |*rec| { diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig index 21f9154b45..51a43351ba 100644 --- a/src/link/MachO/dead_strip.zig +++ b/src/link/MachO/dead_strip.zig @@ -118,7 +118,8 @@ fn markLive(macho_file: *MachO, atom_index: Atom.Index, alive: *AtomTable) void alive.putAssumeCapacityNoClobber(atom_index, {}); - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const sym = macho_file.getSymbol(atom.getSymbolWithLoc()); const header = macho_file.sections.items(.header)[sym.n_sect - 1]; @@ -129,7 +130,7 @@ fn markLive(macho_file: *MachO, atom_index: Atom.Index, alive: *AtomTable) void const ctx = Atom.getRelocContext(macho_file, atom_index); for (relocs) |rel| { - const target = switch (cpu_arch) { + const reloc_target = switch (cpu_arch) { .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_ADDEND => continue, else => Atom.parseRelocTarget(macho_file, .{ @@ -149,19 +150,19 @@ fn markLive(macho_file: *MachO, atom_index: Atom.Index, alive: *AtomTable) void }), else => unreachable, }; - const target_sym = macho_file.getSymbol(target); + const target_sym = macho_file.getSymbol(reloc_target); if (target_sym.undf()) continue; - if (target.getFile() == null) { - const target_sym_name = macho_file.getSymbolName(target); + if (reloc_target.getFile() == null) { + const target_sym_name = macho_file.getSymbolName(reloc_target); if (mem.eql(u8, "__mh_execute_header", target_sym_name)) continue; if (mem.eql(u8, "___dso_handle", target_sym_name)) continue; unreachable; // referenced symbol not found } - const object = macho_file.objects.items[target.getFile().?]; - const target_atom_index = object.getAtomIndexForSymbol(target.sym_index).?; + const object = macho_file.objects.items[reloc_target.getFile().?]; + const target_atom_index = object.getAtomIndexForSymbol(reloc_target.sym_index).?; log.debug(" following ATOM({d}, %{d}, {?d})", .{ target_atom_index, macho_file.getAtom(target_atom_index).sym_index, @@ -178,7 +179,8 @@ fn refersLive(macho_file: *MachO, atom_index: Atom.Index, alive: AtomTable) bool log.debug("refersLive(ATOM({d}, %{d}, {?d}))", .{ atom_index, sym_loc.sym_index, sym_loc.getFile() }); - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const sym = macho_file.getSymbol(sym_loc); const header = macho_file.sections.items(.header)[sym.n_sect - 1]; @@ -189,7 +191,7 @@ fn refersLive(macho_file: *MachO, atom_index: Atom.Index, alive: AtomTable) bool const ctx = Atom.getRelocContext(macho_file, atom_index); for (relocs) |rel| { - const target = switch (cpu_arch) { + const reloc_target = switch (cpu_arch) { .aarch64 => switch (@as(macho.reloc_type_arm64, @enumFromInt(rel.r_type))) { .ARM64_RELOC_ADDEND => continue, else => Atom.parseRelocTarget(macho_file, .{ @@ -210,9 +212,9 @@ fn refersLive(macho_file: *MachO, atom_index: Atom.Index, alive: AtomTable) bool else => unreachable, }; - const object = macho_file.objects.items[target.getFile().?]; - 
const target_atom_index = object.getAtomIndexForSymbol(target.sym_index) orelse { - log.debug("atom for symbol '{s}' not found; skipping...", .{macho_file.getSymbolName(target)}); + const object = macho_file.objects.items[reloc_target.getFile().?]; + const target_atom_index = object.getAtomIndexForSymbol(reloc_target.sym_index) orelse { + log.debug("atom for symbol '{s}' not found; skipping...", .{macho_file.getSymbolName(reloc_target)}); continue; }; if (alive.contains(target_atom_index)) { @@ -271,7 +273,8 @@ fn mark(macho_file: *MachO, roots: AtomTable, alive: *AtomTable) void { fn markUnwindRecords(macho_file: *MachO, object_id: u32, alive: *AtomTable) void { const object = &macho_file.objects.items[object_id]; - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const unwind_records = object.getUnwindRecords(); @@ -310,29 +313,29 @@ fn markUnwindRecords(macho_file: *MachO, object_id: u32, alive: *AtomTable) void markEhFrameRecords(macho_file, object_id, atom_index, alive); } else { if (UnwindInfo.getPersonalityFunctionReloc(macho_file, object_id, record_id)) |rel| { - const target = Atom.parseRelocTarget(macho_file, .{ + const reloc_target = Atom.parseRelocTarget(macho_file, .{ .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); - const target_sym = macho_file.getSymbol(target); + const target_sym = macho_file.getSymbol(reloc_target); if (!target_sym.undf()) { - const target_object = macho_file.objects.items[target.getFile().?]; - const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?; + const target_object = macho_file.objects.items[reloc_target.getFile().?]; + const target_atom_index = target_object.getAtomIndexForSymbol(reloc_target.sym_index).?; markLive(macho_file, target_atom_index, alive); } } if (UnwindInfo.getLsdaReloc(macho_file, object_id, record_id)) |rel| { - const target = Atom.parseRelocTarget(macho_file, .{ + const reloc_target = Atom.parseRelocTarget(macho_file, .{ .object_id = object_id, .rel = rel, .code = mem.asBytes(&record), .base_offset = @as(i32, @intCast(record_id * @sizeOf(macho.compact_unwind_entry))), }); - const target_object = macho_file.objects.items[target.getFile().?]; - const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?; + const target_object = macho_file.objects.items[reloc_target.getFile().?]; + const target_atom_index = target_object.getAtomIndexForSymbol(reloc_target.sym_index).?; markLive(macho_file, target_atom_index, alive); } } @@ -341,7 +344,8 @@ fn markUnwindRecords(macho_file: *MachO, object_id: u32, alive: *AtomTable) void } fn markEhFrameRecords(macho_file: *MachO, object_id: u32, atom_index: Atom.Index, alive: *AtomTable) void { - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const object = &macho_file.objects.items[object_id]; var it = object.getEhFrameRecordsIterator(); var inner_syms_it = Atom.getInnerSymbolsIterator(macho_file, atom_index); @@ -361,16 +365,16 @@ fn markEhFrameRecords(macho_file: *MachO, object_id: u32, atom_index: Atom.Index // Mark FDE references which should include any referenced LSDA record const relocs = eh_frame.getRelocs(macho_file, object_id, fde_offset); for (relocs) |rel| { - const target = 
Atom.parseRelocTarget(macho_file, .{ + const reloc_target = Atom.parseRelocTarget(macho_file, .{ .object_id = object_id, .rel = rel, .code = fde.data, .base_offset = @as(i32, @intCast(fde_offset)) + 4, }); - const target_sym = macho_file.getSymbol(target); + const target_sym = macho_file.getSymbol(reloc_target); if (!target_sym.undf()) blk: { - const target_object = macho_file.objects.items[target.getFile().?]; - const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index) orelse + const target_object = macho_file.objects.items[reloc_target.getFile().?]; + const target_atom_index = target_object.getAtomIndexForSymbol(reloc_target.sym_index) orelse break :blk; markLive(macho_file, target_atom_index, alive); } @@ -394,11 +398,11 @@ fn markEhFrameRecords(macho_file: *MachO, object_id: u32, atom_index: Atom.Index // Mark CIE references which should include any referenced personalities // that are defined locally. - if (cie.getPersonalityPointerReloc(macho_file, object_id, cie_offset)) |target| { - const target_sym = macho_file.getSymbol(target); + if (cie.getPersonalityPointerReloc(macho_file, object_id, cie_offset)) |reloc_target| { + const target_sym = macho_file.getSymbol(reloc_target); if (!target_sym.undf()) { - const target_object = macho_file.objects.items[target.getFile().?]; - const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?; + const target_object = macho_file.objects.items[reloc_target.getFile().?]; + const target_atom_index = target_object.getAtomIndexForSymbol(reloc_target.sym_index).?; markLive(macho_file, target_atom_index, alive); } } diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig index 0f021a569c..8223c830e0 100644 --- a/src/link/MachO/eh_frame.zig +++ b/src/link/MachO/eh_frame.zig @@ -35,7 +35,8 @@ pub fn calcSectionSize(macho_file: *MachO, unwind_info: *const UnwindInfo) error sect.@"align" = 3; sect.size = 0; - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const gpa = macho_file.base.allocator; var size: u32 = 0; @@ -86,7 +87,8 @@ pub fn write(macho_file: *MachO, unwind_info: *UnwindInfo) !void { const seg_id = macho_file.sections.items(.segment_index)[sect_id]; const seg = macho_file.segments.items[seg_id]; - const cpu_arch = macho_file.base.options.target.cpu.arch; + const target = macho_file.base.comp.root_mod.resolved_target.result; + const cpu_arch = target.cpu.arch; const gpa = macho_file.base.allocator; var eh_records = std.AutoArrayHashMap(u32, EhFrameRecord(true)).init(gpa); @@ -109,11 +111,11 @@ pub fn write(macho_file: *MachO, unwind_info: *UnwindInfo) !void { for (object.exec_atoms.items) |atom_index| { var inner_syms_it = Atom.getInnerSymbolsIterator(macho_file, atom_index); - while (inner_syms_it.next()) |target| { - const fde_record_offset = object.eh_frame_records_lookup.get(target) orelse continue; + while (inner_syms_it.next()) |reloc_target| { + const fde_record_offset = object.eh_frame_records_lookup.get(reloc_target) orelse continue; if (object.eh_frame_relocs_lookup.get(fde_record_offset).?.dead) continue; - const record_id = unwind_info.records_lookup.get(target) orelse continue; + const record_id = unwind_info.records_lookup.get(reloc_target) orelse continue; const record = &unwind_info.records.items[record_id]; // TODO skip this check if no __compact_unwind is present @@ -153,7 +155,7 @@ pub fn write(macho_file: *MachO, unwind_info: *UnwindInfo) !void { 
             .aarch64 => {}, // relocs take care of LSDA pointers
             .x86_64 => {
                 // We need to relocate target symbol address ourselves.
-                const atom_sym = macho_file.getSymbol(target);
+                const atom_sym = macho_file.getSymbol(reloc_target);
                 try fde_record.setTargetSymbolAddress(atom_sym.n_value, .{
                     .base_addr = sect.addr,
                     .base_offset = eh_frame_offset,
@@ -278,7 +280,8 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
             object_id: u32,
             source_offset: u32,
         ) ?SymbolWithLoc {
-            const cpu_arch = macho_file.base.options.target.cpu.arch;
+            const target = macho_file.base.comp.root_mod.resolved_target.result;
+            const cpu_arch = target.cpu.arch;
             const relocs = getRelocs(macho_file, object_id, source_offset);
             for (relocs) |rel| {
                 switch (cpu_arch) {
@@ -301,13 +304,13 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
                     },
                     else => unreachable,
                 }
-                const target = Atom.parseRelocTarget(macho_file, .{
+                const reloc_target = Atom.parseRelocTarget(macho_file, .{
                     .object_id = object_id,
                     .rel = rel,
                     .code = rec.data,
                     .base_offset = @as(i32, @intCast(source_offset)) + 4,
                 });
-                return target;
+                return reloc_target;
             }
             return null;
         }
@@ -319,11 +322,12 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
         }) !void {
             comptime assert(is_mutable);
 
-            const cpu_arch = macho_file.base.options.target.cpu.arch;
+            const target = macho_file.base.comp.root_mod.resolved_target.result;
+            const cpu_arch = target.cpu.arch;
             const relocs = getRelocs(macho_file, object_id, ctx.source_offset);
 
             for (relocs) |rel| {
-                const target = Atom.parseRelocTarget(macho_file, .{
+                const reloc_target = Atom.parseRelocTarget(macho_file, .{
                     .object_id = object_id,
                     .rel = rel,
                     .code = rec.data,
@@ -340,14 +344,14 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
                             // Address of the __eh_frame in the source object file
                         },
                         .ARM64_RELOC_POINTER_TO_GOT => {
-                            const target_addr = macho_file.getGotEntryAddress(target).?;
+                            const target_addr = macho_file.getGotEntryAddress(reloc_target).?;
                             const result = math.cast(i32, @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr))) orelse return error.Overflow;
                             mem.writeInt(i32, rec.data[rel_offset..][0..4], result, .little);
                         },
                         .ARM64_RELOC_UNSIGNED => {
                             assert(rel.r_extern == 1);
-                            const target_addr = Atom.getRelocTargetAddress(macho_file, target, false);
+                            const target_addr = Atom.getRelocTargetAddress(macho_file, reloc_target, false);
                             const result = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr));
                             mem.writeInt(i64, rec.data[rel_offset..][0..8], @as(i64, @intCast(result)), .little);
                         },
@@ -358,7 +362,7 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
                         const rel_type = @as(macho.reloc_type_x86_64, @enumFromInt(rel.r_type));
                         switch (rel_type) {
                             .X86_64_RELOC_GOT => {
-                                const target_addr = macho_file.getGotEntryAddress(target).?;
+                                const target_addr = macho_file.getGotEntryAddress(reloc_target).?;
                                 const addend = mem.readInt(i32, rec.data[rel_offset..][0..4], .little);
                                 const adjusted_target_addr = @as(u64, @intCast(@as(i64, @intCast(target_addr)) + addend));
                                 const disp = try Relocation.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
@@ -374,7 +378,8 @@ pub fn EhFrameRecord(comptime is_mutable: bool) type {
         pub fn getCiePointerSource(rec: Record, object_id: u32, macho_file: *MachO, offset: u32) u32 {
             assert(rec.tag == .fde);
-            const cpu_arch = macho_file.base.options.target.cpu.arch;
+            const target = macho_file.base.comp.root_mod.resolved_target.result;
+            const cpu_arch = target.cpu.arch;
             const addend = mem.readInt(u32, rec.data[0..4], .little);
             switch (cpu_arch) {
                 .aarch64 => {
diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig
index f064415739..be8ac63642 100644
--- a/src/link/MachO/load_commands.zig
+++ b/src/link/MachO/load_commands.zig
@@ -472,11 +472,11 @@ pub fn inferSdkVersion(gpa: Allocator, comp: *const Compilation) ?std.SemanticVe
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
-    const options = comp.bin_file.options;
+    const macho_file = comp.bin_file.cast(MachO).?;
 
-    const sdk_layout = options.darwin_sdk_layout orelse return null;
+    const sdk_layout = macho_file.sdk_layout orelse return null;
     const sdk_dir = switch (sdk_layout) {
-        .sdk => options.sysroot.?,
+        .sdk => macho_file.sysroot.?,
         .vendored => std.fs.path.join(arena, &.{ comp.zig_lib_directory.path.?, "libc", "darwin" }) catch return null,
     };
     if (readSdkVersionFromSettings(arena, sdk_dir)) |ver| {
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index e0aa82f243..8b0aa90f96 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -458,7 +458,7 @@ pub fn linkWithZld(
     }
 
     try writeAtoms(macho_file);
-    if (macho_file.base.options.target.cpu.arch == .aarch64) try writeThunks(macho_file);
+    if (target.cpu.arch == .aarch64) try writeThunks(macho_file);
     try writeDyldPrivateAtom(macho_file);
 
     if (macho_file.stubs_section_index) |_| {
@@ -557,7 +557,7 @@ pub fn linkWithZld(
         .version = 0,
     });
     {
-        const platform = Platform.fromTarget(macho_file.base.options.target);
+        const platform = Platform.fromTarget(target);
         const sdk_version: ?std.SemanticVersion = load_commands.inferSdkVersion(arena, comp);
         if (platform.isBuildVersionCompatible()) {
             try load_commands.writeBuildVersionLC(platform, sdk_version, lc_writer);
@@ -610,7 +610,8 @@ pub fn linkWithZld(
 
 fn createSegments(macho_file: *MachO) !void {
     const gpa = macho_file.base.allocator;
-    const page_size = MachO.getPageSize(macho_file.base.options.target.cpu.arch);
+    const target = macho_file.base.comp.root_mod.resolved_target.result;
+    const page_size = MachO.getPageSize(target.cpu.arch);
     const aligned_pagezero_vmsize = mem.alignBackward(u64, macho_file.pagezero_vmsize, page_size);
     if (macho_file.base.comp.config.output_mode != .Lib and aligned_pagezero_vmsize > 0) {
         if (aligned_pagezero_vmsize != macho_file.pagezero_vmsize) {
@@ -755,7 +756,8 @@ fn writeDyldPrivateAtom(macho_file: *MachO) !void {
 }
 
 fn writeThunks(macho_file: *MachO) !void {
-    assert(macho_file.base.options.target.cpu.arch == .aarch64);
+    const target = macho_file.base.comp.root_mod.resolved_target.result;
+    assert(target.cpu.arch == .aarch64);
     const gpa = macho_file.base.allocator;
     const sect_id = macho_file.text_section_index orelse return;
@@ -791,7 +793,8 @@ fn writePointerEntries(macho_file: *MachO, sect_id: u8, table: anytype) !void {
 
 fn writeStubs(macho_file: *MachO) !void {
     const gpa = macho_file.base.allocator;
-    const cpu_arch = macho_file.base.options.target.cpu.arch;
+    const target = macho_file.base.comp.root_mod.resolved_target.result;
+    const cpu_arch = target.cpu.arch;
     const stubs_header = macho_file.sections.items(.header)[macho_file.stubs_section_index.?];
     const la_symbol_ptr_header = macho_file.sections.items(.header)[macho_file.la_symbol_ptr_section_index.?];
@@ -813,7 +816,8 @@ fn writeStubs(macho_file: *MachO) !void {
 
 fn writeStubHelpers(macho_file: *MachO) !void {
     const gpa = macho_file.base.allocator;
-    const cpu_arch = macho_file.base.options.target.cpu.arch;
+    const target = macho_file.base.comp.root_mod.resolved_target.result;
+    const cpu_arch = target.cpu.arch;
     const stub_helper_header = macho_file.sections.items(.header)[macho_file.stub_helper_section_index.?];
     const capacity = math.cast(usize, stub_helper_header.size) orelse return error.Overflow;
@@ -856,7 +860,8 @@ fn writeStubHelpers(macho_file: *MachO) !void {
 
 fn writeLaSymbolPtrs(macho_file: *MachO) !void {
     const gpa = macho_file.base.allocator;
-    const cpu_arch = macho_file.base.options.target.cpu.arch;
+    const target = macho_file.base.comp.root_mod.resolved_target.result;
+    const cpu_arch = target.cpu.arch;
     const la_symbol_ptr_header = macho_file.sections.items(.header)[macho_file.la_symbol_ptr_section_index.?];
     const stub_helper_header = macho_file.sections.items(.header)[macho_file.stub_helper_section_index.?];
@@ -964,11 +969,12 @@ fn pruneAndSortSections(macho_file: *MachO) !void {
 }
 
 fn calcSectionSizes(macho_file: *MachO) !void {
+    const target = macho_file.base.comp.root_mod.resolved_target.result;
     const slice = macho_file.sections.slice();
     for (slice.items(.header), 0..) |*header, sect_id| {
         if (header.size == 0) continue;
         if (macho_file.text_section_index) |txt| {
-            if (txt == sect_id and macho_file.base.options.target.cpu.arch == .aarch64) continue;
+            if (txt == sect_id and target.cpu.arch == .aarch64) continue;
         }
 
         var atom_index = slice.items(.first_atom_index)[sect_id] orelse continue;
@@ -991,7 +997,7 @@ fn calcSectionSizes(macho_file: *MachO) !void {
         }
     }
 
-    if (macho_file.text_section_index != null and macho_file.base.options.target.cpu.arch == .aarch64) {
+    if (macho_file.text_section_index != null and target.cpu.arch == .aarch64) {
         // Create jump/branch range extenders if needed.
         try thunks.createThunks(macho_file, macho_file.text_section_index.?);
     }
@@ -1043,7 +1049,7 @@ fn calcSectionSizes(macho_file: *MachO) !void {
         header.@"align" = 3;
     }
 
-    const cpu_arch = macho_file.base.options.target.cpu.arch;
+    const cpu_arch = target.cpu.arch;
 
     if (macho_file.stubs_section_index) |sect_id| {
         const header = &macho_file.sections.items(.header)[sect_id];
@@ -1093,6 +1099,7 @@ fn getSegmentAllocBase(macho_file: *MachO, segment_index: u8) struct { vmaddr: u
 }
 
 fn allocateSegment(macho_file: *MachO, segment_index: u8, init_size: u64) !void {
+    const target = macho_file.base.comp.root_mod.resolved_target.result;
     const segment = &macho_file.segments.items[segment_index];
 
     if (mem.eql(u8, segment.segName(), "__PAGEZERO")) return; // allocated upon creation
@@ -1175,7 +1182,7 @@ fn allocateSegment(macho_file: *MachO, segment_index: u8, init_size: u64) !void
         segment.vmsize = start;
     }
 
-    const page_size = MachO.getPageSize(macho_file.base.options.target.cpu.arch);
+    const page_size = MachO.getPageSize(target.cpu.arch);
     segment.filesize = mem.alignForward(u64, segment.filesize, page_size);
     segment.vmsize = mem.alignForward(u64, segment.vmsize, page_size);
 }
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index ece949b01e..247986d767 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -206,7 +206,8 @@ pub const Atom = struct {
 
     // asserts that self.got_index != null
     pub fn getOffsetTableAddress(self: Atom, plan9: *Plan9) u64 {
-        const ptr_bytes = @divExact(plan9.base.options.target.ptrBitWidth(), 8);
+        const target = plan9.base.comp.root_mod.resolved_target.result;
+        const ptr_bytes = @divExact(target.ptrBitWidth(), 8);
         const got_addr = plan9.bases.data;
         const got_index = self.got_index.?;
         return got_addr + got_index * ptr_bytes;
@@ -293,9 +294,11 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
     };
 }
 
-pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
-    if (options.use_llvm)
-        return error.LLVMBackendDoesNotSupportPlan9;
+pub fn createEmpty(arena: Allocator, options: link.File.OpenOptions) !*Plan9 {
+    _ = arena;
+    const target = options.comp.root_mod.resolved_target.result;
+    const gpa = options.comp.gpa;
+
     const sixtyfour_bit: bool = switch (options.target.ptrBitWidth()) {
         0...32 => false,
         33...64 => true,
@@ -315,7 +318,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
         },
         .sixtyfour_bit = sixtyfour_bit,
         .bases = undefined,
-        .magic = try aout.magicFromArch(self.base.options.target.cpu.arch),
+        .magic = try aout.magicFromArch(target.cpu.arch),
     };
     // a / will always be in a file path
     try self.file_segments.put(gpa, "/", 1);
@@ -399,6 +402,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
     }
     const gpa = self.base.comp.gpa;
+    const target = self.base.comp.root_mod.resolved_target.result;
     const func = mod.funcInfo(func_index);
     const decl_index = func.owner_decl;
     const decl = mod.declPtr(decl_index);
@@ -414,7 +418,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air:
         .end_line = undefined,
         .pcop_change_index = null,
         // we have already checked the target in the linker to make sure it is compatable
-        .pc_quanta = aout.getPCQuant(self.base.options.target.cpu.arch) catch unreachable,
+        .pc_quanta = aout.getPCQuant(target.cpu.arch) catch unreachable,
     };
 
     defer dbg_info_output.dbg_line.deinit();
@@ -658,6 +662,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
     }
    const gpa = comp.gpa;
+    const target = comp.root_mod.resolved_target.result;
 
     const tracy = trace(@src());
     defer tracy.end();
@@ -749,9 +754,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             atom.offset = off;
             log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off });
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian());
             } else {
-                mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian());
             }
             self.syms.items[atom.sym_index.?].value = off;
             if (mod.decl_exports.get(decl_index)) |exports| {
@@ -778,9 +783,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             text_i += code.len;
             text_atom.offset = off;
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian());
             } else {
-                mem.writeInt(u64, got_table[text_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u64, got_table[text_atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian());
             }
             self.syms.items[text_atom.sym_index.?].value = off;
         }
@@ -791,9 +796,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
         const val = self.getAddr(text_i, .t);
         self.syms.items[etext_atom.sym_index.?].value = val;
         if (!self.sixtyfour_bit) {
-            mem.writeInt(u32, got_table[etext_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(val)), self.base.options.target.cpu.arch.endian());
+            mem.writeInt(u32, got_table[etext_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(val)), target.cpu.arch.endian());
         } else {
-            mem.writeInt(u64, got_table[etext_atom.got_index.? * 8 ..][0..8], val, self.base.options.target.cpu.arch.endian());
+            mem.writeInt(u64, got_table[etext_atom.got_index.? * 8 ..][0..8], val, target.cpu.arch.endian());
         }
     }
     // global offset table is in data
@@ -815,9 +820,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             data_i += code.len;
             atom.offset = off;
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian());
             } else {
-                mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian());
             }
             self.syms.items[atom.sym_index.?].value = off;
             if (mod.decl_exports.get(decl_index)) |exports| {
@@ -838,9 +843,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             data_i += code.len;
             atom.offset = off;
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian());
             } else {
-                mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian());
             }
             self.syms.items[atom.sym_index.?].value = off;
         }
@@ -859,9 +864,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             data_i += code.len;
             atom.offset = off;
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian());
             } else {
-                mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian());
             }
             self.syms.items[atom.sym_index.?].value = off;
         }
@@ -879,9 +884,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
             data_i += code.len;
             data_atom.offset = off;
             if (!self.sixtyfour_bit) {
-                mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian());
             } else {
-                mem.writeInt(u64, got_table[data_atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+                mem.writeInt(u64, got_table[data_atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian());
             }
             self.syms.items[data_atom.sym_index.?].value = off;
         }
@@ -891,9 +896,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
         const val = self.getAddr(data_i, .b);
         self.syms.items[edata_atom.sym_index.?].value = val;
         if (!self.sixtyfour_bit) {
-            mem.writeInt(u32, got_table[edata_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(val)), self.base.options.target.cpu.arch.endian());
+            mem.writeInt(u32, got_table[edata_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(val)), target.cpu.arch.endian());
         } else {
-            mem.writeInt(u64, got_table[edata_atom.got_index.? * 8 ..][0..8], val, self.base.options.target.cpu.arch.endian());
+            mem.writeInt(u64, got_table[edata_atom.got_index.? * 8 ..][0..8], val, target.cpu.arch.endian());
        }
     }
     // end symbol (same as edata because native backends don't do .bss yet)
@@ -902,10 +907,10 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
         const val = self.getAddr(data_i, .b);
         self.syms.items[end_atom.sym_index.?].value = val;
         if (!self.sixtyfour_bit) {
-            mem.writeInt(u32, got_table[end_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(val)), self.base.options.target.cpu.arch.endian());
+            mem.writeInt(u32, got_table[end_atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(val)), target.cpu.arch.endian());
         } else {
             log.debug("write end (got_table[0x{x}] = 0x{x})", .{ end_atom.got_index.? * 8, val });
-            mem.writeInt(u64, got_table[end_atom.got_index.? * 8 ..][0..8], val, self.base.options.target.cpu.arch.endian());
+            mem.writeInt(u64, got_table[end_atom.got_index.? * 8 ..][0..8], val, target.cpu.arch.endian());
         }
     }
 }
@@ -942,7 +947,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
         const source_atom = self.getAtom(source_atom_index);
         const source_atom_symbol = self.syms.items[source_atom.sym_index.?];
         const code = source_atom.code.getCode(self);
-        const endian = self.base.options.target.cpu.arch.endian();
+        const endian = target.cpu.arch.endian();
         for (kv.value_ptr.items) |reloc| {
             const offset = reloc.offset;
             const addend = reloc.addend;
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 28eeaedba4..0a728c7b6b 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -1119,7 +1119,8 @@ fn validateFeatures(
     to_emit: *[@typeInfo(types.Feature.Tag).Enum.fields.len]bool,
     emit_features_count: *u32,
 ) !void {
-    const cpu_features = wasm.base.options.target.cpu.features;
+    const target = wasm.base.comp.root_mod.resolved_target.result;
+    const cpu_features = target.cpu.features;
     const infer = cpu_features.isEmpty(); // when the user did not define any features, we infer them from linked objects.
     const known_features_count = @typeInfo(types.Feature.Tag).Enum.fields.len;
@@ -1752,6 +1753,7 @@ pub fn getDeclVAddr(
     decl_index: InternPool.DeclIndex,
     reloc_info: link.File.RelocInfo,
 ) !u64 {
+    const target = wasm.base.comp.root_mod.resolved_target.result;
     const gpa = wasm.base.comp.gpa;
     const mod = wasm.base.comp.module.?;
     const decl = mod.declPtr(decl_index);
@@ -1762,7 +1764,7 @@ pub fn getDeclVAddr(
     assert(reloc_info.parent_atom_index != 0);
     const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
     const atom = wasm.getAtomPtr(atom_index);
-    const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
+    const is_wasm32 = target.cpu.arch == .wasm32;
     if (decl.ty.zigTypeTag(mod) == .Fn) {
         assert(reloc_info.addend == 0); // addend not allowed for function relocations
         // We found a function pointer, so add it to our table,
@@ -1825,12 +1827,13 @@ pub fn lowerAnonDecl(
 
 pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
     const gpa = wasm.base.comp.gpa;
+    const target = wasm.base.comp.root_mod.resolved_target.result;
     const atom_index = wasm.anon_decls.get(decl_val).?;
     const target_symbol_index = wasm.getAtom(atom_index).getSymbolIndex().?;
 
     const parent_atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
     const parent_atom = wasm.getAtomPtr(parent_atom_index);
-    const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
+    const is_wasm32 = target.cpu.arch == .wasm32;
     const mod = wasm.base.comp.module.?;
     const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val));
     if (ty.zigTypeTag(mod) == .Fn) {
@@ -4557,7 +4560,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
         break :blk null;
     };
 
-    const target = wasm.base.options.target;
+    const target = wasm.base.comp.root_mod.resolved_target.result;
     const id_symlink_basename = "lld.id";