Merge remote-tracking branch 'origin/master' into llvm15

commit 37cdb5dbf9
Andrew Kelley 2022-09-09 10:26:17 -07:00
52 changed files with 2479 additions and 1081 deletions

View File

@@ -61,6 +61,7 @@ jobs:
   - pwsh: |
       Set-Variable -Name ZIGINSTALLDIR -Value "$(Get-Location)\stage3-release"
+      Set-Variable -Name ZIGPREFIXPATH -Value "$(Get-Location)\$(ZIG_LLVM_CLANG_LLD_NAME)"
      function CheckLastExitCode {
        if (!$?) {
@@ -72,8 +73,7 @@ jobs:
      & "$ZIGINSTALLDIR\bin\zig.exe" build test docs `
        --search-prefix "$ZIGPREFIXPATH" `
        -Dstatic-llvm `
-       -Dskip-non-native `
-       -Dskip-stage2-tests
+       -Dskip-non-native
      CheckLastExitCode
    name: test
    displayName: 'Test'

View File

@@ -1210,7 +1210,7 @@ fn genHtml(
     var env_map = try process.getEnvMap(allocator);
     try env_map.put("ZIG_DEBUG_COLOR", "1");
-    const host = try std.zig.system.NativeTargetInfo.detect(allocator, .{});
+    const host = try std.zig.system.NativeTargetInfo.detect(.{});
     const builtin_code = try getBuiltinCode(allocator, &env_map, zig_exe);
     for (toc.nodes) |node| {
@@ -1474,7 +1474,6 @@ fn genHtml(
         .arch_os_abi = triple,
     });
     const target_info = try std.zig.system.NativeTargetInfo.detect(
-        allocator,
         cross_target,
     );
     switch (host.getExternalExecutor(target_info, .{

View File

@@ -171,7 +171,7 @@ pub const Builder = struct {
     const env_map = try allocator.create(EnvMap);
     env_map.* = try process.getEnvMap(allocator);
-    const host = try NativeTargetInfo.detect(allocator, .{});
+    const host = try NativeTargetInfo.detect(.{});
     const self = try allocator.create(Builder);
     self.* = Builder{
@@ -1798,7 +1798,7 @@ pub const LibExeObjStep = struct {
     }
     fn computeOutFileNames(self: *LibExeObjStep) void {
-        self.target_info = NativeTargetInfo.detect(self.builder.allocator, self.target) catch
+        self.target_info = NativeTargetInfo.detect(self.target) catch
            unreachable;
        const target = self.target_info.target;
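
The detect() call sites above (and the ones below) all change the same way: std.zig.system.NativeTargetInfo.detect no longer takes an allocator. A minimal caller sketch against the new signature (illustrative only, not part of the commit):

    const std = @import("std");

    pub fn main() !void {
        // After this commit, detect() takes only a CrossTarget;
        // `.{}` asks for the native host target.
        const host = try std.zig.system.NativeTargetInfo.detect(.{});
        std.debug.print("host: {s}-{s}\n", .{
            @tagName(host.target.cpu.arch),
            @tagName(host.target.os.tag),
        });
    }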

View File

@@ -158,7 +158,7 @@ fn warnAboutForeignBinaries(step: *EmulatableRunStep) void {
     const host_name = builder.host.target.zigTriple(builder.allocator) catch unreachable;
     const foreign_name = artifact.target.zigTriple(builder.allocator) catch unreachable;
-    const target_info = std.zig.system.NativeTargetInfo.detect(builder.allocator, artifact.target) catch unreachable;
+    const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch unreachable;
     const need_cross_glibc = artifact.target.isGnuLibC() and artifact.is_linking_libc;
     switch (builder.host.getExternalExecutor(target_info, .{
         .qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,

View File

@@ -990,6 +990,8 @@ pub const File = struct {
         return index;
     }
+    /// On Windows, this function currently does alter the file pointer.
+    /// https://github.com/ziglang/zig/issues/12783
     pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize {
         if (is_windows) {
             return windows.ReadFile(self.handle, buffer, offset, self.intended_io_mode);
@@ -1004,6 +1006,8 @@ pub const File = struct {
     /// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
     /// means the file reached the end. Reaching the end of a file is not an error condition.
+    /// On Windows, this function currently does alter the file pointer.
+    /// https://github.com/ziglang/zig/issues/12783
     pub fn preadAll(self: File, buffer: []u8, offset: u64) PReadError!usize {
         var index: usize = 0;
         while (index != buffer.len) {
@@ -1058,6 +1062,8 @@ pub const File = struct {
     }
     /// See https://github.com/ziglang/zig/issues/7699
+    /// On Windows, this function currently does alter the file pointer.
+    /// https://github.com/ziglang/zig/issues/12783
     pub fn preadv(self: File, iovecs: []const os.iovec, offset: u64) PReadError!usize {
         if (is_windows) {
             // TODO improve this to use ReadFileScatter
@@ -1079,6 +1085,8 @@ pub const File = struct {
     /// The `iovecs` parameter is mutable because this function needs to mutate the fields in
     /// order to handle partial reads from the underlying OS layer.
     /// See https://github.com/ziglang/zig/issues/7699
+    /// On Windows, this function currently does alter the file pointer.
+    /// https://github.com/ziglang/zig/issues/12783
     pub fn preadvAll(self: File, iovecs: []os.iovec, offset: u64) PReadError!usize {
         if (iovecs.len == 0) return 0;
@@ -1122,6 +1130,8 @@ pub const File = struct {
         }
     }
+    /// On Windows, this function currently does alter the file pointer.
+    /// https://github.com/ziglang/zig/issues/12783
     pub fn pwrite(self: File, bytes: []const u8, offset: u64) PWriteError!usize {
         if (is_windows) {
             return windows.WriteFile(self.handle, bytes, offset, self.intended_io_mode);
@@ -1134,6 +1144,8 @@ pub const File = struct {
         }
     }
+    /// On Windows, this function currently does alter the file pointer.
+    /// https://github.com/ziglang/zig/issues/12783
     pub fn pwriteAll(self: File, bytes: []const u8, offset: u64) PWriteError!void {
         var index: usize = 0;
         while (index < bytes.len) {
@@ -1179,6 +1191,8 @@ pub const File = struct {
     }
     /// See https://github.com/ziglang/zig/issues/7699
+    /// On Windows, this function currently does alter the file pointer.
+    /// https://github.com/ziglang/zig/issues/12783
     pub fn pwritev(self: File, iovecs: []os.iovec_const, offset: u64) PWriteError!usize {
         if (is_windows) {
             // TODO improve this to use WriteFileScatter
@@ -1197,6 +1211,8 @@ pub const File = struct {
     /// The `iovecs` parameter is mutable because this function needs to mutate the fields in
     /// order to handle partial writes from the underlying OS layer.
     /// See https://github.com/ziglang/zig/issues/7699
+    /// On Windows, this function currently does alter the file pointer.
+    /// https://github.com/ziglang/zig/issues/12783
     pub fn pwritevAll(self: File, iovecs: []os.iovec_const, offset: u64) PWriteError!void {
         if (iovecs.len == 0) return;

View File

@@ -36,6 +36,10 @@ pub const default_mode: ModeOverride = if (is_async) Mode.evented else .blocking
 fn getStdOutHandle() os.fd_t {
     if (builtin.os.tag == .windows) {
+        if (builtin.zig_backend == .stage2_x86_64) {
+            // TODO: this is just a temporary workaround until we advance x86 backend further along.
+            return os.windows.GetStdHandle(os.windows.STD_OUTPUT_HANDLE) catch os.windows.INVALID_HANDLE_VALUE;
+        }
         return os.windows.peb().ProcessParameters.hStdOutput;
     }
@@ -58,6 +62,10 @@ pub fn getStdOut() File {
 fn getStdErrHandle() os.fd_t {
     if (builtin.os.tag == .windows) {
+        if (builtin.zig_backend == .stage2_x86_64) {
+            // TODO: this is just a temporary workaround until we advance x86 backend further along.
+            return os.windows.GetStdHandle(os.windows.STD_ERROR_HANDLE) catch os.windows.INVALID_HANDLE_VALUE;
+        }
         return os.windows.peb().ProcessParameters.hStdError;
     }
@@ -80,6 +88,10 @@ pub fn getStdErr() File {
 fn getStdInHandle() os.fd_t {
     if (builtin.os.tag == .windows) {
+        if (builtin.zig_backend == .stage2_x86_64) {
+            // TODO: this is just a temporary workaround until we advance x86 backend further along.
+            return os.windows.GetStdHandle(os.windows.STD_INPUT_HANDLE) catch os.windows.INVALID_HANDLE_VALUE;
+        }
         return os.windows.peb().ProcessParameters.hStdInput;
     }

View File

@ -2,7 +2,7 @@ const std = @import("std");
const uefi = std.os.uefi; const uefi = std.os.uefi;
const Status = uefi.Status; const Status = uefi.Status;
const EfiBlockMedia = extern struct { pub const EfiBlockMedia = extern struct {
/// The current media ID. If the media changes, this value is changed. /// The current media ID. If the media changes, this value is changed.
media_id: u32, media_id: u32,
@ -38,7 +38,7 @@ const EfiBlockMedia = extern struct {
optimal_transfer_length_granularity: u32, optimal_transfer_length_granularity: u32,
}; };
const BlockIoProtocol = extern struct { pub const BlockIoProtocol = extern struct {
const Self = @This(); const Self = @This();
revision: u64, revision: u64,

View File

@ -348,7 +348,13 @@ pub extern "kernel32" fn WriteFile(
in_out_lpOverlapped: ?*OVERLAPPED, in_out_lpOverlapped: ?*OVERLAPPED,
) callconv(WINAPI) BOOL; ) callconv(WINAPI) BOOL;
pub extern "kernel32" fn WriteFileEx(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpOverlapped: *OVERLAPPED, lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE) callconv(WINAPI) BOOL; pub extern "kernel32" fn WriteFileEx(
hFile: HANDLE,
lpBuffer: [*]const u8,
nNumberOfBytesToWrite: DWORD,
lpOverlapped: *OVERLAPPED,
lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
) callconv(WINAPI) BOOL;
pub extern "kernel32" fn LoadLibraryW(lpLibFileName: [*:0]const u16) callconv(WINAPI) ?HMODULE; pub extern "kernel32" fn LoadLibraryW(lpLibFileName: [*:0]const u16) callconv(WINAPI) ?HMODULE;

View File

@ -9,7 +9,7 @@ const builtin = @import("builtin");
pub fn suggestVectorSizeForCpu(comptime T: type, comptime cpu: std.Target.Cpu) ?usize { pub fn suggestVectorSizeForCpu(comptime T: type, comptime cpu: std.Target.Cpu) ?usize {
// This is guesswork, if you have better suggestions can add it or edit the current here // This is guesswork, if you have better suggestions can add it or edit the current here
// This can run in comptime only, but stage 1 fails at it, stage 2 can understand it // This can run in comptime only, but stage 1 fails at it, stage 2 can understand it
const element_bit_size = @maximum(8, std.math.ceilPowerOfTwo(T, @bitSizeOf(T)) catch unreachable); const element_bit_size = @maximum(8, std.math.ceilPowerOfTwo(u16, @bitSizeOf(T)) catch unreachable);
const vector_bit_size: u16 = blk: { const vector_bit_size: u16 = blk: {
if (cpu.arch.isX86()) { if (cpu.arch.isX86()) {
if (T == bool and std.Target.x86.featureSetHas(.prefer_mask_registers)) return 64; if (T == bool and std.Target.x86.featureSetHas(.prefer_mask_registers)) return 64;
@ -57,6 +57,15 @@ pub fn suggestVectorSize(comptime T: type) ?usize {
return suggestVectorSizeForCpu(T, builtin.cpu); return suggestVectorSizeForCpu(T, builtin.cpu);
} }
test "suggestVectorSizeForCpu works with signed and unsigned values" {
comptime var cpu = std.Target.Cpu.baseline(std.Target.Cpu.Arch.x86_64);
comptime cpu.features.addFeature(@enumToInt(std.Target.x86.Feature.avx512f));
const signed_integer_size = suggestVectorSizeForCpu(i32, cpu).?;
const unsigned_integer_size = suggestVectorSizeForCpu(u32, cpu).?;
try std.testing.expectEqual(@as(usize, 16), unsigned_integer_size);
try std.testing.expectEqual(@as(usize, 16), signed_integer_size);
}
fn vectorLength(comptime VectorType: type) comptime_int { fn vectorLength(comptime VectorType: type) comptime_int {
return switch (@typeInfo(VectorType)) { return switch (@typeInfo(VectorType)) {
.Vector => |info| info.len, .Vector => |info| info.len,
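
The one-line fix above swaps the compute type passed to std.math.ceilPowerOfTwo from the element type T to u16, presumably because ceilPowerOfTwo wants an unsigned integer compute type, which a signed T such as i32 is not; u16 always suffices to hold a bit count. A sketch of the corrected call in isolation:

    const std = @import("std");

    test "element bit size rounded up to a power of two" {
        // u16 comfortably holds any @bitSizeOf(T) here, and works
        // even when the element type itself (i32) is signed.
        const element_bit_size = @maximum(8, std.math.ceilPowerOfTwo(u16, @bitSizeOf(i32)) catch unreachable);
        try std.testing.expectEqual(@as(u16, 32), element_bit_size);
    }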

View File

@@ -36,6 +36,10 @@ comptime {
         if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) {
             @export(main2, .{ .name = "main" });
         }
+    } else if (builtin.os.tag == .windows) {
+        if (!@hasDecl(root, "wWinMainCRTStartup") and !@hasDecl(root, "mainCRTStartup")) {
+            @export(wWinMainCRTStartup2, .{ .name = "wWinMainCRTStartup" });
+        }
     } else if (builtin.os.tag == .wasi and @hasDecl(root, "main")) {
         @export(wasiMain2, .{ .name = "_start" });
     } else {

View File

@@ -28,6 +28,7 @@ pub const DetectError = error{
     SystemFdQuotaExceeded,
     DeviceBusy,
     OSVersionDetectionFail,
+    Unexpected,
 };
 /// Given a `CrossTarget`, which specifies in detail which parts of the target should be detected
@@ -36,8 +37,7 @@ pub const DetectError = error{
 /// relative to that.
 /// Any resources this function allocates are released before returning, and so there is no
 /// deinitialization method.
-/// TODO Remove the Allocator requirement from this function.
-pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
+pub fn detect(cross_target: CrossTarget) DetectError!NativeTargetInfo {
     var os = cross_target.getOsTag().defaultVersionRange(cross_target.getCpuArch());
     if (cross_target.os_tag == null) {
         switch (builtin.target.os.tag) {
@@ -198,7 +198,7 @@ pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
     } orelse backup_cpu_detection: {
         break :backup_cpu_detection Target.Cpu.baseline(cpu_arch);
     };
-    var result = try detectAbiAndDynamicLinker(allocator, cpu, os, cross_target);
+    var result = try detectAbiAndDynamicLinker(cpu, os, cross_target);
     // For x86, we need to populate some CPU feature flags depending on architecture
     // and mode:
     // * 16bit_mode => if the abi is code16
@@ -235,13 +235,20 @@ pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
     return result;
 }
-/// First we attempt to use the executable's own binary. If it is dynamically
-/// linked, then it should answer both the C ABI question and the dynamic linker question.
-/// If it is statically linked, then we try /usr/bin/env (or the file it references in shebang). If that does not provide the answer, then
-/// we fall back to the defaults.
-/// TODO Remove the Allocator requirement from this function.
+/// In the past, this function attempted to use the executable's own binary if it was dynamically
+/// linked to answer both the C ABI question and the dynamic linker question. However, this
+/// could be problematic on a system that uses a RUNPATH for the compiler binary, locking
+/// it to an older glibc version, while system binaries such as /usr/bin/env use a newer glibc
+/// version. The problem is that libc.so.6 glibc version will match that of the system while
+/// the dynamic linker will match that of the compiler binary. Executables with these versions
+/// mismatching will fail to run.
+///
+/// Therefore, this function works the same regardless of whether the compiler binary is
+/// dynamically or statically linked. It inspects `/usr/bin/env` as an ELF file to find the
+/// answer to these questions, or if there is a shebang line, then it chases the referenced
+/// file recursively. If that does not provide the answer, then the function falls back to
+/// defaults.
 fn detectAbiAndDynamicLinker(
-    allocator: Allocator,
     cpu: Target.Cpu,
     os: Target.Os,
     cross_target: CrossTarget,
@@ -279,8 +286,8 @@ fn detectAbiAndDynamicLinker(
     const ofmt = cross_target.ofmt orelse Target.ObjectFormat.default(os.tag, cpu.arch);
     for (all_abis) |abi| {
-        // This may be a nonsensical parameter. We detect this with error.UnknownDynamicLinkerPath and
-        // skip adding it to `ld_info_list`.
+        // This may be a nonsensical parameter. We detect this with
+        // error.UnknownDynamicLinkerPath and skip adding it to `ld_info_list`.
         const target: Target = .{
             .cpu = cpu,
             .os = os,
@@ -300,64 +307,6 @@ fn detectAbiAndDynamicLinker(
     // Best case scenario: the executable is dynamically linked, and we can iterate
     // over our own shared objects and find a dynamic linker.
-    self_exe: {
-        const lib_paths = try std.process.getSelfExeSharedLibPaths(allocator);
-        defer {
-            for (lib_paths) |lib_path| {
-                allocator.free(lib_path);
-            }
-            allocator.free(lib_paths);
-        }
-        var found_ld_info: LdInfo = undefined;
-        var found_ld_path: [:0]const u8 = undefined;
-        // Look for dynamic linker.
-        // This is O(N^M) but typical case here is N=2 and M=10.
-        find_ld: for (lib_paths) |lib_path| {
-            for (ld_info_list) |ld_info| {
-                const standard_ld_basename = fs.path.basename(ld_info.ld.get().?);
-                if (std.mem.endsWith(u8, lib_path, standard_ld_basename)) {
-                    found_ld_info = ld_info;
-                    found_ld_path = lib_path;
-                    break :find_ld;
-                }
-            }
-        } else break :self_exe;
-        // Look for glibc version.
-        var os_adjusted = os;
-        if (builtin.target.os.tag == .linux and found_ld_info.abi.isGnu() and
-            cross_target.glibc_version == null)
-        {
-            for (lib_paths) |lib_path| {
-                if (std.mem.endsWith(u8, lib_path, glibc_so_basename)) {
-                    os_adjusted.version_range.linux.glibc = glibcVerFromSO(lib_path) catch |err| switch (err) {
-                        error.UnrecognizedGnuLibCFileName => continue,
-                        error.InvalidGnuLibCVersion => continue,
-                        error.GnuLibCVersionUnavailable => continue,
-                        else => |e| return e,
-                    };
-                    break;
-                }
-            }
-        }
-        var result: NativeTargetInfo = .{
-            .target = .{
-                .cpu = cpu,
-                .os = os_adjusted,
-                .abi = cross_target.abi orelse found_ld_info.abi,
-                .ofmt = cross_target.ofmt orelse Target.ObjectFormat.default(os_adjusted.tag, cpu.arch),
-            },
-            .dynamic_linker = if (cross_target.dynamic_linker.get() == null)
-                DynamicLinker.init(found_ld_path)
-            else
-                cross_target.dynamic_linker,
-        };
-        return result;
-    }
     const elf_file = blk: {
         // This block looks for a shebang line in /usr/bin/env,
         // if it finds one, then instead of using /usr/bin/env as the ELF file to examine, it uses the file it references instead,
@@ -369,7 +318,7 @@ fn detectAbiAndDynamicLinker(
         // #! (2) + 255 (max length of shebang line since Linux 5.1) + \n (1)
         var buffer: [258]u8 = undefined;
         while (true) {
-            const file = std.fs.openFileAbsolute(file_name, .{}) catch |err| switch (err) {
+            const file = fs.openFileAbsolute(file_name, .{}) catch |err| switch (err) {
                 error.NoSpaceLeft => unreachable,
                 error.NameTooLong => unreachable,
                 error.PathAlreadyExists => unreachable,
@@ -390,44 +339,35 @@ fn detectAbiAndDynamicLinker(
                 error.FileTooBig,
                 error.Unexpected,
                 => |e| {
-                    std.log.warn("Encoutered error: {s}, falling back to default ABI and dynamic linker.\n", .{@errorName(e)});
+                    std.log.warn("Encountered error: {s}, falling back to default ABI and dynamic linker.\n", .{@errorName(e)});
                     return defaultAbiAndDynamicLinker(cpu, os, cross_target);
                 },
                 else => |e| return e,
             };
-            errdefer file.close();
-            const line = file.reader().readUntilDelimiter(&buffer, '\n') catch |err| switch (err) {
-                error.IsDir => unreachable, // Handled before
-                error.AccessDenied => unreachable,
-                error.WouldBlock => unreachable, // Did not request blocking mode
-                error.OperationAborted => unreachable, // Windows-only
-                error.BrokenPipe => unreachable,
-                error.ConnectionResetByPeer => unreachable,
-                error.ConnectionTimedOut => unreachable,
-                error.InputOutput => unreachable,
-                error.Unexpected => unreachable,
-                error.StreamTooLong,
-                error.EndOfStream,
-                error.NotOpenForReading,
-                => break :blk file,
-                else => |e| {
-                    file.close();
-                    return e;
-                },
-            };
+            const len = preadMin(file, &buffer, 0, buffer.len) catch |err| switch (err) {
+                error.UnexpectedEndOfFile,
+                error.UnableToReadElfFile,
+                => break :blk file,
+                else => |e| return e,
+            };
+            const newline = mem.indexOfScalar(u8, buffer[0..len], '\n') orelse break :blk file;
+            const line = buffer[0..newline];
             if (!mem.startsWith(u8, line, "#!")) break :blk file;
-            var it = std.mem.tokenize(u8, line[2..], " ");
-            file.close();
+            var it = mem.tokenize(u8, line[2..], " ");
             file_name = it.next() orelse return defaultAbiAndDynamicLinker(cpu, os, cross_target);
+            file.close();
        }
     };
     defer elf_file.close();
     // If Zig is statically linked, such as via distributed binary static builds, the above
     // trick (block self_exe) won't work. The next thing we fall back to is the same thing, but for elf_file.
+    // TODO: inline this function and combine the buffer we already read above to find
+    // the possible shebang line with the buffer we use for the ELF header.
     return abiAndDynamicLinkerFromFile(elf_file, cpu, os, ld_info_list, cross_target) catch |err| switch (err) {
         error.FileSystem,
         error.SystemResources,
@@ -447,31 +387,196 @@ fn detectAbiAndDynamicLinker(
         error.NameTooLong,
         // Finally, we fall back on the standard path.
         => |e| {
-            std.log.warn("Encoutered error: {s}, falling back to default ABI and dynamic linker.\n", .{@errorName(e)});
+            std.log.warn("Encountered error: {s}, falling back to default ABI and dynamic linker.\n", .{@errorName(e)});
             return defaultAbiAndDynamicLinker(cpu, os, cross_target);
         },
     };
 }
-const glibc_so_basename = "libc.so.6";
-fn glibcVerFromSO(so_path: [:0]const u8) !std.builtin.Version {
-    var link_buf: [std.os.PATH_MAX]u8 = undefined;
-    const link_name = std.os.readlinkZ(so_path.ptr, &link_buf) catch |err| switch (err) {
-        error.AccessDenied => return error.GnuLibCVersionUnavailable,
-        error.FileSystem => return error.FileSystem,
-        error.SymLinkLoop => return error.SymLinkLoop,
-        error.NameTooLong => unreachable,
-        error.NotLink => return error.GnuLibCVersionUnavailable,
-        error.FileNotFound => return error.GnuLibCVersionUnavailable,
-        error.SystemResources => return error.SystemResources,
-        error.NotDir => return error.GnuLibCVersionUnavailable,
-        error.Unexpected => return error.GnuLibCVersionUnavailable,
-        error.InvalidUtf8 => unreachable, // Windows only
-        error.BadPathName => unreachable, // Windows only
-        error.UnsupportedReparsePointType => unreachable, // Windows only
-    };
-    return glibcVerFromLinkName(link_name, "libc-");
-}
+fn glibcVerFromRPath(rpath: []const u8) !std.builtin.Version {
+    var dir = fs.cwd().openDir(rpath, .{}) catch |err| switch (err) {
+        error.NameTooLong => unreachable,
+        error.InvalidUtf8 => unreachable,
+        error.BadPathName => unreachable,
+        error.DeviceBusy => unreachable,
+        error.FileNotFound,
+        error.NotDir,
+        error.InvalidHandle,
+        error.AccessDenied,
+        error.NoDevice,
+        => return error.GLibCNotFound,
+        error.ProcessFdQuotaExceeded,
+        error.SystemFdQuotaExceeded,
+        error.SystemResources,
+        error.SymLinkLoop,
+        error.Unexpected,
+        => |e| return e,
+    };
+    defer dir.close();
+    // Now we have a candidate for the path to libc shared object. In
+    // the past, we used readlink() here because the link name would
+    // reveal the glibc version. However, in more recent GNU/Linux
+    // installations, there is no symlink. Thus we instead use a more
+    // robust check of opening the libc shared object and looking at the
+    // .dynstr section, and finding the max version number of symbols
+    // that start with "GLIBC_2.".
+    const glibc_so_basename = "libc.so.6";
+    var f = dir.openFile(glibc_so_basename, .{}) catch |err| switch (err) {
+        error.NameTooLong => unreachable,
+        error.InvalidUtf8 => unreachable, // Windows only
+        error.BadPathName => unreachable, // Windows only
+        error.PipeBusy => unreachable, // Windows-only
+        error.SharingViolation => unreachable, // Windows-only
+        error.FileLocksNotSupported => unreachable, // No lock requested.
+        error.NoSpaceLeft => unreachable, // read-only
+        error.PathAlreadyExists => unreachable, // read-only
+        error.DeviceBusy => unreachable, // read-only
+        error.FileBusy => unreachable, // read-only
+        error.InvalidHandle => unreachable, // should not be in the error set
+        error.WouldBlock => unreachable, // not using O_NONBLOCK
+        error.NoDevice => unreachable, // not asking for a special device
+        error.AccessDenied,
+        error.FileNotFound,
+        error.NotDir,
+        error.IsDir,
+        => return error.GLibCNotFound,
+        error.FileTooBig => return error.Unexpected,
+        error.ProcessFdQuotaExceeded,
+        error.SystemFdQuotaExceeded,
+        error.SystemResources,
+        error.SymLinkLoop,
+        error.Unexpected,
+        => |e| return e,
+    };
+    defer f.close();
+    return glibcVerFromSoFile(f) catch |err| switch (err) {
+        error.InvalidElfMagic,
+        error.InvalidElfEndian,
+        error.InvalidElfClass,
+        error.InvalidElfFile,
+        error.InvalidElfVersion,
+        error.InvalidGnuLibCVersion,
+        error.UnexpectedEndOfFile,
+        => return error.GLibCNotFound,
+        error.SystemResources,
+        error.UnableToReadElfFile,
+        error.Unexpected,
+        error.FileSystem,
+        => |e| return e,
+    };
+}
+fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version {
+    var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined;
+    _ = try preadMin(file, &hdr_buf, 0, hdr_buf.len);
+    const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf);
+    const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf);
+    if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
+    const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) {
+        elf.ELFDATA2LSB => .Little,
+        elf.ELFDATA2MSB => .Big,
+        else => return error.InvalidElfEndian,
+    };
+    const need_bswap = elf_endian != native_endian;
+    if (hdr32.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
+    const is_64 = switch (hdr32.e_ident[elf.EI_CLASS]) {
+        elf.ELFCLASS32 => false,
+        elf.ELFCLASS64 => true,
+        else => return error.InvalidElfClass,
+    };
+    const shstrndx = elfInt(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx);
+    var shoff = elfInt(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff);
+    const shentsize = elfInt(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize);
+    const str_section_off = shoff + @as(u64, shentsize) * @as(u64, shstrndx);
+    var sh_buf: [16 * @sizeOf(elf.Elf64_Shdr)]u8 align(@alignOf(elf.Elf64_Shdr)) = undefined;
+    if (sh_buf.len < shentsize) return error.InvalidElfFile;
+    _ = try preadMin(file, &sh_buf, str_section_off, shentsize);
+    const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf));
+    const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf));
+    const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
+    const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
+    var strtab_buf: [4096:0]u8 = undefined;
+    const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len);
+    const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len);
+    const shstrtab = strtab_buf[0..shstrtab_read_len];
+    const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum);
+    var sh_i: u16 = 0;
+    const dynstr: struct { offset: u64, size: u64 } = find_dyn_str: while (sh_i < shnum) {
+        // Reserve some bytes so that we can deref the 64-bit struct fields
+        // even when the ELF file is 32-bits.
+        const sh_reserve: usize = @sizeOf(elf.Elf64_Shdr) - @sizeOf(elf.Elf32_Shdr);
+        const sh_read_byte_len = try preadMin(
+            file,
+            sh_buf[0 .. sh_buf.len - sh_reserve],
+            shoff,
+            shentsize,
+        );
+        var sh_buf_i: usize = 0;
+        while (sh_buf_i < sh_read_byte_len and sh_i < shnum) : ({
+            sh_i += 1;
+            shoff += shentsize;
+            sh_buf_i += shentsize;
+        }) {
+            const sh32 = @ptrCast(
+                *elf.Elf32_Shdr,
+                @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]),
+            );
+            const sh64 = @ptrCast(
+                *elf.Elf64_Shdr,
+                @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]),
+            );
+            const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name);
+            // TODO this pointer cast should not be necessary
+            const sh_name = mem.sliceTo(std.meta.assumeSentinel(shstrtab[sh_name_off..].ptr, 0), 0);
+            if (mem.eql(u8, sh_name, ".dynstr")) {
+                break :find_dyn_str .{
+                    .offset = elfInt(is_64, need_bswap, sh32.sh_offset, sh64.sh_offset),
+                    .size = elfInt(is_64, need_bswap, sh32.sh_size, sh64.sh_size),
+                };
+            }
+        }
+    } else return error.InvalidGnuLibCVersion;
+    // Here we loop over all the strings in the dynstr string table, assuming that any
+    // strings that start with "GLIBC_2." indicate the existence of such a glibc version,
+    // and furthermore, that the system-installed glibc is at minimum that version.
+    // Empirically, glibc 2.34 libc.so .dynstr section is 32441 bytes on my system.
+    // Here I use double this value plus some headroom. This makes it only need
+    // a single read syscall here.
+    var buf: [80000]u8 = undefined;
+    if (buf.len < dynstr.size) return error.InvalidGnuLibCVersion;
+    const dynstr_size = @intCast(usize, dynstr.size);
+    const dynstr_bytes = buf[0..dynstr_size];
+    _ = try preadMin(file, dynstr_bytes, dynstr.offset, dynstr_bytes.len);
+    var it = mem.split(u8, dynstr_bytes, &.{0});
+    var max_ver: std.builtin.Version = .{ .major = 2, .minor = 2, .patch = 5 };
+    while (it.next()) |s| {
+        if (mem.startsWith(u8, s, "GLIBC_2.")) {
+            const chopped = s["GLIBC_".len..];
+            const ver = std.builtin.Version.parse(chopped) catch |err| switch (err) {
+                error.Overflow => return error.InvalidGnuLibCVersion,
+                error.InvalidCharacter => return error.InvalidGnuLibCVersion,
+                error.InvalidVersion => return error.InvalidGnuLibCVersion,
+            };
+            switch (ver.order(max_ver)) {
+                .gt => max_ver = ver,
+                .lt, .eq => continue,
+            }
+        }
+    }
+    return max_ver;
+}
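
Stripped of the ELF plumbing, the scan at the end of glibcVerFromSoFile reduces to: split .dynstr on NUL, parse whatever follows "GLIBC_", and keep the maximum. A self-contained sketch of that reduction (the symbol strings are made up):

    const std = @import("std");

    test "max GLIBC_2.x version wins" {
        // Hypothetical .dynstr entries; the real ones come from libc.so.6.
        const names = [_][]const u8{ "GLIBC_2.17", "strlen", "GLIBC_2.34", "GLIBC_2.28" };
        var max_ver: std.builtin.Version = .{ .major = 2, .minor = 2, .patch = 5 };
        for (names) |s| {
            if (!std.mem.startsWith(u8, s, "GLIBC_2.")) continue;
            const ver = try std.builtin.Version.parse(s["GLIBC_".len..]);
            if (ver.order(max_ver) == .gt) max_ver = ver;
        }
        try std.testing.expectEqual(@as(u32, 34), max_ver.minor);
    }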
 fn glibcVerFromLinkName(link_name: []const u8, prefix: []const u8) !std.builtin.Version {
@@ -641,7 +746,6 @@ pub fn abiAndDynamicLinkerFromFile(
     if (builtin.target.os.tag == .linux and result.target.isGnuLibC() and
         cross_target.glibc_version == null)
     {
-        if (rpath_offset) |rpoff| {
         const shstrndx = elfInt(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx);
         var shoff = elfInt(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff);
@@ -700,6 +804,7 @@ pub fn abiAndDynamicLinkerFromFile(
         } else null;
         if (dynstr) |ds| {
+            if (rpath_offset) |rpoff| {
             // TODO this pointer cast should not be necessary
             const rpoff_usize = std.math.cast(usize, rpoff) orelse return error.InvalidElfFile;
             if (rpoff_usize > ds.size) return error.InvalidElfFile;
@@ -713,64 +818,31 @@ pub fn abiAndDynamicLinkerFromFile(
             const rpath_list = mem.sliceTo(std.meta.assumeSentinel(strtab.ptr, 0), 0);
             var it = mem.tokenize(u8, rpath_list, ":");
             while (it.next()) |rpath| {
-                var dir = fs.cwd().openDir(rpath, .{}) catch |err| switch (err) {
-                    error.NameTooLong => unreachable,
-                    error.InvalidUtf8 => unreachable,
-                    error.BadPathName => unreachable,
-                    error.DeviceBusy => unreachable,
-                    error.FileNotFound,
-                    error.NotDir,
-                    error.InvalidHandle,
-                    error.AccessDenied,
-                    error.NoDevice,
-                    => continue,
-                    error.ProcessFdQuotaExceeded,
-                    error.SystemFdQuotaExceeded,
-                    error.SystemResources,
-                    error.SymLinkLoop,
-                    error.Unexpected,
-                    => |e| return e,
-                };
-                defer dir.close();
-                var link_buf: [std.os.PATH_MAX]u8 = undefined;
-                const link_name = std.os.readlinkatZ(
-                    dir.fd,
-                    glibc_so_basename,
-                    &link_buf,
-                ) catch |err| switch (err) {
-                    error.NameTooLong => unreachable,
-                    error.InvalidUtf8 => unreachable, // Windows only
-                    error.BadPathName => unreachable, // Windows only
-                    error.UnsupportedReparsePointType => unreachable, // Windows only
-                    error.AccessDenied,
-                    error.FileNotFound,
-                    error.NotLink,
-                    error.NotDir,
-                    => continue,
-                    error.SystemResources,
-                    error.FileSystem,
-                    error.SymLinkLoop,
-                    error.Unexpected,
-                    => |e| return e,
-                };
-                result.target.os.version_range.linux.glibc = glibcVerFromLinkName(
-                    link_name,
-                    "libc-",
-                ) catch |err| switch (err) {
-                    error.UnrecognizedGnuLibCFileName,
-                    error.InvalidGnuLibCVersion,
-                    => continue,
-                };
-                break;
+                if (glibcVerFromRPath(rpath)) |ver| {
+                    result.target.os.version_range.linux.glibc = ver;
+                    return result;
+                } else |err| switch (err) {
+                    error.GLibCNotFound => continue,
+                    else => |e| return e,
+                }
             }
         }
-    } else if (result.dynamic_linker.get()) |dl_path| glibc_ver: {
-        // There is no DT_RUNPATH but we can try to see if the information is
+    }
+    if (result.dynamic_linker.get()) |dl_path| glibc_ver: {
+        // There is no DT_RUNPATH so we try to find libc.so.6 inside the same
+        // directory as the dynamic linker.
+        if (fs.path.dirname(dl_path)) |rpath| {
+            if (glibcVerFromRPath(rpath)) |ver| {
+                result.target.os.version_range.linux.glibc = ver;
+                return result;
+            } else |err| switch (err) {
+                error.GLibCNotFound => {},
+                else => |e| return e,
+            }
+        }
+        // So far, no luck. Next we try to see if the information is
         // present in the symlink data for the dynamic linker path.
         var link_buf: [std.os.PATH_MAX]u8 = undefined;
         const link_name = std.os.readlink(dl_path, &link_buf) catch |err| switch (err) {
@@ -799,6 +871,36 @@ pub fn abiAndDynamicLinkerFromFile(
             error.InvalidGnuLibCVersion,
             => break :glibc_ver,
         };
+        return result;
+    }
+    // Nothing worked so far. Finally we fall back to hard-coded search paths.
+    // Some distros such as Debian keep their libc.so.6 in `/lib/$triple/`.
+    var path_buf: [std.os.PATH_MAX]u8 = undefined;
+    var index: usize = 0;
+    const prefix = "/lib/";
+    const cpu_arch = @tagName(result.target.cpu.arch);
+    const os_tag = @tagName(result.target.os.tag);
+    const abi = @tagName(result.target.abi);
+    mem.copy(u8, path_buf[index..], prefix);
+    index += prefix.len;
+    mem.copy(u8, path_buf[index..], cpu_arch);
+    index += cpu_arch.len;
+    path_buf[index] = '-';
+    index += 1;
+    mem.copy(u8, path_buf[index..], os_tag);
+    index += os_tag.len;
+    path_buf[index] = '-';
+    index += 1;
+    mem.copy(u8, path_buf[index..], abi);
+    index += abi.len;
+    const rpath = path_buf[0..index];
+    if (glibcVerFromRPath(rpath)) |ver| {
+        result.target.os.version_range.linux.glibc = ver;
+        return result;
+    } else |err| switch (err) {
+        error.GLibCNotFound => {},
+        else => |e| return e,
+    }
 }
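
The fallback above byte-copies "/lib/" plus the arch, os, and abi tags into a stack buffer. The same composition written with std.fmt, as a sketch of what the resulting path looks like (triple values illustrative, not the commit's code):

    const std = @import("std");

    test "Debian-style multiarch rpath" {
        var path_buf: [64]u8 = undefined;
        // e.g. x86_64 + linux + gnu => "/lib/x86_64-linux-gnu"
        const rpath = try std.fmt.bufPrint(&path_buf, "/lib/{s}-{s}-{s}", .{
            "x86_64", "linux", "gnu",
        });
        try std.testing.expectEqualStrings("/lib/x86_64-linux-gnu", rpath);
    }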

View File

@@ -878,6 +878,9 @@ pub const InitOptions = struct {
     linker_shared_memory: bool = false,
     linker_global_base: ?u64 = null,
     linker_export_symbol_names: []const []const u8 = &.{},
+    linker_print_gc_sections: bool = false,
+    linker_print_icf_sections: bool = false,
+    linker_print_map: bool = false,
     each_lib_rpath: ?bool = null,
     build_id: ?bool = null,
     disable_c_depfile: bool = false,
@@ -1727,6 +1730,9 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
     .shared_memory = options.linker_shared_memory,
     .global_base = options.linker_global_base,
     .export_symbol_names = options.linker_export_symbol_names,
+    .print_gc_sections = options.linker_print_gc_sections,
+    .print_icf_sections = options.linker_print_icf_sections,
+    .print_map = options.linker_print_map,
     .z_nodelete = options.linker_z_nodelete,
     .z_notext = options.linker_z_notext,
     .z_defs = options.linker_z_defs,

View File

@@ -345,6 +345,15 @@ pub const CaptureScope = struct {
     /// During sema, this map is backed by the gpa. Once sema completes,
     /// it is reallocated using the value_arena.
     captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, TypedValue) = .{},
+    pub fn failed(noalias self: *const @This()) bool {
+        return self.captures.available == 0 and self.captures.size == std.math.maxInt(u32);
+    }
+    pub fn fail(noalias self: *@This()) void {
+        self.captures.available = 0;
+        self.captures.size = std.math.maxInt(u32);
+    }
 };
 pub const WipCaptureScope = struct {
@@ -383,6 +392,7 @@ pub const WipCaptureScope = struct {
     pub fn deinit(noalias self: *@This()) void {
         if (!self.finalized) {
             self.scope.captures.deinit(self.gpa);
+            self.scope.fail();
         }
         self.* = undefined;
     }
@@ -4274,11 +4284,14 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
     const comp = mod.comp;
-    if (comp.bin_file.options.emit == null and
+    const no_bin_file = (comp.bin_file.options.emit == null and
         comp.emit_asm == null and
         comp.emit_llvm_ir == null and
-        comp.emit_llvm_bc == null)
-    {
+        comp.emit_llvm_bc == null);
+    const dump_air = builtin.mode == .Debug and comp.verbose_air;
+    if (no_bin_file and !dump_air) {
         return;
     }
@@ -4286,7 +4299,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
     var liveness = try Liveness.analyze(gpa, air);
     defer liveness.deinit(gpa);
-    if (builtin.mode == .Debug and comp.verbose_air) {
+    if (dump_air) {
         const fqn = try decl.getFullyQualifiedName(mod);
         defer mod.gpa.free(fqn);
@@ -4295,6 +4308,10 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void {
         std.debug.print("# End Function AIR: {s}\n\n", .{fqn});
     }
+    if (no_bin_file) {
+        return;
+    }
     comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         error.AnalysisFail => {

View File

@@ -5956,7 +5956,6 @@ fn analyzeCall(
     error.NeededSourceLocation => {
         _ = sema.inst_map.remove(inst);
         const decl = sema.mod.declPtr(block.src_decl);
-        child_block.src_decl = block.src_decl;
         try sema.analyzeInlineCallArg(
             block,
             &child_block,
@@ -13740,6 +13739,16 @@ fn zirClosureGet(
     const tv = while (true) {
         // Note: We don't need to add a dependency here, because
         // decls always depend on their lexical parents.
+        // Fail this decl if a scope it depended on failed.
+        if (scope.failed()) {
+            if (sema.owner_func) |owner_func| {
+                owner_func.state = .dependency_failure;
+            } else {
+                sema.owner_decl.analysis = .dependency_failure;
+            }
+            return error.AnalysisFail;
+        }
         if (scope.captures.getPtr(inst_data.inst)) |tv| {
             break tv;
         }
@@ -18076,8 +18085,8 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u64 {
     const target = sema.mod.getTarget();
     try sema.resolveTypeLayout(block, lhs_src, ty);
-    switch (ty.tag()) {
-        .@"struct", .tuple, .anon_struct => {},
+    switch (ty.zigTypeTag()) {
+        .Struct => {},
         else => {
             const msg = msg: {
                 const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(sema.mod)});
@@ -19617,28 +19626,19 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
-    const dest_ptr = try sema.resolveInst(extra.dest);
-    const dest_ptr_ty = sema.typeOf(dest_ptr);
-    try sema.checkPtrOperand(block, dest_src, dest_ptr_ty);
-    if (dest_ptr_ty.isConstPtr()) {
-        return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty.fmt(sema.mod)});
-    }
+    const uncasted_dest_ptr = try sema.resolveInst(extra.dest);
+    // TODO AstGen's coerced_ty cannot handle volatile here
+    var dest_ptr_info = Type.initTag(.manyptr_u8).ptrInfo().data;
+    dest_ptr_info.@"volatile" = sema.typeOf(uncasted_dest_ptr).isVolatilePtr();
+    const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
+    const dest_ptr = try sema.coerce(block, dest_ptr_ty, uncasted_dest_ptr, dest_src);
     const uncasted_src_ptr = try sema.resolveInst(extra.source);
-    const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr);
-    try sema.checkPtrOperand(block, src_src, uncasted_src_ptr_ty);
-    const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data;
-    const wanted_src_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{
-        .pointee_type = dest_ptr_ty.elemType2(),
-        .@"align" = src_ptr_info.@"align",
-        .@"addrspace" = src_ptr_info.@"addrspace",
-        .mutable = false,
-        .@"allowzero" = src_ptr_info.@"allowzero",
-        .@"volatile" = src_ptr_info.@"volatile",
-        .size = .Many,
-    });
-    const src_ptr = try sema.coerce(block, wanted_src_ptr_ty, uncasted_src_ptr, src_src);
+    var src_ptr_info = Type.initTag(.manyptr_const_u8).ptrInfo().data;
+    src_ptr_info.@"volatile" = sema.typeOf(uncasted_src_ptr).isVolatilePtr();
+    const src_ptr_ty = try Type.ptr(sema.arena, sema.mod, src_ptr_info);
+    const src_ptr = try sema.coerce(block, src_ptr_ty, uncasted_src_ptr, src_src);
     const len = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.byte_count), len_src);
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: {
@@ -19674,14 +19674,15 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
-    const dest_ptr = try sema.resolveInst(extra.dest);
-    const dest_ptr_ty = sema.typeOf(dest_ptr);
-    try sema.checkPtrOperand(block, dest_src, dest_ptr_ty);
-    if (dest_ptr_ty.isConstPtr()) {
-        return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty.fmt(sema.mod)});
-    }
-    const elem_ty = dest_ptr_ty.elemType2();
-    const value = try sema.coerce(block, elem_ty, try sema.resolveInst(extra.byte), value_src);
+    const uncasted_dest_ptr = try sema.resolveInst(extra.dest);
+    // TODO AstGen's coerced_ty cannot handle volatile here
+    var ptr_info = Type.initTag(.manyptr_u8).ptrInfo().data;
+    ptr_info.@"volatile" = sema.typeOf(uncasted_dest_ptr).isVolatilePtr();
+    const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
+    const dest_ptr = try sema.coerce(block, dest_ptr_ty, uncasted_dest_ptr, dest_src);
+    const value = try sema.coerce(block, Type.u8, try sema.resolveInst(extra.byte), value_src);
     const len = try sema.coerce(block, Type.usize, try sema.resolveInst(extra.byte_count), len_src);
     const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: {
@@ -26013,6 +26014,7 @@ fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref
         .pointee_type = decl_tv.ty,
         .mutable = false,
         .@"addrspace" = decl.@"addrspace",
+        .@"align" = decl.@"align",
     }),
     try Value.Tag.decl_ref.create(sema.arena, decl_index),
 );
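
Net effect of the zirMemset change above: the destination now coerces to a possibly-volatile [*]u8 and the value operand to u8, matching the byte-oriented three-argument @memset builtin of this era. A sketch at the language level:

    const std = @import("std");

    test "@memset fills bytes" {
        var buf: [4]u8 = undefined;
        // *[4]u8 coerces to [*]u8; the fill value must be a u8.
        @memset(&buf, 0xaa, buf.len);
        try std.testing.expectEqualSlices(u8, &[_]u8{ 0xaa, 0xaa, 0xaa, 0xaa }, &buf);
    }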

View File

@@ -666,6 +666,10 @@ pub fn deinit(self: *Self) void {
     self.locals.deinit(self.gpa);
     self.mir_instructions.deinit(self.gpa);
     self.mir_extra.deinit(self.gpa);
+    self.free_locals_i32.deinit(self.gpa);
+    self.free_locals_i64.deinit(self.gpa);
+    self.free_locals_f32.deinit(self.gpa);
+    self.free_locals_f64.deinit(self.gpa);
     self.* = undefined;
 }

File diff suppressed because it is too large

View File

@@ -283,10 +283,11 @@ fn mirPushPopRegisterList(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
     const ops = emit.mir.instructions.items(.ops)[inst].decode();
     const payload = emit.mir.instructions.items(.data)[inst].payload;
     const save_reg_list = emit.mir.extraData(Mir.SaveRegisterList, payload).data;
-    const reg_list = Mir.RegisterList(Register, &abi.callee_preserved_regs).fromInt(save_reg_list.register_list);
     var disp: i32 = -@intCast(i32, save_reg_list.stack_end);
-    inline for (abi.callee_preserved_regs) |reg| {
-        if (reg_list.isSet(reg)) {
+    const reg_list = Mir.RegisterList.fromInt(save_reg_list.register_list);
+    const callee_preserved_regs = abi.getCalleePreservedRegs(emit.target.*);
+    for (callee_preserved_regs) |reg| {
+        if (reg_list.isSet(callee_preserved_regs, reg)) {
             switch (tag) {
                 .push => try lowerToMrEnc(.mov, RegisterOrMemory.mem(.qword_ptr, .{
                     .disp = @bitCast(u32, disp),
@@ -614,14 +615,15 @@ inline fn immOpSize(u_imm: u32) u6 {
 fn mirArithScaleSrc(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
     const ops = emit.mir.instructions.items(.ops)[inst].decode();
     const scale = ops.flags;
-    const imm = emit.mir.instructions.items(.data)[inst].imm;
-    // OP reg1, [reg2 + scale*rcx + imm32]
+    const payload = emit.mir.instructions.items(.data)[inst].payload;
+    const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
+    // OP reg1, [reg2 + scale*index + imm32]
     const scale_index = ScaleIndex{
         .scale = scale,
-        .index = .rcx,
+        .index = index_reg_disp.index,
     };
     return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
-        .disp = imm,
+        .disp = index_reg_disp.disp,
         .base = ops.reg2,
         .scale_index = scale_index,
     }), emit.code);
@@ -630,22 +632,16 @@ fn mirArithScaleSrc(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
 fn mirArithScaleDst(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
     const ops = emit.mir.instructions.items(.ops)[inst].decode();
     const scale = ops.flags;
-    const imm = emit.mir.instructions.items(.data)[inst].imm;
+    const payload = emit.mir.instructions.items(.data)[inst].payload;
+    const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
     const scale_index = ScaleIndex{
         .scale = scale,
-        .index = .rax,
+        .index = index_reg_disp.index,
     };
-    if (ops.reg2 == .none) {
-        // OP qword ptr [reg1 + scale*rax + 0], imm32
-        return lowerToMiEnc(tag, RegisterOrMemory.mem(.qword_ptr, .{
-            .disp = 0,
-            .base = ops.reg1,
-            .scale_index = scale_index,
-        }), imm, emit.code);
-    }
-    // OP [reg1 + scale*rax + imm32], reg2
+    assert(ops.reg2 != .none);
+    // OP [reg1 + scale*index + imm32], reg2
     return lowerToMrEnc(tag, RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg2.size()), .{
-        .disp = imm,
+        .disp = index_reg_disp.disp,
         .base = ops.reg1,
         .scale_index = scale_index,
     }), ops.reg2, emit.code);
@@ -655,24 +651,24 @@ fn mirArithScaleImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
     const ops = emit.mir.instructions.items(.ops)[inst].decode();
     const scale = ops.flags;
     const payload = emit.mir.instructions.items(.data)[inst].payload;
-    const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
+    const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode();
     const scale_index = ScaleIndex{
         .scale = scale,
-        .index = .rax,
+        .index = index_reg_disp_imm.index,
     };
-    // OP qword ptr [reg1 + scale*rax + imm32], imm32
+    // OP qword ptr [reg1 + scale*index + imm32], imm32
     return lowerToMiEnc(tag, RegisterOrMemory.mem(.qword_ptr, .{
-        .disp = imm_pair.dest_off,
+        .disp = index_reg_disp_imm.disp,
         .base = ops.reg1,
         .scale_index = scale_index,
-    }), imm_pair.operand, emit.code);
+    }), index_reg_disp_imm.imm, emit.code);
 }
 fn mirArithMemIndexImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
     const ops = emit.mir.instructions.items(.ops)[inst].decode();
     assert(ops.reg2 == .none);
     const payload = emit.mir.instructions.items(.data)[inst].payload;
-    const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
+    const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode();
     const ptr_size: Memory.PtrSize = switch (ops.flags) {
         0b00 => .byte_ptr,
         0b01 => .word_ptr,
@@ -681,14 +677,14 @@ fn mirArithMemIndexImm(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
     };
     const scale_index = ScaleIndex{
         .scale = 0,
-        .index = .rax,
+        .index = index_reg_disp_imm.index,
     };
-    // OP ptr [reg1 + rax*1 + imm32], imm32
+    // OP ptr [reg1 + index + imm32], imm32
     return lowerToMiEnc(tag, RegisterOrMemory.mem(ptr_size, .{
-        .disp = imm_pair.dest_off,
+        .disp = index_reg_disp_imm.disp,
         .base = ops.reg1,
         .scale_index = scale_index,
-    }), imm_pair.operand, emit.code);
+    }), index_reg_disp_imm.imm, emit.code);
 }
 fn mirMovSignExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
@@ -956,18 +952,19 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
             mem.writeIntLittle(i32, emit.code.items[end_offset - 4 ..][0..4], disp);
         },
         0b10 => {
-            // lea reg, [rbp + rcx + imm32]
-            const imm = emit.mir.instructions.items(.data)[inst].imm;
+            // lea reg, [rbp + index + imm32]
+            const payload = emit.mir.instructions.items(.data)[inst].payload;
+            const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
             const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
             const scale_index = ScaleIndex{
                 .scale = 0,
-                .index = .rcx,
+                .index = index_reg_disp.index,
             };
             return lowerToRmEnc(
                 .lea,
                 ops.reg1,
                 RegisterOrMemory.mem(Memory.PtrSize.new(ops.reg1.size()), .{
-                    .disp = imm,
+                    .disp = index_reg_disp.disp,
                     .base = src_reg,
                     .scale_index = scale_index,
                 }),
@ -985,8 +982,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const relocation = emit.mir.instructions.items(.data)[inst].relocation; const relocation = emit.mir.instructions.items(.data)[inst].relocation;
switch (ops.flags) { switch (ops.flags) {
0b00, 0b01 => {}, 0b00, 0b01, 0b10 => {},
else => return emit.fail("TODO unused LEA PIC variants 0b10 and 0b11", .{}), else => return emit.fail("TODO unused LEA PIC variant 0b11", .{}),
} }
// lea reg1, [rip + reloc] // lea reg1, [rip + reloc]
@ -1024,6 +1021,7 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.@"type" = switch (ops.flags) { .@"type" = switch (ops.flags) {
0b00 => .got, 0b00 => .got,
0b01 => .direct, 0b01 => .direct,
0b10 => .imports,
else => unreachable, else => unreachable,
}, },
.target = .{ .sym_index = relocation.sym_index, .file = null }, .target = .{ .sym_index = relocation.sym_index, .file = null },
@ -1031,7 +1029,6 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.addend = 0, .addend = 0,
.pcrel = true, .pcrel = true,
.length = 2, .length = 2,
.prev_vaddr = atom.getSymbol(coff_file).value,
}); });
} else { } else {
return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends different than MachO", .{}); return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends different than MachO", .{});
@ -1157,6 +1154,17 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2, .length = 2,
.@"type" = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH), .@"type" = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
}); });
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?;
try atom.addRelocation(coff_file, .{
.@"type" = .direct,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = offset,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else { } else {
return emit.fail("TODO implement call_extern for linking backends different than MachO", .{}); return emit.fail("TODO implement call_extern for linking backends different than MachO", .{});
} }
@ -2241,6 +2249,7 @@ fn lowerToMxEnc(tag: Tag, reg_or_mem: RegisterOrMemory, enc: Encoding, code: *st
encoder.rex(.{ encoder.rex(.{
.w = wide, .w = wide,
.b = base.isExtended(), .b = base.isExtended(),
.x = if (mem_op.scale_index) |si| si.index.isExtended() else false,
}); });
} }
opc.encode(encoder); opc.encode(encoder);
@ -2346,10 +2355,12 @@ fn lowerToMiXEnc(
encoder.rex(.{ encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr, .w = dst_mem.ptr_size == .qword_ptr,
.b = base.isExtended(), .b = base.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} else { } else {
encoder.rex(.{ encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr, .w = dst_mem.ptr_size == .qword_ptr,
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} }
opc.encode(encoder); opc.encode(encoder);
@ -2401,11 +2412,13 @@ fn lowerToRmEnc(
.w = setRexWRegister(reg), .w = setRexWRegister(reg),
.r = reg.isExtended(), .r = reg.isExtended(),
.b = base.isExtended(), .b = base.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} else { } else {
encoder.rex(.{ encoder.rex(.{
.w = setRexWRegister(reg), .w = setRexWRegister(reg),
.r = reg.isExtended(), .r = reg.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} }
opc.encode(encoder); opc.encode(encoder);
@ -2446,11 +2459,13 @@ fn lowerToMrEnc(
.w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg), .w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
.r = reg.isExtended(), .r = reg.isExtended(),
.b = base.isExtended(), .b = base.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} else { } else {
encoder.rex(.{ encoder.rex(.{
.w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg), .w = dst_mem.ptr_size == .qword_ptr or setRexWRegister(reg),
.r = reg.isExtended(), .r = reg.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} }
opc.encode(encoder); opc.encode(encoder);
@ -2490,11 +2505,13 @@ fn lowerToRmiEnc(
.w = setRexWRegister(reg), .w = setRexWRegister(reg),
.r = reg.isExtended(), .r = reg.isExtended(),
.b = base.isExtended(), .b = base.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} else { } else {
encoder.rex(.{ encoder.rex(.{
.w = setRexWRegister(reg), .w = setRexWRegister(reg),
.r = reg.isExtended(), .r = reg.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} }
opc.encode(encoder); opc.encode(encoder);
@ -2531,10 +2548,12 @@ fn lowerToVmEnc(
vex.rex(.{ vex.rex(.{
.r = reg.isExtended(), .r = reg.isExtended(),
.b = base.isExtended(), .b = base.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} else { } else {
vex.rex(.{ vex.rex(.{
.r = reg.isExtended(), .r = reg.isExtended(),
.x = if (src_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} }
encoder.vex(enc.prefix); encoder.vex(enc.prefix);
@ -2571,10 +2590,12 @@ fn lowerToMvEnc(
vex.rex(.{ vex.rex(.{
.r = reg.isExtended(), .r = reg.isExtended(),
.b = base.isExtended(), .b = base.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} else { } else {
vex.rex(.{ vex.rex(.{
.r = reg.isExtended(), .r = reg.isExtended(),
.x = if (dst_mem.scale_index) |si| si.index.isExtended() else false,
}); });
} }
encoder.vex(enc.prefix); encoder.vex(enc.prefix);
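Every `.x` line added above follows the same rule: REX.X extends the 3-bit SIB index field, so an extended index register (r8..r15) cannot be encoded without it. A hand-assembled reference example (a worked sketch assuming the standard x86_64 encoding tables, not taken from the diff):

// lea rax, [rbx + r9*1 + 0x08]
//   rex   = 0x4A  (0100WRXB: W=1 64-bit, R=0 rax, X=1 r9 is extended, B=0 rbx)
//   opc   = 0x8D  (lea)
//   modrm = 0x44  (mod=01 disp8, reg=000 rax, rm=100 SIB follows)
//   sib   = 0x0B  (scale=00, index=001 low bits of r9, base=011 rbx)
//   disp8 = 0x08
// bytes: 4A 8D 44 0B 08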

View File

@ -44,25 +44,28 @@ pub const Inst = struct {
/// 0b01 word ptr [reg1 + imm32], imm16 /// 0b01 word ptr [reg1 + imm32], imm16
/// 0b10 dword ptr [reg1 + imm32], imm32 /// 0b10 dword ptr [reg1 + imm32], imm32
/// 0b11 qword ptr [reg1 + imm32], imm32 (sign-extended to imm64) /// 0b11 qword ptr [reg1 + imm32], imm32 (sign-extended to imm64)
/// Notes:
/// * Uses `ImmPair` as payload
adc_mem_imm, adc_mem_imm,
/// form: reg1, [reg2 + scale*rcx + imm32] /// form: reg1, [reg2 + scale*index + imm32]
/// ops flags scale
/// 0b00 1
/// 0b01 2
/// 0b10 4
/// 0b11 8
adc_scale_src,
/// form: [reg1 + scale*rax + imm32], reg2
/// form: [reg1 + scale*rax + 0], imm32
/// ops flags scale /// ops flags scale
/// 0b00 1 /// 0b00 1
/// 0b01 2 /// 0b01 2
/// 0b10 4 /// 0b10 4
/// 0b11 8 /// 0b11 8
/// Notes: /// Notes:
/// * If reg2 is `none` then it means Data field `imm` is used as the immediate. /// * Uses `IndexRegisterDisp` as payload
adc_scale_src,
/// form: [reg1 + scale*index + imm32], reg2
/// ops flags scale
/// 0b00 1
/// 0b01 2
/// 0b10 4
/// 0b11 8
/// Notes:
/// * Uses `IndexRegisterDisp` payload.
adc_scale_dst, adc_scale_dst,
/// form: [reg1 + scale*rax + imm32], imm32 /// form: [reg1 + scale*rax + imm32], imm32
@ -72,14 +75,16 @@ pub const Inst = struct {
/// 0b10 4 /// 0b10 4
/// 0b11 8 /// 0b11 8
/// Notes: /// Notes:
/// * Data field `payload` points at `ImmPair`. /// * Uses `IndexRegisterDispImm` payload.
adc_scale_imm, adc_scale_imm,
/// ops flags: form: /// ops flags: form:
/// 0b00 byte ptr [reg1 + rax + imm32], imm8 /// 0b00 byte ptr [reg1 + index + imm32], imm8
/// 0b01 word ptr [reg1 + rax + imm32], imm16 /// 0b01 word ptr [reg1 + index + imm32], imm16
/// 0b10 dword ptr [reg1 + rax + imm32], imm32 /// 0b10 dword ptr [reg1 + index + imm32], imm32
/// 0b11 qword ptr [reg1 + rax + imm32], imm32 (sign-extended to imm64) /// 0b11 qword ptr [reg1 + index + imm32], imm32 (sign-extended to imm64)
/// Notes:
/// * Uses `IndexRegisterDispImm` payload.
adc_mem_index_imm, adc_mem_index_imm,
// The following instructions all have the same encoding as `adc`. // The following instructions all have the same encoding as `adc`.
@ -174,12 +179,15 @@ pub const Inst = struct {
/// 0b00 reg1, [reg2 + imm32] /// 0b00 reg1, [reg2 + imm32]
/// 0b00 reg1, [ds:imm32] /// 0b00 reg1, [ds:imm32]
/// 0b01 reg1, [rip + imm32] /// 0b01 reg1, [rip + imm32]
/// 0b10 reg1, [reg2 + rcx + imm32] /// 0b10 reg1, [reg2 + index + imm32]
/// Notes:
/// * 0b10 uses `IndexRegisterDisp` payload
lea, lea,
/// ops flags: form: /// ops flags: form:
/// 0b00 reg1, [rip + reloc] // via GOT PIC /// 0b00 reg1, [rip + reloc] // via GOT PIC
/// 0b01 reg1, [rip + reloc] // direct load PIC /// 0b01 reg1, [rip + reloc] // direct load PIC
/// 0b10 reg1, [rip + reloc] // via imports table PIC
/// Notes: /// Notes:
/// * `Data` contains `relocation` /// * `Data` contains `relocation`
lea_pic, lea_pic,
@ -460,28 +468,86 @@ pub const Inst = struct {
} }
}; };
pub fn RegisterList(comptime Reg: type, comptime registers: []const Reg) type { pub const IndexRegisterDisp = struct {
assert(registers.len <= @bitSizeOf(u32)); /// Index register to use with SIB-based encoding
return struct { index: u32,
bitset: RegBitSet = RegBitSet.initEmpty(),
const RegBitSet = IntegerBitSet(registers.len); /// Displacement value
disp: u32,
pub fn encode(index: Register, disp: u32) IndexRegisterDisp {
return .{
.index = @enumToInt(index),
.disp = disp,
};
}
pub fn decode(this: IndexRegisterDisp) struct {
index: Register,
disp: u32,
} {
return .{
.index = @intToEnum(Register, this.index),
.disp = this.disp,
};
}
};
/// TODO: would it be worth making `IndexRegisterDisp` and `IndexRegisterDispImm` a variable-length list /// TODO: would it be worth making `IndexRegisterDisp` and `IndexRegisterDispImm` a variable-length list
/// instead of having two structs, one a superset of the other? /// instead of having two structs, one a superset of the other?
pub const IndexRegisterDispImm = struct {
/// Index register to use with SIB-based encoding
index: u32,
/// Displacement value
disp: u32,
/// Immediate
imm: u32,
pub fn encode(index: Register, disp: u32, imm: u32) IndexRegisterDispImm {
return .{
.index = @enumToInt(index),
.disp = disp,
.imm = imm,
};
}
pub fn decode(this: IndexRegisterDispImm) struct {
index: Register,
disp: u32,
imm: u32,
} {
return .{
.index = @intToEnum(Register, this.index),
.disp = this.disp,
.imm = this.imm,
};
}
};
/// Used in conjunction with `SaveRegisterList` payload to transfer a list of used registers
/// in a compact manner.
pub const RegisterList = struct {
bitset: BitSet = BitSet.initEmpty(),
const BitSet = IntegerBitSet(@ctz(@as(u32, 0)));
const Self = @This(); const Self = @This();
fn getIndexForReg(reg: Reg) RegBitSet.MaskInt { fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
inline for (registers) |cpreg, i| { for (registers) |cpreg, i| {
if (reg.id() == cpreg.id()) return i; if (reg.id() == cpreg.id()) return @intCast(u32, i);
} }
unreachable; // register not in input register list! unreachable; // register not in input register list!
} }
pub fn push(self: *Self, reg: Reg) void { pub fn push(self: *Self, registers: []const Register, reg: Register) void {
const index = getIndexForReg(reg); const index = getIndexForReg(registers, reg);
self.bitset.set(index); self.bitset.set(index);
} }
pub fn isSet(self: Self, reg: Reg) bool { pub fn isSet(self: Self, registers: []const Register, reg: Register) bool {
const index = getIndexForReg(reg); const index = getIndexForReg(registers, reg);
return self.bitset.isSet(index); return self.bitset.isSet(index);
} }
@ -491,7 +557,7 @@ pub fn RegisterList(comptime Reg: type, comptime registers: []const Reg) type {
pub fn fromInt(mask: u32) Self { pub fn fromInt(mask: u32) Self {
return .{ return .{
.bitset = RegBitSet{ .mask = @intCast(RegBitSet.MaskInt, mask) }, .bitset = BitSet{ .mask = @intCast(BitSet.MaskInt, mask) },
}; };
} }
@ -499,7 +565,6 @@ pub fn RegisterList(comptime Reg: type, comptime registers: []const Reg) type {
return @intCast(u32, self.bitset.count()); return @intCast(u32, self.bitset.count());
} }
}; };
}
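Since `RegisterList` is no longer comptime-parameterized over a register set, callers now pass the slice on every call and bits are indexed by position within that slice. A usage sketch (assuming `std` is imported and that `count` is the method whose body appears above):

test "RegisterList tracks registers by position in the given slice" {
    const regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 };
    var list = RegisterList{};
    list.push(&regs, .r12);
    list.push(&regs, .r15);
    try std.testing.expect(list.isSet(&regs, .r12));
    try std.testing.expect(!list.isSet(&regs, .rbx));
    try std.testing.expectEqual(@as(u32, 2), list.count());
}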
pub const SaveRegisterList = struct { pub const SaveRegisterList = struct {
/// Use `RegisterList` to populate. /// Use `RegisterList` to populate.

View File

@ -392,6 +392,7 @@ pub fn classifySystemV(ty: Type, target: Target) [8]Class {
} }
} }
pub const SysV = struct {
/// Note that .rsp and .rbp also belong to this set, however, we never expect to use them /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
/// for anything else but stack offset tracking; we therefore exclude them from this set. /// for anything else but stack offset tracking; we therefore exclude them from this set.
pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 }; pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 };
@ -402,13 +403,58 @@ pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8
pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 }; pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx }; pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
};
pub const Win64 = struct {
/// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
/// for anything else but stack offset tracking; we therefore exclude them from this set.
pub const callee_preserved_regs = [_]Register{ .rbx, .rsi, .rdi, .r12, .r13, .r14, .r15 };
/// These registers need to be preserved (saved on the stack) and restored by the caller before
/// the caller relinquishes control to a subroutine via call instruction (or similar).
/// In other words, these registers are free for the callee to use.
pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .r8, .r9, .r10, .r11 };
pub const c_abi_int_param_regs = [_]Register{ .rcx, .rdx, .r8, .r9 };
pub const c_abi_int_return_regs = [_]Register{.rax};
};
pub fn getCalleePreservedRegs(target: Target) []const Register {
return switch (target.os.tag) {
.windows => &Win64.callee_preserved_regs,
else => &SysV.callee_preserved_regs,
};
}
pub fn getCallerPreservedRegs(target: Target) []const Register {
return switch (target.os.tag) {
.windows => &Win64.caller_preserved_regs,
else => &SysV.caller_preserved_regs,
};
}
pub fn getCAbiIntParamRegs(target: Target) []const Register {
return switch (target.os.tag) {
.windows => &Win64.c_abi_int_param_regs,
else => &SysV.c_abi_int_param_regs,
};
}
pub fn getCAbiIntReturnRegs(target: Target) []const Register {
return switch (target.os.tag) {
.windows => &Win64.c_abi_int_return_regs,
else => &SysV.c_abi_int_return_regs,
};
}
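A small usage sketch of the new getters (assuming this file imports `std`, with `builtin` added for the test): callers select the register set by target instead of hard-coding either ABI, and Win64 exposes only four integer parameter registers where SysV has six.

const builtin = @import("builtin");

test "parameter register count follows the target ABI" {
    const param_regs = getCAbiIntParamRegs(builtin.target);
    switch (builtin.target.os.tag) {
        .windows => try std.testing.expectEqual(@as(usize, 4), param_regs.len),
        else => try std.testing.expectEqual(@as(usize, 6), param_regs.len),
    }
}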
const gp_regs = [_]Register{
.rbx, .r12, .r13, .r14, .r15, .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11,
};
const sse_avx_regs = [_]Register{ const sse_avx_regs = [_]Register{
.ymm0, .ymm1, .ymm2, .ymm3, .ymm4, .ymm5, .ymm6, .ymm7, .ymm0, .ymm1, .ymm2, .ymm3, .ymm4, .ymm5, .ymm6, .ymm7,
.ymm8, .ymm9, .ymm10, .ymm11, .ymm12, .ymm13, .ymm14, .ymm15, .ymm8, .ymm9, .ymm10, .ymm11, .ymm12, .ymm13, .ymm14, .ymm15,
}; };
const allocatable_registers = callee_preserved_regs ++ caller_preserved_regs ++ sse_avx_regs; const allocatable_regs = gp_regs ++ sse_avx_regs;
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers); pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_regs);
// Register classes // Register classes
const RegisterBitSet = RegisterManager.RegisterBitSet; const RegisterBitSet = RegisterManager.RegisterBitSet;
@ -417,15 +463,15 @@ pub const RegisterClass = struct {
var set = RegisterBitSet.initEmpty(); var set = RegisterBitSet.initEmpty();
set.setRangeValue(.{ set.setRangeValue(.{
.start = 0, .start = 0,
.end = caller_preserved_regs.len + callee_preserved_regs.len, .end = gp_regs.len,
}, true); }, true);
break :blk set; break :blk set;
}; };
pub const sse: RegisterBitSet = blk: { pub const sse: RegisterBitSet = blk: {
var set = RegisterBitSet.initEmpty(); var set = RegisterBitSet.initEmpty();
set.setRangeValue(.{ set.setRangeValue(.{
.start = caller_preserved_regs.len + callee_preserved_regs.len, .start = gp_regs.len,
.end = allocatable_registers.len, .end = allocatable_regs.len,
}, true); }, true);
break :blk set; break :blk set;
}; };

View File

@ -3912,7 +3912,7 @@ pub const DeclGen = struct {
var b: usize = 0; var b: usize = 0;
for (parent_ty.structFields().values()[0..field_index]) |field| { for (parent_ty.structFields().values()[0..field_index]) |field| {
if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue; if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
b += field.ty.bitSize(target); b += @intCast(usize, field.ty.bitSize(target));
} }
break :b b; break :b b;
}; };
@ -9385,6 +9385,12 @@ pub const FuncGen = struct {
return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
} }
if (info.pointee_type.isPtrAtRuntime()) {
const same_size_int = self.context.intType(elem_bits);
const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
}
return self.builder.buildTrunc(shifted_value, elem_llvm_ty, ""); return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
} }
@ -9416,7 +9422,10 @@ pub const FuncGen = struct {
// Convert to equally-sized integer type in order to perform the bit // Convert to equally-sized integer type in order to perform the bit
// operations on the value to store // operations on the value to store
const value_bits_type = self.context.intType(elem_bits); const value_bits_type = self.context.intType(elem_bits);
const value_bits = self.builder.buildBitCast(elem, value_bits_type, ""); const value_bits = if (elem_ty.isPtrAtRuntime())
self.builder.buildPtrToInt(elem, value_bits_type, "")
else
self.builder.buildBitCast(elem, value_bits_type, "");
var mask_val = value_bits_type.constAllOnes(); var mask_val = value_bits_type.constAllOnes();
mask_val = mask_val.constZExt(containing_int_ty); mask_val = mask_val.constZExt(containing_int_ty);
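User-level Zig that would exercise both new paths (a sketch, assuming pointer fields are accepted in packed structs here): the store lowers the pointer through ptrtoint into the containing integer, and the load recovers it via trunc plus inttoptr.

const std = @import("std");

test "pointer field in a packed struct round-trips" {
    const P = packed struct { flag: bool, ptr: *u8 };
    var x: u8 = 42;
    var p = P{ .flag = true, .ptr = &x };
    p.ptr.* += 1; // load of `ptr` takes the new inttoptr path
    try std.testing.expect(x == 43);
    try std.testing.expect(p.flag);
}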

View File

@ -166,6 +166,9 @@ pub const Options = struct {
version_script: ?[]const u8, version_script: ?[]const u8,
soname: ?[]const u8, soname: ?[]const u8,
llvm_cpu_features: ?[*:0]const u8, llvm_cpu_features: ?[*:0]const u8,
print_gc_sections: bool,
print_icf_sections: bool,
print_map: bool,
objects: []Compilation.LinkObject, objects: []Compilation.LinkObject,
framework_dirs: []const []const u8, framework_dirs: []const []const u8,
@ -476,7 +479,7 @@ pub const File = struct {
log.debug("getGlobalSymbol '{s}'", .{name}); log.debug("getGlobalSymbol '{s}'", .{name});
switch (base.tag) { switch (base.tag) {
// zig fmt: off // zig fmt: off
.coff => unreachable, .coff => return @fieldParentPtr(Coff, "base", base).getGlobalSymbol(name),
.elf => unreachable, .elf => unreachable,
.macho => return @fieldParentPtr(MachO, "base", base).getGlobalSymbol(name), .macho => return @fieldParentPtr(MachO, "base", base).getGlobalSymbol(name),
.plan9 => unreachable, .plan9 => unreachable,

File diff suppressed because it is too large

View File

@ -4,8 +4,6 @@ const std = @import("std");
const coff = std.coff; const coff = std.coff;
const log = std.log.scoped(.link); const log = std.log.scoped(.link);
const Allocator = std.mem.Allocator;
const Coff = @import("../Coff.zig"); const Coff = @import("../Coff.zig");
const Reloc = Coff.Reloc; const Reloc = Coff.Reloc;
const SymbolWithLoc = Coff.SymbolWithLoc; const SymbolWithLoc = Coff.SymbolWithLoc;
@ -41,11 +39,6 @@ pub const empty = Atom{
.next = null, .next = null,
}; };
pub fn deinit(self: *Atom, gpa: Allocator) void {
_ = self;
_ = gpa;
}
/// Returns symbol referencing this atom. /// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol { pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol {
return coff_file.getSymbol(.{ return coff_file.getSymbol(.{
@ -118,3 +111,13 @@ pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
} }
try gop.value_ptr.append(gpa, offset); try gop.value_ptr.append(gpa, offset);
} }
pub fn addBinding(self: *Atom, coff_file: *Coff, target: SymbolWithLoc) !void {
const gpa = coff_file.base.allocator;
log.debug(" (adding binding to target %{d} in %{d})", .{ target.sym_index, self.sym_index });
const gop = try coff_file.bindings.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, target);
}

View File

@ -861,7 +861,8 @@ pub fn commitDeclState(
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
writeDbgLineNopsBuffered(wasm_file.debug_line.items, src_fn.off, 0, &.{}, src_fn.len); const debug_line = wasm_file.debug_line_atom.?.code;
writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len);
}, },
else => unreachable, else => unreachable,
} }
@ -972,23 +973,21 @@ pub fn commitDeclState(
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
const segment_index = try wasm_file.getDebugLineIndex(); const atom = wasm_file.debug_line_atom.?;
const segment = &wasm_file.segments.items[segment_index]; const debug_line = &atom.code;
const debug_line = &wasm_file.debug_line; const segment_size = debug_line.items.len;
if (needed_size != segment.size) { if (needed_size != segment_size) {
log.debug(" needed size does not equal allocated size: {d}", .{needed_size}); log.debug(" needed size does not equal allocated size: {d}", .{needed_size});
if (needed_size > segment.size) { if (needed_size > segment_size) {
log.debug(" allocating {d} bytes for 'debug line' information", .{needed_size - segment.size}); log.debug(" allocating {d} bytes for 'debug line' information", .{needed_size - segment_size});
try debug_line.resize(self.allocator, needed_size); try debug_line.resize(self.allocator, needed_size);
mem.set(u8, debug_line.items[segment.size..], 0); mem.set(u8, debug_line.items[segment_size..], 0);
} }
segment.size = needed_size;
debug_line.items.len = needed_size; debug_line.items.len = needed_size;
} }
const offset = segment.offset + src_fn.off;
writeDbgLineNopsBuffered( writeDbgLineNopsBuffered(
debug_line.items, debug_line.items,
offset, src_fn.off,
prev_padding_size, prev_padding_size,
dbg_line_buffer.items, dbg_line_buffer.items,
next_padding_size, next_padding_size,
@ -1146,10 +1145,8 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, file: *File, atom: *Atom, len: u3
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
const segment_index = try wasm_file.getDebugInfoIndex(); const debug_info = &wasm_file.debug_info_atom.?.code;
const segment = &wasm_file.segments.items[segment_index]; try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false);
const offset = segment.offset + atom.off;
try writeDbgInfoNopsToArrayList(gpa, &wasm_file.debug_info, offset, 0, &.{0}, atom.len, false);
}, },
else => unreachable, else => unreachable,
} }
@ -1276,27 +1273,25 @@ fn writeDeclDebugInfo(self: *Dwarf, file: *File, atom: *Atom, dbg_info_buf: []co
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
const segment_index = try wasm_file.getDebugInfoIndex(); const info_atom = wasm_file.debug_info_atom.?;
const segment = &wasm_file.segments.items[segment_index]; const debug_info = &info_atom.code;
const debug_info = &wasm_file.debug_info; const segment_size = debug_info.items.len;
if (needed_size != segment.size) { if (needed_size != segment_size) {
log.debug(" needed size does not equal allocated size: {d}", .{needed_size}); log.debug(" needed size does not equal allocated size: {d}", .{needed_size});
if (needed_size > segment.size) { if (needed_size > segment_size) {
log.debug(" allocating {d} bytes for 'debug info' information", .{needed_size - segment.size}); log.debug(" allocating {d} bytes for 'debug info' information", .{needed_size - segment_size});
try debug_info.resize(self.allocator, needed_size); try debug_info.resize(self.allocator, needed_size);
mem.set(u8, debug_info.items[segment.size..], 0); mem.set(u8, debug_info.items[segment_size..], 0);
} }
segment.size = needed_size;
debug_info.items.len = needed_size; debug_info.items.len = needed_size;
} }
const offset = segment.offset + atom.off;
log.debug(" writeDbgInfoNopsToArrayList debug_info_len={d} offset={d} content_len={d} next_padding_size={d}", .{ log.debug(" writeDbgInfoNopsToArrayList debug_info_len={d} offset={d} content_len={d} next_padding_size={d}", .{
debug_info.items.len, offset, dbg_info_buf.len, next_padding_size, debug_info.items.len, atom.off, dbg_info_buf.len, next_padding_size,
}); });
try writeDbgInfoNopsToArrayList( try writeDbgInfoNopsToArrayList(
gpa, gpa,
debug_info, debug_info,
offset, atom.off,
prev_padding_size, prev_padding_size,
dbg_info_buf, dbg_info_buf,
next_padding_size, next_padding_size,
@ -1337,10 +1332,9 @@ pub fn updateDeclLineNumber(self: *Dwarf, file: *File, decl: *const Module.Decl)
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
const segment_index = wasm_file.getDebugLineIndex() catch unreachable; const offset = decl.fn_link.wasm.src_fn.off + self.getRelocDbgLineOff();
const segment = wasm_file.segments.items[segment_index]; const atom = wasm_file.debug_line_atom.?;
const offset = segment.offset + decl.fn_link.wasm.src_fn.off + self.getRelocDbgLineOff(); mem.copy(u8, atom.code.items[offset..], &data);
mem.copy(u8, wasm_file.debug_line.items[offset..], &data);
}, },
else => unreachable, else => unreachable,
} }
@ -1576,8 +1570,9 @@ pub fn writeDbgAbbrev(self: *Dwarf, file: *File) !void {
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
try wasm_file.debug_abbrev.resize(wasm_file.base.allocator, needed_size); const debug_abbrev = &wasm_file.debug_abbrev_atom.?.code;
mem.copy(u8, wasm_file.debug_abbrev.items, &abbrev_buf); try debug_abbrev.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_abbrev.items, &abbrev_buf);
}, },
else => unreachable, else => unreachable,
} }
@ -1687,7 +1682,8 @@ pub fn writeDbgInfoHeader(self: *Dwarf, file: *File, module: *Module, low_pc: u6
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
try writeDbgInfoNopsToArrayList(self.allocator, &wasm_file.debug_info, 0, 0, di_buf.items, jmp_amt, false); const debug_info = &wasm_file.debug_info_atom.?.code;
try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false);
}, },
else => unreachable, else => unreachable,
} }
@ -2016,8 +2012,9 @@ pub fn writeDbgAranges(self: *Dwarf, file: *File, addr: u64, size: u64) !void {
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
try wasm_file.debug_aranges.resize(wasm_file.base.allocator, needed_size); const debug_ranges = &wasm_file.debug_ranges_atom.?.code;
mem.copy(u8, wasm_file.debug_aranges.items, di_buf.items); try debug_ranges.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_ranges.items, di_buf.items);
}, },
else => unreachable, else => unreachable,
} }
@ -2139,7 +2136,8 @@ pub fn writeDbgLineHeader(self: *Dwarf, file: *File, module: *Module) !void {
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
writeDbgLineNopsBuffered(wasm_file.debug_line.items, 0, 0, di_buf.items, jmp_amt); const debug_line = wasm_file.debug_line_atom.?.code;
writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt);
}, },
else => unreachable, else => unreachable,
} }
@ -2287,7 +2285,8 @@ pub fn flushModule(self: *Dwarf, file: *File, module: *Module) !void {
}, },
.wasm => { .wasm => {
const wasm_file = file.cast(File.Wasm).?; const wasm_file = file.cast(File.Wasm).?;
mem.copy(u8, wasm_file.debug_info.items[reloc.atom.off + reloc.offset ..], &buf); const debug_info = wasm_file.debug_info_atom.?.code;
mem.copy(u8, debug_info.items[reloc.atom.off + reloc.offset ..], &buf);
}, },
else => unreachable, else => unreachable,
} }

View File

@ -1482,6 +1482,18 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
try argv.append("--gc-sections"); try argv.append("--gc-sections");
} }
if (self.base.options.print_gc_sections) {
try argv.append("--print-gc-sections");
}
if (self.base.options.print_icf_sections) {
try argv.append("--print-icf-sections");
}
if (self.base.options.print_map) {
try argv.append("--print-map");
}
if (self.base.options.eh_frame_hdr) { if (self.base.options.eh_frame_hdr) {
try argv.append("--eh-frame-hdr"); try argv.append("--eh-frame-hdr");
} }

View File

@ -793,11 +793,13 @@ fn linkOneShot(self: *MachO, comp: *Compilation, prog_node: *std.Progress.Node)
} }
} else { } else {
const sub_path = self.base.options.emit.?.sub_path; const sub_path = self.base.options.emit.?.sub_path;
if (self.base.file == null) {
self.base.file = try directory.handle.createFile(sub_path, .{ self.base.file = try directory.handle.createFile(sub_path, .{
.truncate = true, .truncate = true,
.read = true, .read = true,
.mode = link.determineMode(self.base.options), .mode = link.determineMode(self.base.options),
}); });
}
// Index 0 is always a null symbol. // Index 0 is always a null symbol.
try self.locals.append(gpa, .{ try self.locals.append(gpa, .{
.n_strx = 0, .n_strx = 0,
@ -1155,6 +1157,29 @@ fn linkOneShot(self: *MachO, comp: *Compilation, prog_node: *std.Progress.Node)
var ncmds: u32 = 0; var ncmds: u32 = 0;
try self.writeLinkeditSegmentData(&ncmds, lc_writer); try self.writeLinkeditSegmentData(&ncmds, lc_writer);
// If the last section of __DATA segment is zerofill section, we need to ensure
// that the free space between the end of the last non-zerofill section of __DATA
// segment and the beginning of __LINKEDIT segment is zerofilled as the loader will
// copy-paste this space into memory for quicker zerofill operation.
if (self.data_segment_cmd_index) |data_seg_id| blk: {
var physical_zerofill_start: u64 = 0;
const section_indexes = self.getSectionIndexes(data_seg_id);
for (self.sections.items(.header)[section_indexes.start..section_indexes.end]) |header| {
if (header.isZerofill() and header.size > 0) break;
physical_zerofill_start = header.offset + header.size;
} else break :blk;
const linkedit = self.segments.items[self.linkedit_segment_cmd_index.?];
const physical_zerofill_size = math.cast(usize, linkedit.fileoff - physical_zerofill_start) orelse
return error.Overflow;
if (physical_zerofill_size > 0) {
var padding = try self.base.allocator.alloc(u8, physical_zerofill_size);
defer self.base.allocator.free(padding);
mem.set(u8, padding, 0);
try self.base.file.?.pwriteAll(padding, physical_zerofill_start);
}
}
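// To recap the file layout the block above patches (a sketch of the image):
//
//   ...last non-zerofill __DATA section | gap zeroed here | __LINKEDIT...
//                                       ^                 ^
//                    physical_zerofill_start        linkedit.fileoff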
try writeDylinkerLC(&ncmds, lc_writer); try writeDylinkerLC(&ncmds, lc_writer);
try self.writeMainLC(&ncmds, lc_writer); try self.writeMainLC(&ncmds, lc_writer);
try self.writeDylibIdLC(&ncmds, lc_writer); try self.writeDylibIdLC(&ncmds, lc_writer);
@ -1435,7 +1460,6 @@ fn parseArchive(self: *MachO, path: []const u8, force_load: bool) !bool {
if (force_load) { if (force_load) {
defer archive.deinit(gpa); defer archive.deinit(gpa);
defer file.close();
// Get all offsets from the ToC // Get all offsets from the ToC
var offsets = std.AutoArrayHashMap(u32, void).init(gpa); var offsets = std.AutoArrayHashMap(u32, void).init(gpa);
defer offsets.deinit(); defer offsets.deinit();
@ -3086,15 +3110,6 @@ pub fn deinit(self: *MachO) void {
self.atom_by_index_table.deinit(gpa); self.atom_by_index_table.deinit(gpa);
} }
pub fn closeFiles(self: MachO) void {
for (self.archives.items) |archive| {
archive.file.close();
}
if (self.d_sym) |ds| {
ds.file.close();
}
}
fn freeAtom(self: *MachO, atom: *Atom, sect_id: u8, owns_atom: bool) void { fn freeAtom(self: *MachO, atom: *Atom, sect_id: u8, owns_atom: bool) void {
log.debug("freeAtom {*}", .{atom}); log.debug("freeAtom {*}", .{atom});
if (!owns_atom) { if (!owns_atom) {
@ -5698,9 +5713,11 @@ fn writeHeader(self: *MachO, ncmds: u32, sizeofcmds: u32) !void {
else => unreachable, else => unreachable,
} }
if (self.getSectionByName("__DATA", "__thread_vars")) |_| { if (self.getSectionByName("__DATA", "__thread_vars")) |sect_id| {
if (self.sections.items(.header)[sect_id].size > 0) {
header.flags |= macho.MH_HAS_TLV_DESCRIPTORS; header.flags |= macho.MH_HAS_TLV_DESCRIPTORS;
} }
}
header.ncmds = ncmds; header.ncmds = ncmds;
header.sizeofcmds = sizeofcmds; header.sizeofcmds = sizeofcmds;

View File

@ -88,6 +88,7 @@ const ar_hdr = extern struct {
}; };
pub fn deinit(self: *Archive, allocator: Allocator) void { pub fn deinit(self: *Archive, allocator: Allocator) void {
self.file.close();
for (self.toc.keys()) |*key| { for (self.toc.keys()) |*key| {
allocator.free(key.*); allocator.free(key.*);
} }

View File

@ -306,6 +306,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Opti
} }
pub fn deinit(self: *DebugSymbols, allocator: Allocator) void { pub fn deinit(self: *DebugSymbols, allocator: Allocator) void {
self.file.close();
self.segments.deinit(allocator); self.segments.deinit(allocator);
self.sections.deinit(allocator); self.sections.deinit(allocator);
self.dwarf.deinit(); self.dwarf.deinit();

View File

@ -67,6 +67,18 @@ code_section_index: ?u32 = null,
debug_info_index: ?u32 = null, debug_info_index: ?u32 = null,
/// The index of the segment representing the custom '.debug_line' section. /// The index of the segment representing the custom '.debug_line' section.
debug_line_index: ?u32 = null, debug_line_index: ?u32 = null,
/// The index of the segment representing the custom '.debug_loc' section.
debug_loc_index: ?u32 = null,
/// The index of the segment representing the custom '.debug_ranges' section.
debug_ranges_index: ?u32 = null,
/// The index of the segment representing the custom '.debug_pubnames' section.
debug_pubnames_index: ?u32 = null,
/// The index of the segment representing the custom '.debug_pubtypes' section.
debug_pubtypes_index: ?u32 = null,
/// The index of the segment representing the custom '.debug_str' section.
debug_str_index: ?u32 = null,
/// The index of the segment representing the custom '.debug_abbrev' section.
debug_abbrev_index: ?u32 = null,
/// The count of imported functions. This number will be appended /// The count of imported functions. This number will be appended
/// to the function indexes as their index starts at the lowest non-extern function. /// to the function indexes as their index starts at the lowest non-extern function.
imported_functions_count: u32 = 0, imported_functions_count: u32 = 0,
@ -83,24 +95,15 @@ imports: std.AutoHashMapUnmanaged(SymbolLoc, types.Import) = .{},
segments: std.ArrayListUnmanaged(Segment) = .{}, segments: std.ArrayListUnmanaged(Segment) = .{},
/// Maps a data segment key (such as .rodata) to the index into `segments`. /// Maps a data segment key (such as .rodata) to the index into `segments`.
data_segments: std.StringArrayHashMapUnmanaged(u32) = .{}, data_segments: std.StringArrayHashMapUnmanaged(u32) = .{},
/// A list of `types.Segment` which provide meta data /// A table of `types.Segment` which provides metadata
/// about a data symbol such as its name /// about a data symbol, such as its name, where the key is
segment_info: std.ArrayListUnmanaged(types.Segment) = .{}, /// the segment index, which can be found from `data_segments`
segment_info: std.AutoArrayHashMapUnmanaged(u32, types.Segment) = .{},
/// Deduplicated string table for strings used by symbols, imports and exports. /// Deduplicated string table for strings used by symbols, imports and exports.
string_table: StringTable = .{}, string_table: StringTable = .{},
/// Debug information for wasm /// Debug information for wasm
dwarf: ?Dwarf = null, dwarf: ?Dwarf = null,
// *debug information* //
/// Contains all bytes for the '.debug_info' section
debug_info: std.ArrayListUnmanaged(u8) = .{},
/// Contains all bytes for the '.debug_line' section
debug_line: std.ArrayListUnmanaged(u8) = .{},
/// Contains all bytes for the '.debug_abbrev' section
debug_abbrev: std.ArrayListUnmanaged(u8) = .{},
/// Contains all bytes for the '.debug_ranges' section
debug_aranges: std.ArrayListUnmanaged(u8) = .{},
// Output sections // Output sections
/// Output type section /// Output type section
func_types: std.ArrayListUnmanaged(wasm.Type) = .{}, func_types: std.ArrayListUnmanaged(wasm.Type) = .{},
@ -156,6 +159,19 @@ export_names: std.AutoHashMapUnmanaged(SymbolLoc, u32) = .{},
/// The actual table is populated during `flush`. /// The actual table is populated during `flush`.
error_table_symbol: ?u32 = null, error_table_symbol: ?u32 = null,
// Debug section atoms. These are only set when the current compilation
// unit contains Zig code. The lifetime of these atoms is extended
// until the end of the compiler's lifetime, meaning they are not freed
// during `flush()` in incremental mode.
debug_info_atom: ?*Atom = null,
debug_line_atom: ?*Atom = null,
debug_loc_atom: ?*Atom = null,
debug_ranges_atom: ?*Atom = null,
debug_abbrev_atom: ?*Atom = null,
debug_str_atom: ?*Atom = null,
debug_pubnames_atom: ?*Atom = null,
debug_pubtypes_atom: ?*Atom = null,
pub const Segment = struct { pub const Segment = struct {
alignment: u32, alignment: u32,
size: u32, size: u32,
@ -209,6 +225,18 @@ pub const SymbolLoc = struct {
} }
return wasm_bin.string_table.get(wasm_bin.symbols.items[self.index].name); return wasm_bin.string_table.get(wasm_bin.symbols.items[self.index].name);
} }
/// From a given symbol location, returns the final location.
/// e.g. when a symbol was resolved and replaced by the symbol
/// in a different file, this will return said location.
/// If the symbol wasn't replaced by another, this will return
/// the given location itself.
pub fn finalLoc(self: SymbolLoc, wasm_bin: *const Wasm) SymbolLoc {
if (wasm_bin.discarded.get(self)) |new_loc| {
return new_loc.finalLoc(wasm_bin);
}
return self;
}
}; };
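A toy model of the chain `finalLoc` walks (a standalone sketch, not this file's types): if symbol 1 was discarded in favor of 2, and 2 in favor of 3, resolving 1 yields 3.

const std = @import("std");

fn resolveFinal(map: *const std.AutoHashMap(u32, u32), loc: u32) u32 {
    if (map.get(loc)) |replacement| return resolveFinal(map, replacement);
    return loc;
}

test "chained replacements resolve to the last symbol" {
    var discarded = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer discarded.deinit();
    try discarded.put(1, 2); // symbol 1 replaced by symbol 2
    try discarded.put(2, 3); // symbol 2 replaced by symbol 3
    try std.testing.expectEqual(@as(u32, 3), resolveFinal(&discarded, 1));
}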
/// Generic string table that duplicates strings /// Generic string table that duplicates strings
@ -335,6 +363,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
}; };
} }
try wasm_bin.initDebugSections();
return wasm_bin; return wasm_bin;
} }
@ -363,6 +392,24 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm {
return self; return self;
} }
/// Initializes symbols and atoms for the debug sections
/// Initialization is only done when compiling Zig code.
/// When Zig is invoked as a linker instead, the atoms
/// and symbols come from the object files instead.
pub fn initDebugSections(self: *Wasm) !void {
if (self.dwarf == null) return; // not compiling Zig code, so no need to pre-initialize debug sections
assert(self.debug_info_index == null);
// this will create an Atom and set the index for us.
self.debug_info_atom = try self.createDebugSectionForIndex(&self.debug_info_index, ".debug_info");
self.debug_line_atom = try self.createDebugSectionForIndex(&self.debug_line_index, ".debug_line");
self.debug_loc_atom = try self.createDebugSectionForIndex(&self.debug_loc_index, ".debug_loc");
self.debug_abbrev_atom = try self.createDebugSectionForIndex(&self.debug_abbrev_index, ".debug_abbrev");
self.debug_ranges_atom = try self.createDebugSectionForIndex(&self.debug_ranges_index, ".debug_ranges");
self.debug_str_atom = try self.createDebugSectionForIndex(&self.debug_str_index, ".debug_str");
self.debug_pubnames_atom = try self.createDebugSectionForIndex(&self.debug_pubnames_index, ".debug_pubnames");
self.debug_pubtypes_atom = try self.createDebugSectionForIndex(&self.debug_pubtypes_index, ".debug_pubtypes");
}
fn parseInputFiles(self: *Wasm, files: []const []const u8) !void { fn parseInputFiles(self: *Wasm, files: []const []const u8) !void {
for (files) |path| { for (files) |path| {
if (try self.parseObjectFile(path)) continue; if (try self.parseObjectFile(path)) continue;
@ -644,16 +691,14 @@ pub fn deinit(self: *Wasm) void {
for (self.func_types.items) |*func_type| { for (self.func_types.items) |*func_type| {
func_type.deinit(gpa); func_type.deinit(gpa);
} }
for (self.segment_info.items) |segment_info| { for (self.segment_info.values()) |segment_info| {
gpa.free(segment_info.name); gpa.free(segment_info.name);
} }
for (self.objects.items) |*object| { for (self.objects.items) |*object| {
object.file.?.close();
object.deinit(gpa); object.deinit(gpa);
} }
for (self.archives.items) |*archive| { for (self.archives.items) |*archive| {
archive.file.close();
archive.deinit(gpa); archive.deinit(gpa);
} }
@ -692,11 +737,6 @@ pub fn deinit(self: *Wasm) void {
if (self.dwarf) |*dwarf| { if (self.dwarf) |*dwarf| {
dwarf.deinit(); dwarf.deinit();
} }
self.debug_info.deinit(gpa);
self.debug_line.deinit(gpa);
self.debug_abbrev.deinit(gpa);
self.debug_aranges.deinit(gpa);
} }
pub fn allocateDeclIndexes(self: *Wasm, decl_index: Module.Decl.Index) !void { pub fn allocateDeclIndexes(self: *Wasm, decl_index: Module.Decl.Index) !void {
@ -1337,16 +1377,7 @@ fn parseAtom(self: *Wasm, atom: *Atom, kind: Kind) !void {
const index = gop.value_ptr.*; const index = gop.value_ptr.*;
self.segments.items[index].size += atom.size; self.segments.items[index].size += atom.size;
// segment indexes can be off by 1 due to also containing a segment symbol.index = @intCast(u32, self.segment_info.getIndex(index).?);
// for the code section, so we must check if the existing segment
// is larger than that of the code section, and subtract the index by 1 in such case.
var info_add = if (self.code_section_index) |idx| blk: {
if (idx < index) break :blk @as(u32, 1);
break :blk 0;
} else @as(u32, 0);
if (self.debug_info_index != null) info_add += 1;
if (self.debug_line_index != null) info_add += 1;
symbol.index = index - info_add;
// segment info already exists, so free its memory // segment info already exists, so free its memory
self.base.allocator.free(segment_name); self.base.allocator.free(segment_name);
break :result index; break :result index;
@ -1359,8 +1390,8 @@ fn parseAtom(self: *Wasm, atom: *Atom, kind: Kind) !void {
}); });
gop.value_ptr.* = index; gop.value_ptr.* = index;
const info_index = @intCast(u32, self.segment_info.items.len); const info_index = @intCast(u32, self.segment_info.count());
try self.segment_info.append(self.base.allocator, segment_info); try self.segment_info.put(self.base.allocator, index, segment_info);
symbol.index = info_index; symbol.index = info_index;
break :result index; break :result index;
} }
@ -1370,18 +1401,54 @@ fn parseAtom(self: *Wasm, atom: *Atom, kind: Kind) !void {
const segment: *Segment = &self.segments.items[final_index]; const segment: *Segment = &self.segments.items[final_index];
segment.alignment = std.math.max(segment.alignment, atom.alignment); segment.alignment = std.math.max(segment.alignment, atom.alignment);
if (self.atoms.getPtr(final_index)) |last| { try self.appendAtomAtIndex(final_index, atom);
}
/// From a given index, append the given `Atom` at the back of the linked list.
/// Simply inserts it into the map of atoms when it doesn't exist yet.
pub fn appendAtomAtIndex(self: *Wasm, index: u32, atom: *Atom) !void {
if (self.atoms.getPtr(index)) |last| {
last.*.next = atom; last.*.next = atom;
atom.prev = last.*; atom.prev = last.*;
last.* = atom; last.* = atom;
} else { } else {
try self.atoms.putNoClobber(self.base.allocator, final_index, atom); try self.atoms.putNoClobber(self.base.allocator, index, atom);
} }
} }
/// Allocates debug atoms into their respective debug sections
/// to merge them with maybe-existing debug atoms from object files.
fn allocateDebugAtoms(self: *Wasm) !void {
if (self.dwarf == null) return;
const allocAtom = struct {
fn f(bin: *Wasm, maybe_index: *?u32, atom: *Atom) !void {
const index = maybe_index.* orelse idx: {
const index = @intCast(u32, bin.segments.items.len);
try bin.appendDummySegment();
maybe_index.* = index;
break :idx index;
};
atom.size = @intCast(u32, atom.code.items.len);
bin.symbols.items[atom.sym_index].index = index;
try bin.appendAtomAtIndex(index, atom);
}
}.f;
try allocAtom(self, &self.debug_info_index, self.debug_info_atom.?);
try allocAtom(self, &self.debug_line_index, self.debug_line_atom.?);
try allocAtom(self, &self.debug_loc_index, self.debug_loc_atom.?);
try allocAtom(self, &self.debug_str_index, self.debug_str_atom.?);
try allocAtom(self, &self.debug_ranges_index, self.debug_ranges_atom.?);
try allocAtom(self, &self.debug_abbrev_index, self.debug_abbrev_atom.?);
try allocAtom(self, &self.debug_pubnames_index, self.debug_pubnames_atom.?);
try allocAtom(self, &self.debug_pubtypes_index, self.debug_pubtypes_atom.?);
}
fn allocateAtoms(self: *Wasm) !void { fn allocateAtoms(self: *Wasm) !void {
// first sort the data segments // first sort the data segments
try sortDataSegments(self); try sortDataSegments(self);
try allocateDebugAtoms(self);
var it = self.atoms.iterator(); var it = self.atoms.iterator();
while (it.next()) |entry| { while (it.next()) |entry| {
@ -1399,7 +1466,7 @@ fn allocateAtoms(self: *Wasm) !void {
atom.size, atom.size,
}); });
offset += atom.size; offset += atom.size;
self.symbol_atom.putAssumeCapacity(atom.symbolLoc(), atom); // Update atom pointers try self.symbol_atom.put(self.base.allocator, atom.symbolLoc(), atom); // Update atom pointers
atom = atom.next orelse break; atom = atom.next orelse break;
} }
segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment); segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
@ -1753,7 +1820,7 @@ fn setupMemory(self: *Wasm) !void {
/// From a given object's index and the index of the segment, returns the corresponding /// From a given object's index and the index of the segment, returns the corresponding
/// index of the segment within the final data section. When the segment does not yet /// index of the segment within the final data section. When the segment does not yet
/// exist, a new one will be initialized and appended. The new index will be returned in that case. /// exist, a new one will be initialized and appended. The new index will be returned in that case.
pub fn getMatchingSegment(self: *Wasm, object_index: u16, relocatable_index: u32) !u32 { pub fn getMatchingSegment(self: *Wasm, object_index: u16, relocatable_index: u32) !?u32 {
const object: Object = self.objects.items[object_index]; const object: Object = self.objects.items[object_index];
const relocatable_data = object.relocatable_data[relocatable_index]; const relocatable_data = object.relocatable_data[relocatable_index];
const index = @intCast(u32, self.segments.items.len); const index = @intCast(u32, self.segments.items.len);
@ -1765,25 +1832,81 @@ pub fn getMatchingSegment(self: *Wasm, object_index: u16, relocatable_index: u32
const result = try self.data_segments.getOrPut(self.base.allocator, segment_info.outputName(merge_segment)); const result = try self.data_segments.getOrPut(self.base.allocator, segment_info.outputName(merge_segment));
if (!result.found_existing) { if (!result.found_existing) {
result.value_ptr.* = index; result.value_ptr.* = index;
try self.segments.append(self.base.allocator, .{ try self.appendDummySegment();
.alignment = 1,
.size = 0,
.offset = 0,
});
return index; return index;
} else return result.value_ptr.*; } else return result.value_ptr.*;
}, },
.code => return self.code_section_index orelse blk: { .code => return self.code_section_index orelse blk: {
self.code_section_index = index; self.code_section_index = index;
try self.appendDummySegment();
break :blk index;
},
.debug => {
const debug_name = object.getDebugName(relocatable_data);
if (mem.eql(u8, debug_name, ".debug_info")) {
return self.debug_info_index orelse blk: {
self.debug_info_index = index;
try self.appendDummySegment();
break :blk index;
};
} else if (mem.eql(u8, debug_name, ".debug_line")) {
return self.debug_line_index orelse blk: {
self.debug_line_index = index;
try self.appendDummySegment();
break :blk index;
};
} else if (mem.eql(u8, debug_name, ".debug_loc")) {
return self.debug_loc_index orelse blk: {
self.debug_loc_index = index;
try self.appendDummySegment();
break :blk index;
};
} else if (mem.eql(u8, debug_name, ".debug_ranges")) {
return self.debug_ranges_index orelse blk: {
self.debug_ranges_index = index;
try self.appendDummySegment();
break :blk index;
};
} else if (mem.eql(u8, debug_name, ".debug_pubnames")) {
return self.debug_pubnames_index orelse blk: {
self.debug_pubnames_index = index;
try self.appendDummySegment();
break :blk index;
};
} else if (mem.eql(u8, debug_name, ".debug_pubtypes")) {
return self.debug_pubtypes_index orelse blk: {
self.debug_pubtypes_index = index;
try self.appendDummySegment();
break :blk index;
};
} else if (mem.eql(u8, debug_name, ".debug_abbrev")) {
return self.debug_abbrev_index orelse blk: {
self.debug_abbrev_index = index;
try self.appendDummySegment();
break :blk index;
};
} else if (mem.eql(u8, debug_name, ".debug_str")) {
return self.debug_str_index orelse blk: {
self.debug_str_index = index;
try self.appendDummySegment();
break :blk index;
};
} else {
log.warn("found unknown debug section '{s}'", .{debug_name});
log.warn(" debug section will be skipped", .{});
return null;
}
},
}
}
/// Appends a new segment with default field values
fn appendDummySegment(self: *Wasm) !void {
try self.segments.append(self.base.allocator, .{ try self.segments.append(self.base.allocator, .{
.alignment = 1, .alignment = 1,
.size = 0, .size = 0,
.offset = 0, .offset = 0,
}); });
break :blk index;
},
.custom => return error.@"TODO: Custom section relocations for wasm",
}
} }
/// Returns the symbol index of the error name table. /// Returns the symbol index of the error name table.
@ -1903,40 +2026,41 @@ fn populateErrorNameTable(self: *Wasm) !void {
try self.parseAtom(names_atom, .{ .data = .read_only }); try self.parseAtom(names_atom, .{ .data = .read_only });
} }
pub fn getDebugInfoIndex(self: *Wasm) !u32 { /// From a given index variable, creates a new debug section.
assert(self.dwarf != null); /// This initializes the index, appends a new segment,
return self.debug_info_index orelse { /// and finally, creates a managed `Atom`.
self.debug_info_index = @intCast(u32, self.segments.items.len); pub fn createDebugSectionForIndex(self: *Wasm, index: *?u32, name: []const u8) !*Atom {
const segment = try self.segments.addOne(self.base.allocator); const new_index = @intCast(u32, self.segments.items.len);
segment.* = .{ index.* = new_index;
.size = 0, try self.appendDummySegment();
.offset = 0,
// debug sections always have alignment '1'
.alignment = 1,
};
return self.debug_info_index.?;
};
}
pub fn getDebugLineIndex(self: *Wasm) !u32 { const sym_index = self.symbols_free_list.popOrNull() orelse idx: {
assert(self.dwarf != null); const tmp_index = @intCast(u32, self.symbols.items.len);
return self.debug_line_index orelse { _ = try self.symbols.addOne(self.base.allocator);
self.debug_line_index = @intCast(u32, self.segments.items.len); break :idx tmp_index;
const segment = try self.segments.addOne(self.base.allocator);
segment.* = .{
.size = 0,
.offset = 0,
.alignment = 1,
}; };
return self.debug_line_index.?; self.symbols.items[sym_index] = .{
.tag = .section,
.name = try self.string_table.put(self.base.allocator, name),
.index = 0,
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
}; };
const atom = try self.base.allocator.create(Atom);
atom.* = Atom.empty;
atom.alignment = 1; // debug sections are always 1-byte-aligned
atom.sym_index = sym_index;
try self.managed_atoms.append(self.base.allocator, atom);
try self.symbol_atom.put(self.base.allocator, atom.symbolLoc(), atom);
return atom;
} }
fn resetState(self: *Wasm) void { fn resetState(self: *Wasm) void {
for (self.segment_info.items) |*segment_info| { for (self.segment_info.values()) |segment_info| {
self.base.allocator.free(segment_info.name); self.base.allocator.free(segment_info.name);
} }
const mod = self.base.options.module.?; if (self.base.options.module) |mod| {
var decl_it = self.decls.keyIterator(); var decl_it = self.decls.keyIterator();
while (decl_it.next()) |decl_index_ptr| { while (decl_it.next()) |decl_index_ptr| {
const decl = mod.declPtr(decl_index_ptr.*); const decl = mod.declPtr(decl_index_ptr.*);
@ -1949,6 +2073,7 @@ fn resetState(self: *Wasm) void {
local_atom.prev = null; local_atom.prev = null;
} }
} }
}
self.functions.clearRetainingCapacity(); self.functions.clearRetainingCapacity();
self.exports.clearRetainingCapacity(); self.exports.clearRetainingCapacity();
self.segments.clearRetainingCapacity(); self.segments.clearRetainingCapacity();
@ -1959,6 +2084,12 @@ fn resetState(self: *Wasm) void {
self.code_section_index = null; self.code_section_index = null;
self.debug_info_index = null; self.debug_info_index = null;
self.debug_line_index = null; self.debug_line_index = null;
self.debug_loc_index = null;
self.debug_str_index = null;
self.debug_ranges_index = null;
self.debug_abbrev_index = null;
self.debug_pubnames_index = null;
self.debug_pubtypes_index = null;
} }
pub fn flush(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !void { pub fn flush(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !void {
@ -2036,7 +2167,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
defer self.resetState(); defer self.resetState();
try self.setupStart(); try self.setupStart();
try self.setupImports(); try self.setupImports();
const mod = self.base.options.module.?; if (self.base.options.module) |mod| {
var decl_it = self.decls.keyIterator(); var decl_it = self.decls.keyIterator();
while (decl_it.next()) |decl_index_ptr| { while (decl_it.next()) |decl_index_ptr| {
const decl = mod.declPtr(decl_index_ptr.*); const decl = mod.declPtr(decl_index_ptr.*);
@ -2062,13 +2193,15 @@ pub fn flushModule(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
} }
} }
if (self.dwarf) |*dwarf| {
try dwarf.flushModule(&self.base, self.base.options.module.?);
}
}
for (self.objects.items) |*object, object_index| { for (self.objects.items) |*object, object_index| {
try object.parseIntoAtoms(self.base.allocator, @intCast(u16, object_index), self); try object.parseIntoAtoms(self.base.allocator, @intCast(u16, object_index), self);
} }
if (self.dwarf) |*dwarf| {
try dwarf.flushModule(&self.base, self.base.options.module.?);
}
try self.allocateAtoms(); try self.allocateAtoms();
try self.setupMemory(); try self.setupMemory();
self.mapFunctionTable(); self.mapFunctionTable();
@ -2424,19 +2557,44 @@ pub fn flushModule(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
} }
} else if (!self.base.options.strip) { } else if (!self.base.options.strip) {
if (self.dwarf) |*dwarf| { if (self.dwarf) |*dwarf| {
if (self.debug_info_index != null) { const mod = self.base.options.module.?;
try dwarf.writeDbgAbbrev(&self.base); try dwarf.writeDbgAbbrev(&self.base);
// for debug info and ranges, the address is always 0, // for debug info and ranges, the address is always 0,
// as locations are always offsets relative to 'code' section. // as locations are always offsets relative to 'code' section.
try dwarf.writeDbgInfoHeader(&self.base, mod, 0, code_section_size); try dwarf.writeDbgInfoHeader(&self.base, mod, 0, code_section_size);
try dwarf.writeDbgAranges(&self.base, 0, code_section_size); try dwarf.writeDbgAranges(&self.base, 0, code_section_size);
try dwarf.writeDbgLineHeader(&self.base, mod); try dwarf.writeDbgLineHeader(&self.base, mod);
}
try emitDebugSection(file, self.debug_info.items, ".debug_info"); var debug_bytes = std.ArrayList(u8).init(self.base.allocator);
try emitDebugSection(file, self.debug_aranges.items, ".debug_ranges"); defer debug_bytes.deinit();
try emitDebugSection(file, self.debug_abbrev.items, ".debug_abbrev");
try emitDebugSection(file, self.debug_line.items, ".debug_line"); const DebugSection = struct {
try emitDebugSection(file, dwarf.strtab.items, ".debug_str"); name: []const u8,
index: ?u32,
};
const debug_sections: []const DebugSection = &.{
.{ .name = ".debug_info", .index = self.debug_info_index },
.{ .name = ".debug_pubtypes", .index = self.debug_pubtypes_index },
.{ .name = ".debug_abbrev", .index = self.debug_abbrev_index },
.{ .name = ".debug_line", .index = self.debug_line_index },
.{ .name = ".debug_str", .index = self.debug_str_index },
.{ .name = ".debug_pubnames", .index = self.debug_pubnames_index },
.{ .name = ".debug_loc", .index = self.debug_loc_index },
.{ .name = ".debug_ranges", .index = self.debug_ranges_index },
};
for (debug_sections) |item| {
if (item.index) |index| {
var atom = self.atoms.get(index).?.getFirst();
while (true) {
atom.resolveRelocs(self);
try debug_bytes.appendSlice(atom.code.items);
atom = atom.next orelse break;
}
try emitDebugSection(file, debug_bytes.items, item.name);
debug_bytes.clearRetainingCapacity();
} }
} }
try self.emitNameSection(file, arena); try self.emitNameSection(file, arena);
@ -2444,6 +2602,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
} }
fn emitDebugSection(file: fs.File, data: []const u8, name: []const u8) !void { fn emitDebugSection(file: fs.File, data: []const u8, name: []const u8) !void {
if (data.len == 0) return;
const header_offset = try reserveCustomSectionHeader(file); const header_offset = try reserveCustomSectionHeader(file);
const writer = file.writer(); const writer = file.writer();
try leb.writeULEB128(writer, @intCast(u32, name.len)); try leb.writeULEB128(writer, @intCast(u32, name.len));
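For context, a wasm custom section is encoded as a one-byte section id of 0, a ULEB128 payload size, and a payload consisting of a ULEB128 name length, the name bytes, and the raw contents. A minimal standalone sketch of that encoding, assuming `const std = @import("std");` and `const leb = std.leb;` as elsewhere in this file (the function name is illustrative, not one of the file's helpers):

    // Sketch: encode a wasm custom section (id 0) into an owned byte slice.
    fn encodeCustomSection(gpa: std.mem.Allocator, name: []const u8, data: []const u8) ![]u8 {
        var payload = std.ArrayList(u8).init(gpa);
        defer payload.deinit();
        try leb.writeULEB128(payload.writer(), @intCast(u32, name.len));
        try payload.appendSlice(name); // e.g. ".debug_info"
        try payload.appendSlice(data); // raw section contents
        var bytes = std.ArrayList(u8).init(gpa);
        errdefer bytes.deinit();
        try bytes.append(0); // 0 = 'custom' section
        try leb.writeULEB128(bytes.writer(), @intCast(u32, payload.items.len));
        try bytes.appendSlice(payload.items);
        return bytes.toOwnedSlice();
    }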
@ -3057,14 +3216,26 @@ fn writeVecSectionHeader(file: fs.File, offset: u64, section: wasm.Section, size
buf[0] = @enumToInt(section); buf[0] = @enumToInt(section);
leb.writeUnsignedFixed(5, buf[1..6], size); leb.writeUnsignedFixed(5, buf[1..6], size);
leb.writeUnsignedFixed(5, buf[6..], items); leb.writeUnsignedFixed(5, buf[6..], items);
if (builtin.target.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12783
const curr_pos = try file.getPos();
try file.pwriteAll(&buf, offset); try file.pwriteAll(&buf, offset);
try file.seekTo(curr_pos);
} else try file.pwriteAll(&buf, offset);
} }
fn writeCustomSectionHeader(file: fs.File, offset: u64, size: u32) !void { fn writeCustomSectionHeader(file: fs.File, offset: u64, size: u32) !void {
var buf: [1 + 5]u8 = undefined; var buf: [1 + 5]u8 = undefined;
buf[0] = 0; // 0 = 'custom' section buf[0] = 0; // 0 = 'custom' section
leb.writeUnsignedFixed(5, buf[1..6], size); leb.writeUnsignedFixed(5, buf[1..6], size);
if (builtin.target.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12783
const curr_pos = try file.getPos();
try file.pwriteAll(&buf, offset); try file.pwriteAll(&buf, offset);
try file.seekTo(curr_pos);
} else try file.pwriteAll(&buf, offset);
} }
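Both header writers rely on `leb.writeUnsignedFixed`, which always emits exactly 5 bytes by padding with ULEB128 continuation bits, so the header can be reserved up front and backpatched in place once the payload size is known. A short worked example of that encoding:

    // writeUnsignedFixed(5, buf, 3) pads the value 3 out to five bytes:
    //   0x83 0x80 0x80 0x80 0x00
    // Four bytes carry the continuation bit and the last byte is zero; this
    // still decodes to 3, but keeps the custom-section header a fixed
    // 6 bytes (1 id + 5 size), so pwriteAll can patch it without shifting
    // the payload that follows.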
fn emitLinkSection(self: *Wasm, file: fs.File, arena: Allocator, symbol_table: *std.AutoArrayHashMap(SymbolLoc, u32)) !void { fn emitLinkSection(self: *Wasm, file: fs.File, arena: Allocator, symbol_table: *std.AutoArrayHashMap(SymbolLoc, u32)) !void {
@ -3149,8 +3320,8 @@ fn emitSegmentInfo(self: *Wasm, file: fs.File, arena: Allocator) !void {
var payload = std.ArrayList(u8).init(arena); var payload = std.ArrayList(u8).init(arena);
const writer = payload.writer(); const writer = payload.writer();
try leb.writeULEB128(file.writer(), @enumToInt(types.SubsectionType.WASM_SEGMENT_INFO)); try leb.writeULEB128(file.writer(), @enumToInt(types.SubsectionType.WASM_SEGMENT_INFO));
try leb.writeULEB128(writer, @intCast(u32, self.segment_info.items.len)); try leb.writeULEB128(writer, @intCast(u32, self.segment_info.count()));
for (self.segment_info.items) |segment_info| { for (self.segment_info.values()) |segment_info| {
log.debug("Emit segment: {s} align({d}) flags({b})", .{ log.debug("Emit segment: {s} align({d}) flags({b})", .{
segment_info.name, segment_info.name,
@ctz(segment_info.alignment), @ctz(segment_info.alignment),

View File

@ -95,6 +95,7 @@ const ar_hdr = extern struct {
}; };
pub fn deinit(archive: *Archive, allocator: Allocator) void { pub fn deinit(archive: *Archive, allocator: Allocator) void {
archive.file.close();
for (archive.toc.keys()) |*key| { for (archive.toc.keys()) |*key| {
allocator.free(key.*); allocator.free(key.*);
} }

View File

@ -90,6 +90,19 @@ pub fn getFirst(self: *Atom) *Atom {
return tmp; return tmp;
} }
/// Unlike `getFirst`, this returns the first `*Atom` that was
/// produced from Zig code, rather than from an object file.
/// This is useful for debug sections where we want to extend
/// the bytes and don't want to overwrite existing Atoms.
pub fn getFirstZigAtom(self: *Atom) *Atom {
if (self.file == null) return self;
var tmp = self;
return while (tmp.prev) |prev| {
if (prev.file == null) break prev;
tmp = prev;
} else unreachable; // must allocate an Atom first!
}
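A minimal usage sketch of the helper above (`atom` is a hypothetical tail of a debug section's atom chain):

    const first_zig_atom = atom.getFirstZigAtom();
    // The returned atom has `file == null`: it was produced from Zig code,
    // so its bytes may be extended without clobbering object-file atoms.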
/// Returns the location of the symbol that represents this `Atom` /// Returns the location of the symbol that represents this `Atom`
pub fn symbolLoc(self: Atom) Wasm.SymbolLoc { pub fn symbolLoc(self: Atom) Wasm.SymbolLoc {
return .{ .file = self.file, .index = self.sym_index }; return .{ .file = self.file, .index = self.sym_index };
@ -145,7 +158,7 @@ pub fn resolveRelocs(self: *Atom, wasm_bin: *const Wasm) void {
/// All values will be represented as a `u64`, as all values can fit within it. /// All values will be represented as a `u64`, as all values can fit within it.
/// The final value must be cast to the correct size. /// The final value must be cast to the correct size.
fn relocationValue(self: Atom, relocation: types.Relocation, wasm_bin: *const Wasm) u64 { fn relocationValue(self: Atom, relocation: types.Relocation, wasm_bin: *const Wasm) u64 {
const target_loc: Wasm.SymbolLoc = .{ .file = self.file, .index = relocation.index }; const target_loc = (Wasm.SymbolLoc{ .file = self.file, .index = relocation.index }).finalLoc(wasm_bin);
const symbol = target_loc.getSymbol(wasm_bin).*; const symbol = target_loc.getSymbol(wasm_bin).*;
switch (relocation.relocation_type) { switch (relocation.relocation_type) {
.R_WASM_FUNCTION_INDEX_LEB => return symbol.index, .R_WASM_FUNCTION_INDEX_LEB => return symbol.index,
@ -174,19 +187,34 @@ fn relocationValue(self: Atom, relocation: types.Relocation, wasm_bin: *const Wa
=> { => {
std.debug.assert(symbol.tag == .data and !symbol.isUndefined()); std.debug.assert(symbol.tag == .data and !symbol.isUndefined());
const merge_segment = wasm_bin.base.options.output_mode != .Obj; const merge_segment = wasm_bin.base.options.output_mode != .Obj;
const target_atom_loc = wasm_bin.discarded.get(target_loc) orelse target_loc; const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
const target_atom = wasm_bin.symbol_atom.get(target_atom_loc).?;
const segment_info = if (target_atom.file) |object_index| blk: { const segment_info = if (target_atom.file) |object_index| blk: {
break :blk wasm_bin.objects.items[object_index].segment_info; break :blk wasm_bin.objects.items[object_index].segment_info;
} else wasm_bin.segment_info.items; } else wasm_bin.segment_info.values();
const segment_name = segment_info[symbol.index].outputName(merge_segment); const segment_name = segment_info[symbol.index].outputName(merge_segment);
const segment_index = wasm_bin.data_segments.get(segment_name).?; const segment_index = wasm_bin.data_segments.get(segment_name).?;
const segment = wasm_bin.segments.items[segment_index]; const segment = wasm_bin.segments.items[segment_index];
return target_atom.offset + segment.offset + (relocation.addend orelse 0); return target_atom.offset + segment.offset + (relocation.addend orelse 0);
}, },
.R_WASM_EVENT_INDEX_LEB => return symbol.index, .R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32, .R_WASM_SECTION_OFFSET_I32 => {
.R_WASM_FUNCTION_OFFSET_I32, const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
=> return relocation.offset, return target_atom.offset + (relocation.addend orelse 0);
},
.R_WASM_FUNCTION_OFFSET_I32 => {
const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
var atom = target_atom.getFirst();
var offset: u32 = 0;
// TODO: Calculate this during atom allocation rather than with
// this linear scan. For now it is done here because atoms are
// sorted after atom allocation, since functions aren't merged
// until later.
while (true) {
offset += 5; // each atom uses 5 bytes to store its body's size
if (atom == target_atom) break;
atom = atom.next.?;
}
return target_atom.offset + offset + (relocation.addend orelse 0);
},
} }
} }
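A short worked example of the `R_WASM_FUNCTION_OFFSET_I32` accumulation above, with hypothetical numbers: every atom's body in the output is preceded by a 5-byte size field, so for a target atom that is third in its chain the loop visits three atoms before breaking:

    offset = 3 * 5 = 15
    value  = target_atom.offset + 15 + (relocation.addend orelse 0)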

View File

@ -63,16 +63,21 @@ relocatable_data: []const RelocatableData = &.{},
/// import name, module name and export names. Each string will be deduplicated /// import name, module name and export names. Each string will be deduplicated
/// and returns an offset into the table. /// and returns an offset into the table.
string_table: Wasm.StringTable = .{}, string_table: Wasm.StringTable = .{},
/// The names of all debug sections found in the current object file.
/// Each name is null-terminated. A name can be found from the
/// `index` offset within the `RelocatableData`.
debug_names: [:0]const u8,
/// Represents a single item within a section (depending on its `type`) /// Represents a single item within a section (depending on its `type`)
const RelocatableData = struct { const RelocatableData = struct {
/// The type of the relocatable data /// The type of the relocatable data
type: enum { data, code, custom }, type: enum { data, code, debug },
/// Pointer to the data of the segment, where its length is written to `size` /// Pointer to the data of the segment, where its length is written to `size`
data: [*]u8, data: [*]u8,
/// The size in bytes of the data representing the segment within the section /// The size in bytes of the data representing the segment within the section
size: u32, size: u32,
/// The index within the section itself /// The index within the section itself, or in case of a debug section,
/// the offset within the `string_table`.
index: u32, index: u32,
/// The offset within the section where the data starts /// The offset within the section where the data starts
offset: u32, offset: u32,
@ -96,9 +101,16 @@ const RelocatableData = struct {
return switch (self.type) { return switch (self.type) {
.data => .data, .data => .data,
.code => .function, .code => .function,
.custom => .section, .debug => .section,
}; };
} }
/// Returns the index within a section itself, or in case of a debug section,
/// returns the section index within the object file.
pub fn getIndex(self: RelocatableData) u32 {
if (self.type == .debug) return self.section_index;
return self.index;
}
}; };
pub const InitError = error{NotObjectFile} || ParseError || std.fs.File.ReadError; pub const InitError = error{NotObjectFile} || ParseError || std.fs.File.ReadError;
@ -111,6 +123,7 @@ pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8, maybe_max_siz
var object: Object = .{ var object: Object = .{
.file = file, .file = file,
.name = try gpa.dupe(u8, name), .name = try gpa.dupe(u8, name),
.debug_names = &.{},
}; };
var is_object_file: bool = false; var is_object_file: bool = false;
@ -141,6 +154,9 @@ pub fn create(gpa: Allocator, file: std.fs.File, name: []const u8, maybe_max_siz
/// Frees all memory of `Object` at once. The given `Allocator` must be /// Frees all memory of `Object` at once. The given `Allocator` must be
/// the same allocator that was used when `init` was called. /// the same allocator that was used when `init` was called.
pub fn deinit(self: *Object, gpa: Allocator) void { pub fn deinit(self: *Object, gpa: Allocator) void {
if (self.file) |file| {
file.close();
}
for (self.func_types) |func_ty| { for (self.func_types) |func_ty| {
gpa.free(func_ty.params); gpa.free(func_ty.params);
gpa.free(func_ty.returns); gpa.free(func_ty.returns);
@ -197,6 +213,11 @@ pub fn importedCountByKind(self: *const Object, kind: std.wasm.ExternalKind) u32
} else i; } else i;
} }
/// From a given `RelocatableData`, find the corresponding debug section name
pub fn getDebugName(self: *const Object, relocatable_data: RelocatableData) []const u8 {
return self.string_table.get(relocatable_data.index);
}
/// Checks if the object file is an MVP version. /// Checks if the object file is an MVP version.
/// When that's the case, we check if there's an import table definition with its name /// When that's the case, we check if there's an import table definition with its name
/// set to '__indirect_function_table'. When that's also the case, /// set to '__indirect_function_table'. When that's also the case,
@ -328,10 +349,15 @@ fn Parser(comptime ReaderType: type) type {
self.object.version = version; self.object.version = version;
var relocatable_data = std.ArrayList(RelocatableData).init(gpa); var relocatable_data = std.ArrayList(RelocatableData).init(gpa);
var debug_names = std.ArrayList(u8).init(gpa);
errdefer while (relocatable_data.popOrNull()) |rel_data| { errdefer {
while (relocatable_data.popOrNull()) |rel_data| {
gpa.free(rel_data.data[0..rel_data.size]); gpa.free(rel_data.data[0..rel_data.size]);
} else relocatable_data.deinit(); } else relocatable_data.deinit();
gpa.free(debug_names.items);
debug_names.deinit();
}
var section_index: u32 = 0; var section_index: u32 = 0;
while (self.reader.reader().readByte()) |byte| : (section_index += 1) { while (self.reader.reader().readByte()) |byte| : (section_index += 1) {
@ -347,11 +373,26 @@ fn Parser(comptime ReaderType: type) type {
if (std.mem.eql(u8, name, "linking")) { if (std.mem.eql(u8, name, "linking")) {
is_object_file.* = true; is_object_file.* = true;
self.object.relocatable_data = relocatable_data.items; // at this point no new relocatable sections will appear, so we're free to store them.
try self.parseMetadata(gpa, @intCast(usize, reader.context.bytes_left)); try self.parseMetadata(gpa, @intCast(usize, reader.context.bytes_left));
} else if (std.mem.startsWith(u8, name, "reloc")) { } else if (std.mem.startsWith(u8, name, "reloc")) {
try self.parseRelocations(gpa); try self.parseRelocations(gpa);
} else if (std.mem.eql(u8, name, "target_features")) { } else if (std.mem.eql(u8, name, "target_features")) {
try self.parseFeatures(gpa); try self.parseFeatures(gpa);
} else if (std.mem.startsWith(u8, name, ".debug")) {
const debug_size = @intCast(u32, reader.context.bytes_left);
const debug_content = try gpa.alloc(u8, debug_size);
errdefer gpa.free(debug_content);
try reader.readNoEof(debug_content);
try relocatable_data.append(.{
.type = .debug,
.data = debug_content.ptr,
.size = debug_size,
.index = try self.object.string_table.put(gpa, name),
.offset = 0, // debug sections only contain 1 entry, so no need to calculate offset
.section_index = section_index,
});
} else { } else {
try reader.skipBytes(reader.context.bytes_left, .{}); try reader.skipBytes(reader.context.bytes_left, .{});
} }
@ -737,7 +778,12 @@ fn Parser(comptime ReaderType: type) type {
}, },
.section => { .section => {
symbol.index = try leb.readULEB128(u32, reader); symbol.index = try leb.readULEB128(u32, reader);
symbol.name = try self.object.string_table.put(gpa, @tagName(symbol.tag)); for (self.object.relocatable_data) |data| {
if (data.section_index == symbol.index) {
symbol.name = data.index;
break;
}
}
}, },
else => { else => {
symbol.index = try leb.readULEB128(u32, reader); symbol.index = try leb.readULEB128(u32, reader);
@ -827,7 +873,6 @@ fn assertEnd(reader: anytype) !void {
/// Parses an object file into atoms, for code and data sections /// Parses an object file into atoms, for code and data sections
pub fn parseIntoAtoms(self: *Object, gpa: Allocator, object_index: u16, wasm_bin: *Wasm) !void { pub fn parseIntoAtoms(self: *Object, gpa: Allocator, object_index: u16, wasm_bin: *Wasm) !void {
log.debug("Parsing data section into atoms", .{});
const Key = struct { const Key = struct {
kind: Symbol.Tag, kind: Symbol.Tag,
index: u32, index: u32,
@ -839,7 +884,7 @@ pub fn parseIntoAtoms(self: *Object, gpa: Allocator, object_index: u16, wasm_bin
for (self.symtable) |symbol, symbol_index| { for (self.symtable) |symbol, symbol_index| {
switch (symbol.tag) { switch (symbol.tag) {
.function, .data => if (!symbol.isUndefined()) { .function, .data, .section => if (!symbol.isUndefined()) {
const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index }); const gop = try symbol_for_segment.getOrPut(.{ .kind = symbol.tag, .index = symbol.index });
const sym_idx = @intCast(u32, symbol_index); const sym_idx = @intCast(u32, symbol_index);
if (!gop.found_existing) { if (!gop.found_existing) {
@ -852,12 +897,9 @@ pub fn parseIntoAtoms(self: *Object, gpa: Allocator, object_index: u16, wasm_bin
} }
for (self.relocatable_data) |relocatable_data, index| { for (self.relocatable_data) |relocatable_data, index| {
const symbols = symbol_for_segment.getPtr(.{ const final_index = (try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index))) orelse {
.kind = relocatable_data.getSymbolKind(), continue; // found an unknown section, so skip parsing it into an atom as we do not know how to handle it.
.index = @intCast(u32, relocatable_data.index), };
}) orelse continue; // encountered a segment we do not create an atom for
const sym_index = symbols.pop();
const final_index = try wasm_bin.getMatchingSegment(object_index, @intCast(u32, index));
const atom = try gpa.create(Atom); const atom = try gpa.create(Atom);
atom.* = Atom.empty; atom.* = Atom.empty;
@ -870,7 +912,6 @@ pub fn parseIntoAtoms(self: *Object, gpa: Allocator, object_index: u16, wasm_bin
atom.file = object_index; atom.file = object_index;
atom.size = relocatable_data.size; atom.size = relocatable_data.size;
atom.alignment = relocatable_data.getAlignment(self); atom.alignment = relocatable_data.getAlignment(self);
atom.sym_index = sym_index;
const relocations: []types.Relocation = self.relocations.get(relocatable_data.section_index) orelse &.{}; const relocations: []types.Relocation = self.relocations.get(relocatable_data.section_index) orelse &.{};
for (relocations) |relocation| { for (relocations) |relocation| {
@ -892,6 +933,12 @@ pub fn parseIntoAtoms(self: *Object, gpa: Allocator, object_index: u16, wasm_bin
try atom.code.appendSlice(gpa, relocatable_data.data[0..relocatable_data.size]); try atom.code.appendSlice(gpa, relocatable_data.data[0..relocatable_data.size]);
if (symbol_for_segment.getPtr(.{
.kind = relocatable_data.getSymbolKind(),
.index = relocatable_data.getIndex(),
})) |symbols| {
atom.sym_index = symbols.pop();
// symbols referencing the same atom will be added as alias // symbols referencing the same atom will be added as alias
// or as 'parent' when they are global. // or as 'parent' when they are global.
while (symbols.popOrNull()) |idx| { while (symbols.popOrNull()) |idx| {
@ -902,18 +949,15 @@ pub fn parseIntoAtoms(self: *Object, gpa: Allocator, object_index: u16, wasm_bin
} }
} }
try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom); try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
}
const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index]; const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index];
if (relocatable_data.type == .data) { // code section and debug sections are 1-byte aligned
segment.alignment = std.math.max(segment.alignment, atom.alignment); segment.alignment = std.math.max(segment.alignment, atom.alignment);
if (wasm_bin.atoms.getPtr(final_index)) |last| {
last.*.next = atom;
atom.prev = last.*;
last.* = atom;
} else {
try wasm_bin.atoms.putNoClobber(gpa, final_index, atom);
} }
log.debug("Parsed into atom: '{s}'", .{self.string_table.get(self.symtable[atom.sym_index].name)});
try wasm_bin.appendAtomAtIndex(final_index, atom);
log.debug("Parsed into atom: '{s}' at segment index {d}", .{ self.string_table.get(self.symtable[atom.sym_index].name), final_index });
} }
} }
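The linked-list bookkeeping deleted above is what `appendAtomAtIndex` now centralizes. A plausible shape for it, inferred from the removed code (a sketch only, not the actual implementation elsewhere in the linker):

    /// Appends `atom` as the new tail of the atom list tracked for `index`.
    pub fn appendAtomAtIndex(self: *Wasm, index: u32, atom: *Atom) !void {
        const gpa = self.base.allocator;
        if (self.atoms.getPtr(index)) |last| {
            last.*.next = atom;
            atom.prev = last.*;
            last.* = atom;
        } else {
            try self.atoms.putNoClobber(gpa, index, atom);
        }
    }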

View File

@ -110,6 +110,10 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
return self.get(off) orelse unreachable; return self.get(off) orelse unreachable;
} }
pub fn items(self: Self) []const u8 {
return self.buffer.items;
}
pub fn len(self: Self) usize { pub fn len(self: Self) usize {
return self.buffer.items.len; return self.buffer.items.len;
} }

View File

@ -268,7 +268,7 @@ pub fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
} else if (mem.eql(u8, cmd, "init-lib")) { } else if (mem.eql(u8, cmd, "init-lib")) {
return cmdInit(gpa, arena, cmd_args, .Lib); return cmdInit(gpa, arena, cmd_args, .Lib);
} else if (mem.eql(u8, cmd, "targets")) { } else if (mem.eql(u8, cmd, "targets")) {
const info = try detectNativeTargetInfo(arena, .{}); const info = try detectNativeTargetInfo(.{});
const stdout = io.getStdOut().writer(); const stdout = io.getStdOut().writer();
return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, info.target); return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, info.target);
} else if (mem.eql(u8, cmd, "version")) { } else if (mem.eql(u8, cmd, "version")) {
@ -691,6 +691,9 @@ fn buildOutputType(
var linker_max_memory: ?u64 = null; var linker_max_memory: ?u64 = null;
var linker_shared_memory: bool = false; var linker_shared_memory: bool = false;
var linker_global_base: ?u64 = null; var linker_global_base: ?u64 = null;
var linker_print_gc_sections: bool = false;
var linker_print_icf_sections: bool = false;
var linker_print_map: bool = false;
var linker_z_nodelete = false; var linker_z_nodelete = false;
var linker_z_notext = false; var linker_z_notext = false;
var linker_z_defs = false; var linker_z_defs = false;
@ -1816,6 +1819,12 @@ fn buildOutputType(
linker_gc_sections = true; linker_gc_sections = true;
} else if (mem.eql(u8, arg, "--no-gc-sections")) { } else if (mem.eql(u8, arg, "--no-gc-sections")) {
linker_gc_sections = false; linker_gc_sections = false;
} else if (mem.eql(u8, arg, "--print-gc-sections")) {
linker_print_gc_sections = true;
} else if (mem.eql(u8, arg, "--print-icf-sections")) {
linker_print_icf_sections = true;
} else if (mem.eql(u8, arg, "--print-map")) {
linker_print_map = true;
} else if (mem.eql(u8, arg, "--allow-shlib-undefined") or } else if (mem.eql(u8, arg, "--allow-shlib-undefined") or
mem.eql(u8, arg, "-allow-shlib-undefined")) mem.eql(u8, arg, "-allow-shlib-undefined"))
{ {
@ -2258,7 +2267,7 @@ fn buildOutputType(
} }
const cross_target = try parseCrossTargetOrReportFatalError(arena, target_parse_options); const cross_target = try parseCrossTargetOrReportFatalError(arena, target_parse_options);
const target_info = try detectNativeTargetInfo(gpa, cross_target); const target_info = try detectNativeTargetInfo(cross_target);
if (target_info.target.os.tag != .freestanding) { if (target_info.target.os.tag != .freestanding) {
if (ensure_libc_on_non_freestanding) if (ensure_libc_on_non_freestanding)
@ -2911,6 +2920,9 @@ fn buildOutputType(
.linker_initial_memory = linker_initial_memory, .linker_initial_memory = linker_initial_memory,
.linker_max_memory = linker_max_memory, .linker_max_memory = linker_max_memory,
.linker_shared_memory = linker_shared_memory, .linker_shared_memory = linker_shared_memory,
.linker_print_gc_sections = linker_print_gc_sections,
.linker_print_icf_sections = linker_print_icf_sections,
.linker_print_map = linker_print_map,
.linker_global_base = linker_global_base, .linker_global_base = linker_global_base,
.linker_export_symbol_names = linker_export_symbol_names.items, .linker_export_symbol_names = linker_export_symbol_names.items,
.linker_z_nodelete = linker_z_nodelete, .linker_z_nodelete = linker_z_nodelete,
@ -3271,7 +3283,7 @@ fn runOrTest(
if (std.process.can_execv and arg_mode == .run and !watch) { if (std.process.can_execv and arg_mode == .run and !watch) {
// execv releases the locks; no need to destroy the Compilation here. // execv releases the locks; no need to destroy the Compilation here.
const err = std.process.execv(gpa, argv.items); const err = std.process.execv(gpa, argv.items);
try warnAboutForeignBinaries(gpa, arena, arg_mode, target_info, link_libc); try warnAboutForeignBinaries(arena, arg_mode, target_info, link_libc);
const cmd = try std.mem.join(arena, " ", argv.items); const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following command failed to execve with '{s}':\n{s}", .{ @errorName(err), cmd }); fatal("the following command failed to execve with '{s}':\n{s}", .{ @errorName(err), cmd });
} else if (std.process.can_spawn) { } else if (std.process.can_spawn) {
@ -3288,7 +3300,7 @@ fn runOrTest(
} }
const term = child.spawnAndWait() catch |err| { const term = child.spawnAndWait() catch |err| {
try warnAboutForeignBinaries(gpa, arena, arg_mode, target_info, link_libc); try warnAboutForeignBinaries(arena, arg_mode, target_info, link_libc);
const cmd = try std.mem.join(arena, " ", argv.items); const cmd = try std.mem.join(arena, " ", argv.items);
fatal("the following command failed with '{s}':\n{s}", .{ @errorName(err), cmd }); fatal("the following command failed with '{s}':\n{s}", .{ @errorName(err), cmd });
}; };
@ -3902,7 +3914,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
gimmeMoreOfThoseSweetSweetFileDescriptors(); gimmeMoreOfThoseSweetSweetFileDescriptors();
const cross_target: std.zig.CrossTarget = .{}; const cross_target: std.zig.CrossTarget = .{};
const target_info = try detectNativeTargetInfo(gpa, cross_target); const target_info = try detectNativeTargetInfo(cross_target);
const exe_basename = try std.zig.binNameAlloc(arena, .{ const exe_basename = try std.zig.binNameAlloc(arena, .{
.root_name = "build", .root_name = "build",
@ -4944,8 +4956,8 @@ test "fds" {
gimmeMoreOfThoseSweetSweetFileDescriptors(); gimmeMoreOfThoseSweetSweetFileDescriptors();
} }
fn detectNativeTargetInfo(gpa: Allocator, cross_target: std.zig.CrossTarget) !std.zig.system.NativeTargetInfo { fn detectNativeTargetInfo(cross_target: std.zig.CrossTarget) !std.zig.system.NativeTargetInfo {
return std.zig.system.NativeTargetInfo.detect(gpa, cross_target); return std.zig.system.NativeTargetInfo.detect(cross_target);
} }
/// Indicate that we are now terminating with a successful exit code. /// Indicate that we are now terminating with a successful exit code.
@ -5308,14 +5320,13 @@ fn parseIntSuffix(arg: []const u8, prefix_len: usize) u64 {
} }
fn warnAboutForeignBinaries( fn warnAboutForeignBinaries(
gpa: Allocator,
arena: Allocator, arena: Allocator,
arg_mode: ArgMode, arg_mode: ArgMode,
target_info: std.zig.system.NativeTargetInfo, target_info: std.zig.system.NativeTargetInfo,
link_libc: bool, link_libc: bool,
) !void { ) !void {
const host_cross_target: std.zig.CrossTarget = .{}; const host_cross_target: std.zig.CrossTarget = .{};
const host_target_info = try detectNativeTargetInfo(gpa, host_cross_target); const host_target_info = try detectNativeTargetInfo(host_cross_target);
switch (host_target_info.getExternalExecutor(target_info, .{ .link_libc = link_libc })) { switch (host_target_info.getExternalExecutor(target_info, .{ .link_libc = link_libc })) {
.native => return, .native => return,

View File

@ -177,6 +177,8 @@ const TestManifestConfigDefaults = struct {
inline for (&[_][]const u8{ "x86_64", "aarch64" }) |arch| { inline for (&[_][]const u8{ "x86_64", "aarch64" }) |arch| {
defaults = defaults ++ arch ++ "-macos" ++ ","; defaults = defaults ++ arch ++ "-macos" ++ ",";
} }
// Windows
defaults = defaults ++ "x86_64-windows" ++ ",";
// Wasm // Wasm
defaults = defaults ++ "wasm32-wasi"; defaults = defaults ++ "wasm32-wasi";
return defaults; return defaults;
@ -1211,7 +1213,7 @@ pub const TestContext = struct {
} }
fn run(self: *TestContext) !void { fn run(self: *TestContext) !void {
const host = try std.zig.system.NativeTargetInfo.detect(self.gpa, .{}); const host = try std.zig.system.NativeTargetInfo.detect(.{});
var progress = std.Progress{}; var progress = std.Progress{};
const root_node = progress.start("compiler", self.cases.items.len); const root_node = progress.start("compiler", self.cases.items.len);
@ -1300,7 +1302,7 @@ pub const TestContext = struct {
global_cache_directory: Compilation.Directory, global_cache_directory: Compilation.Directory,
host: std.zig.system.NativeTargetInfo, host: std.zig.system.NativeTargetInfo,
) !void { ) !void {
const target_info = try std.zig.system.NativeTargetInfo.detect(allocator, case.target); const target_info = try std.zig.system.NativeTargetInfo.detect(case.target);
const target = target_info.target; const target = target_info.target;
var arena_allocator = std.heap.ArenaAllocator.init(allocator); var arena_allocator = std.heap.ArenaAllocator.init(allocator);
@ -1546,6 +1548,12 @@ pub const TestContext = struct {
.self_exe_path = std.testing.zig_exe_path, .self_exe_path = std.testing.zig_exe_path,
// TODO instead of turning off color, pass in a std.Progress.Node // TODO instead of turning off color, pass in a std.Progress.Node
.color = .off, .color = .off,
// TODO: force self-hosted linkers with stage2 backend to avoid LLD creeping in
// until the auto-select mechanism deems them worthy
.use_lld = switch (case.backend) {
.stage2 => false,
else => null,
},
}); });
defer comp.destroy(); defer comp.destroy();

View File

@ -1167,7 +1167,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
} }
if (!c.zig_is_stage1 and is_packed) { if (!c.zig_is_stage1 and is_packed) {
return failDecl(c, record_loc, bare_name, "cannot translate packed record union", .{}); return failDecl(c, record_loc, name, "cannot translate packed record union", .{});
} }
const record_payload = try c.arena.create(ast.Payload.Record); const record_payload = try c.arena.create(ast.Payload.Record);
@ -5799,7 +5799,7 @@ fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
} }
} }
for (source) |c| { for (source) |c| {
if (c == '\\') { if (c == '\\' or c == '\t') {
break; break;
} }
} else return source; } else return source;
@ -5876,6 +5876,13 @@ fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 {
state = .Start; state = .Start;
}, },
.Start => { .Start => {
if (c == '\t') {
bytes[i] = '\\';
i += 1;
bytes[i] = 't';
i += 1;
continue;
}
if (c == '\\') { if (c == '\\') {
state = .Escape; state = .Escape;
} }
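The effect of the new `.Start` branch: a raw tab byte inside a C string-literal macro is re-escaped instead of copied through verbatim. Illustrated with the header and test added further below (`<TAB>` stands for a literal tab byte in the C source):

    // C input:   #define EMBEDDED_TAB "hello<TAB>"
    // Zig output: pub const EMBEDDED_TAB = "hello\t";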

View File

@ -86,6 +86,7 @@ test {
_ = @import("behavior/bugs/12430.zig"); _ = @import("behavior/bugs/12430.zig");
_ = @import("behavior/bugs/12486.zig"); _ = @import("behavior/bugs/12486.zig");
_ = @import("behavior/bugs/12680.zig"); _ = @import("behavior/bugs/12680.zig");
_ = @import("behavior/bugs/12776.zig");
_ = @import("behavior/byteswap.zig"); _ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig"); _ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call.zig"); _ = @import("behavior/call.zig");

View File

@ -0,0 +1,42 @@
const std = @import("std");
const builtin = @import("builtin");
const RAM = struct {
data: [0xFFFF + 1]u8,
fn new() !RAM {
return RAM{ .data = [_]u8{0} ** 0x10000 };
}
fn get(self: *RAM, addr: u16) u8 {
return self.data[addr];
}
};
const CPU = packed struct {
interrupts: bool,
ram: *RAM,
fn new(ram: *RAM) !CPU {
return CPU{
.ram = ram,
.interrupts = false,
};
}
fn tick(self: *CPU) !void {
var queued_interrupts = self.ram.get(0xFFFF) & self.ram.get(0xFF0F);
if (self.interrupts and queued_interrupts != 0) {
self.interrupts = false;
}
}
};
test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var ram = try RAM.new();
var cpu = try CPU.new(&ram);
try cpu.tick();
try std.testing.expect(cpu.interrupts == false);
}

View File

@ -486,3 +486,14 @@ test "array slicing to slice" {
try S.doTheTest(); try S.doTheTest();
comptime try S.doTheTest(); comptime try S.doTheTest();
} }
test "pointer to constant decl preserves alignment" {
const S = struct {
a: u8,
b: u8,
const aligned align(8) = @This(){ .a = 3, .b = 4 };
};
const alignment = @typeInfo(@TypeOf(&S.aligned)).Pointer.alignment;
try std.testing.expect(alignment == 8);
}

View File

@ -50,3 +50,5 @@ typedef _Bool uintptr_t;
#define CAST_TO_UINTPTR(X) (uintptr_t)(X) #define CAST_TO_UINTPTR(X) (uintptr_t)(X)
#define LARGE_INT 18446744073709550592 #define LARGE_INT 18446744073709550592
#define EMBEDDED_TAB "hello "

View File

@ -2,6 +2,7 @@ const builtin = @import("builtin");
const std = @import("std"); const std = @import("std");
const expect = std.testing.expect; const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual; const expectEqual = std.testing.expectEqual;
const expectEqualStrings = std.testing.expectEqualStrings;
const h = @cImport(@cInclude("behavior/translate_c_macros.h")); const h = @cImport(@cInclude("behavior/translate_c_macros.h"));
@ -123,3 +124,13 @@ test "large integer macro" {
try expectEqual(@as(c_ulonglong, 18446744073709550592), h.LARGE_INT); try expectEqual(@as(c_ulonglong, 18446744073709550592), h.LARGE_INT);
} }
test "string literal macro with embedded tab character" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try expectEqualStrings("hello\t", h.EMBEDDED_TAB);
}

View File

@ -2,5 +2,5 @@
// output_mode=Exe // output_mode=Exe
// target=aarch64-macos // target=aarch64-macos
// //
// :105:9: error: struct 'tmp.tmp' has no member named 'main' // :109:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here // :7:1: note: struct declared here

View File

@ -0,0 +1,26 @@
pub inline fn instanceRequestAdapter() void {}
pub inline fn requestAdapter(
comptime callbackArg: fn () callconv(.Inline) void,
) void {
_ = (struct {
pub fn callback() callconv(.C) void {
callbackArg();
}
}).callback;
instanceRequestAdapter(undefined); // note wrong number of arguments here
}
inline fn foo() void {}
pub export fn entry() void {
requestAdapter(foo);
}
// error
// backend=stage2
// target=native
//
// :11:5: error: expected 0 argument(s), found 1
// :1:12: note: function declared here
// :17:19: note: called from here

View File

@ -0,0 +1,24 @@
fn Observable(comptime T: type) type {
return struct {
fn map(Src: T, Dst: anytype, function: fn (T) Dst) Dst {
_ = Src;
_ = function;
return Observable(Dst);
}
};
}
fn u32Tou64(x: u32) u64 {
_ = x;
return 0;
}
pub export fn entry() void {
Observable(u32).map(u32, u64, u32Tou64(0));
}
// error
// backend=stage2
// target=native
//
// :17:25: error: expected type 'u32', found 'type'

View File

@ -0,0 +1,19 @@
pub export fn entry() void {
var buf: [5]u8 = .{ 1, 2, 3, 4, 5 };
var slice: []u8 = &buf;
const a: u32 = 1234;
@memcpy(slice, @ptrCast([*]const u8, &a), 4);
}
pub export fn entry1() void {
var buf: [5]u8 = .{ 1, 2, 3, 4, 5 };
var ptr: *u8 = &buf[0];
@memcpy(ptr, 0, 4);
}
// error
// backend=stage2
// target=native
//
// :5:13: error: expected type '[*]u8', found '[]u8'
// :10:13: error: expected type '[*]u8', found '*u8'
// :10:13: note: a single pointer cannot cast into a many pointer

View File

@ -2,5 +2,5 @@
// output_mode=Exe // output_mode=Exe
// target=x86_64-linux // target=x86_64-linux
// //
// :105:9: error: struct 'tmp.tmp' has no member named 'main' // :109:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here // :7:1: note: struct declared here

View File

@ -2,5 +2,5 @@
// output_mode=Exe // output_mode=Exe
// target=x86_64-macos // target=x86_64-macos
// //
// :105:9: error: struct 'tmp.tmp' has no member named 'main' // :109:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here // :7:1: note: struct declared here

View File

@ -0,0 +1,6 @@
// error
// output_mode=Exe
// target=x86_64-windows
//
// :130:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here

View File

@ -0,0 +1,6 @@
pub export fn main() noreturn {}
// error
//
// :1:32: error: function declared 'noreturn' returns
// :1:22: note: 'noreturn' declared here

View File

@ -0,0 +1,16 @@
const std = @import("std");
pub fn main() void {
print();
}
fn print() void {
const msg = "Hello, World!\n";
const stdout = std.io.getStdOut();
stdout.writeAll(msg) catch unreachable;
}
// run
//
// Hello, World!
//

View File

@ -28,11 +28,22 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
} }
fn addWasmCases(cases: *tests.StandaloneContext) void { fn addWasmCases(cases: *tests.StandaloneContext) void {
cases.addBuildFile("test/link/wasm/archive/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
});
cases.addBuildFile("test/link/wasm/bss/build.zig", .{ cases.addBuildFile("test/link/wasm/bss/build.zig", .{
.build_modes = true, .build_modes = true,
.requires_stage2 = true, .requires_stage2 = true,
}); });
cases.addBuildFile("test/link/wasm/extern/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
.use_emulation = true,
});
cases.addBuildFile("test/link/wasm/segments/build.zig", .{ cases.addBuildFile("test/link/wasm/segments/build.zig", .{
.build_modes = true, .build_modes = true,
.requires_stage2 = true, .requires_stage2 = true,
@ -47,17 +58,6 @@ fn addWasmCases(cases: *tests.StandaloneContext) void {
.build_modes = true, .build_modes = true,
.requires_stage2 = true, .requires_stage2 = true,
}); });
cases.addBuildFile("test/link/wasm/archive/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
});
cases.addBuildFile("test/link/wasm/extern/build.zig", .{
.build_modes = true,
.requires_stage2 = true,
.use_emulation = true,
});
} }
fn addMachOCases(cases: *tests.StandaloneContext) void { fn addMachOCases(cases: *tests.StandaloneContext) void {

View File

@ -108,6 +108,14 @@ const test_targets = blk: {
}, },
.backend = .stage2_x86_64, .backend = .stage2_x86_64,
}, },
.{
.target = .{
.cpu_arch = .x86_64,
.os_tag = .windows,
.abi = .gnu,
},
.backend = .stage2_x86_64,
},
.{ .{
.target = .{ .target = .{
@ -693,6 +701,8 @@ pub fn addPkgTests(
else => { else => {
these_tests.use_stage1 = false; these_tests.use_stage1 = false;
these_tests.use_llvm = false; these_tests.use_llvm = false;
// TODO: force self-hosted linkers to avoid LLD creeping in until the auto-select mechanism deems them worthy
these_tests.use_lld = false;
}, },
}; };