Merge pull request #24193 from jacobly0/x86_64-spring-cleaning

x86_64: increase passing test coverage on windows
Jacob Young, 2025-06-20 00:20:56 -04:00 (committed by GitHub)
commit cf1a7bbd44
145 changed files with 13157 additions and 8614 deletions

View File

@ -203,6 +203,10 @@ pub fn build(b: *std.Build) !void {
exe.pie = pie;
exe.entitlements = entitlements;
const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");
exe.use_llvm = use_llvm;
exe.use_lld = use_llvm;
if (no_bin) {
b.getInstallStep().dependOn(&exe.step);
} else {
@ -214,10 +218,6 @@ pub fn build(b: *std.Build) !void {
test_step.dependOn(&exe.step);
const use_llvm = b.option(bool, "use-llvm", "Use the llvm backend");
exe.use_llvm = use_llvm;
exe.use_lld = use_llvm;
const exe_options = b.addOptions();
exe.root_module.addOptions("build_options", exe_options);
@ -759,7 +759,7 @@ fn addCmakeCfgOptionsToExe(
use_zig_libcxx: bool,
) !void {
const mod = exe.root_module;
const target = mod.resolved_target.?.result;
const target = &mod.resolved_target.?.result;
if (target.os.tag.isDarwin()) {
// useful for package maintainers

View File

@ -525,7 +525,7 @@ fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.A
};
const target = std.zig.resolveTargetQueryOrFatal(target_query);
const is_native_abi = target_query.isNativeAbi();
const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, target, is_native_abi, true, null) catch {
const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch {
if (includes == .any) {
// fall back to mingw
includes = .gnu;
@ -550,7 +550,7 @@ fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.A
};
const target = std.zig.resolveTargetQueryOrFatal(target_query);
const is_native_abi = target_query.isNativeAbi();
const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, target, is_native_abi, true, null) catch |err| switch (err) {
const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => return error.MingwIncludesNotFound,
};

View File

@ -16,10 +16,10 @@ else
/// Determines the symbol's visibility to other objects.
/// For WebAssembly this allows the symbol to be resolved to other modules, but will not
/// export it to the host runtime.
pub const visibility: std.builtin.SymbolVisibility = if (linkage != .internal)
.hidden
pub const visibility: std.builtin.SymbolVisibility = if (linkage == .internal or builtin.link_mode == .dynamic)
.default
else
.default;
.hidden;
pub const PreferredLoadStoreElement = element: {
if (std.simd.suggestVectorLength(u8)) |vec_size| {

View File

@ -35,7 +35,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []u32, v: []u32) !void {
pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
const v: []u32 = @ptrCast(@alignCast(v_p[0..byte_size]));
@ -44,7 +44,7 @@ pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) vo
pub fn __modei4(r_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
const v: []u32 = @ptrCast(@alignCast(v_p[0..byte_size]));
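The same mechanical change repeats across every compiler_rt big-integer routine in this commit: `std.zig.target.intByteSize` now takes `*const std.Target` rather than `std.Target` by value, so each call site passes `&builtin.target`. A minimal sketch of the new call shape (the 256-bit width is an arbitrary example):

```zig
const std = @import("std");
const builtin = @import("builtin");

pub fn main() void {
    // Target helpers now take a const pointer, so the comptime-known host
    // target is passed by address instead of copying the large struct.
    const byte_size = std.zig.target.intByteSize(&builtin.target, 256);
    std.debug.print("a 256-bit integer occupies {d} bytes\n", .{byte_size});
}
```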

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixdfei(r: [*]u8, bits: usize, a: f64) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixhfei(r: [*]u8, bits: usize, a: f16) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixsfei(r: [*]u8, bits: usize, a: f32) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixtfei(r: [*]u8, bits: usize, a: f128) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunsdfei(r: [*]u8, bits: usize, a: f64) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunshfei(r: [*]u8, bits: usize, a: f16) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunssfei(r: [*]u8, bits: usize, a: f32) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunstfei(r: [*]u8, bits: usize, a: f128) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixunsxfei(r: [*]u8, bits: usize, a: f80) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.unsigned, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __fixxfei(r: [*]u8, bits: usize, a: f80) callconv(.c) void {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return bigIntFromFloat(.signed, @ptrCast(@alignCast(r[0..byte_size])), a);
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floateidf(a: [*]const u8, bits: usize) callconv(.c) f64 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f64, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floateihf(a: [*]const u8, bits: usize) callconv(.c) f16 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f16, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floateisf(a: [*]const u8, bits: usize) callconv(.c) f32 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f32, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floateitf(a: [*]const u8, bits: usize) callconv(.c) f128 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f128, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floateixf(a: [*]const u8, bits: usize) callconv(.c) f80 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f80, .signed, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneidf(a: [*]const u8, bits: usize) callconv(.c) f64 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f64, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneihf(a: [*]const u8, bits: usize) callconv(.c) f16 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f16, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneisf(a: [*]const u8, bits: usize) callconv(.c) f32 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f32, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneitf(a: [*]const u8, bits: usize) callconv(.c) f128 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f128, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -10,6 +10,6 @@ comptime {
}
pub fn __floatuneixf(a: [*]const u8, bits: usize) callconv(.c) f80 {
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
return floatFromBigInt(f80, .unsigned, @ptrCast(@alignCast(a[0..byte_size])));
}

View File

@ -127,23 +127,23 @@ fn win_probe_stack_only() void {
},
.x86_64 => {
asm volatile (
\\ push %%rcx
\\ push %%rax
\\ cmp $0x1000,%%rax
\\ lea 24(%%rsp),%%rcx
\\ pushq %%rcx
\\ pushq %%rax
\\ cmpq $0x1000,%%rax
\\ leaq 24(%%rsp),%%rcx
\\ jb 1f
\\ 2:
\\ sub $0x1000,%%rcx
\\ test %%rcx,(%%rcx)
\\ sub $0x1000,%%rax
\\ cmp $0x1000,%%rax
\\ subq $0x1000,%%rcx
\\ testq %%rcx,(%%rcx)
\\ subq $0x1000,%%rax
\\ cmpq $0x1000,%%rax
\\ ja 2b
\\ 1:
\\ sub %%rax,%%rcx
\\ test %%rcx,(%%rcx)
\\ pop %%rax
\\ pop %%rcx
\\ ret
\\ subq %%rax,%%rcx
\\ testq %%rcx,(%%rcx)
\\ popq %%rax
\\ popq %%rcx
\\ retq
);
},
.x86 => {
@ -179,26 +179,26 @@ fn win_probe_stack_adjust_sp() void {
switch (arch) {
.x86_64 => {
asm volatile (
\\ push %%rcx
\\ cmp $0x1000,%%rax
\\ lea 16(%%rsp),%%rcx
\\ pushq %%rcx
\\ cmpq $0x1000,%%rax
\\ leaq 16(%%rsp),%%rcx
\\ jb 1f
\\ 2:
\\ sub $0x1000,%%rcx
\\ test %%rcx,(%%rcx)
\\ sub $0x1000,%%rax
\\ cmp $0x1000,%%rax
\\ subq $0x1000,%%rcx
\\ testq %%rcx,(%%rcx)
\\ subq $0x1000,%%rax
\\ cmpq $0x1000,%%rax
\\ ja 2b
\\ 1:
\\ sub %%rax,%%rcx
\\ test %%rcx,(%%rcx)
\\ subq %%rax,%%rcx
\\ testq %%rcx,(%%rcx)
\\
\\ lea 8(%%rsp),%%rax
\\ mov %%rcx,%%rsp
\\ mov -8(%%rax),%%rcx
\\ push (%%rax)
\\ sub %%rsp,%%rax
\\ ret
\\ leaq 8(%%rsp),%%rax
\\ movq %%rcx,%%rsp
\\ movq -8(%%rax),%%rcx
\\ pushq (%%rax)
\\ subq %%rsp,%%rax
\\ retq
);
},
.x86 => {
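The only change to these two stack-probe routines is that each AT&T mnemonic gained an explicit operand-size suffix; the suffixed and unsuffixed spellings encode the same instructions here, since the size was previously inferred from the 64-bit registers. A reduced sketch of the pattern (not the routine itself):

```zig
// Explicit 64-bit forms; previously spelled `push`, `pop`, and `ret`.
fn probe() callconv(.naked) noreturn {
    asm volatile (
        \\ pushq %%rax
        \\ popq %%rax
        \\ retq
    );
}
```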

View File

@ -114,7 +114,7 @@ pub fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
const v: []const u32 = @ptrCast(@alignCast(v_p[0..byte_size]));
@ -123,7 +123,7 @@ pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
@setRuntimeSafety(builtin.is_test);
const byte_size = std.zig.target.intByteSize(builtin.target, @intCast(bits));
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
const v: []const u32 = @ptrCast(@alignCast(v_p[0..byte_size]));

View File

@ -1164,7 +1164,14 @@ pub fn addRunArtifact(b: *Build, exe: *Step.Compile) *Step.Run {
// It doesn't have to be native. We catch that if you actually try to run it.
// Consider that this is declarative; the run step may not be run unless a user
// option is supplied.
const run_step = Step.Run.create(b, b.fmt("run {s}", .{exe.name}));
// Avoid the common case of the step name looking like "run test test".
const step_name = if (exe.kind.isTest() and mem.eql(u8, exe.name, "test"))
b.fmt("run {s}", .{@tagName(exe.kind)})
else
b.fmt("run {s} {s}", .{ @tagName(exe.kind), exe.name });
const run_step = Step.Run.create(b, step_name);
run_step.producer = exe;
if (exe.kind == .@"test") {
if (exe.exec_cmd_args) |exec_cmd_args| {
@ -2449,12 +2456,23 @@ pub const GeneratedFile = struct {
/// This value must be set in the `fn make()` of the `step` and must not be `null` afterwards.
path: ?[]const u8 = null,
/// Deprecated, see `getPath2`.
pub fn getPath(gen: GeneratedFile) []const u8 {
return gen.step.owner.pathFromCwd(gen.path orelse std.debug.panic(
"getPath() was called on a GeneratedFile that wasn't built yet. Is there a missing Step dependency on step '{s}'?",
.{gen.step.name},
));
}
pub fn getPath2(gen: GeneratedFile, src_builder: *Build, asking_step: ?*Step) []const u8 {
return gen.path orelse {
std.debug.lockStdErr();
const stderr = std.io.getStdErr();
dumpBadGetPathHelp(gen.step, stderr, src_builder, asking_step) catch {};
std.debug.unlockStdErr();
@panic("misconfigured build script");
};
}
};
// dirnameAllowEmpty is a variant of fs.path.dirname
@ -2705,6 +2723,18 @@ pub const LazyPath = union(enum) {
}
}
pub fn basename(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) []const u8 {
return fs.path.basename(switch (lazy_path) {
.src_path => |sp| sp.sub_path,
.cwd_relative => |sub_path| sub_path,
.generated => |gen| if (gen.sub_path.len > 0)
gen.sub_path
else
gen.file.getPath2(src_builder, asking_step),
.dependency => |dep| dep.sub_path,
});
}
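`LazyPath.basename` is new in this commit; unlike calling `std.fs.path.basename` on a raw string, it can resolve a generated file through `getPath2` first. A hypothetical helper showing the call shape (`logName` is illustrative, not part of the commit):

```zig
const std = @import("std");

fn logName(step: *std.Build.Step, lp: std.Build.LazyPath) void {
    // Generated paths are resolved via getPath2 before taking the basename.
    const name = lp.basename(step.owner, step);
    std.log.info("installing {s}", .{name});
}
```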
/// Copies the internal strings.
///
/// The `b` parameter is only used for its allocator. All *Build instances

View File

@ -198,10 +198,10 @@ fn serveWasm(
const wasm_base_path = try buildWasmBinary(ws, arena, optimize_mode);
const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = fuzzer_bin_name,
.target = std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
.target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
.arch_os_abi = fuzzer_arch_os_abi,
.cpu_features = fuzzer_cpu_features,
}) catch unreachable) catch unreachable,
}) catch unreachable) catch unreachable),
.output_mode = .Exe,
});
// std.http.Server does not have a sendfile API yet.

View File

@ -655,10 +655,10 @@ fn linkLibraryOrObject(m: *Module, other: *Step.Compile) void {
m.include_dirs.append(allocator, .{ .other_step = other }) catch @panic("OOM");
}
fn requireKnownTarget(m: *Module) std.Target {
const resolved_target = m.resolved_target orelse
@panic("this API requires the Module to be created with a known 'target' field");
return resolved_target.result;
fn requireKnownTarget(m: *Module) *const std.Target {
const resolved_target = &(m.resolved_target orelse
@panic("this API requires the Module to be created with a known 'target' field"));
return &resolved_target.result;
}
/// Elements of `modules` and `names` are matched one-to-one.

View File

@ -478,6 +478,29 @@ pub fn evalZigProcess(
return result;
}
/// Wrapper around `std.fs.Dir.updateFile` that handles verbose and error output.
pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u8) !std.fs.Dir.PrevStatus {
const b = s.owner;
const src_path = src_lazy_path.getPath3(b, s);
try handleVerbose(b, null, &.{ "install", "-C", b.fmt("{}", .{src_path}), dest_path });
return src_path.root_dir.handle.updateFile(src_path.sub_path, std.fs.cwd(), dest_path, .{}) catch |err| {
return s.fail("unable to update file from '{}' to '{s}': {s}", .{
src_path, dest_path, @errorName(err),
});
};
}
/// Wrapper around `std.fs.Dir.makePathStatus` that handles verbose and error output.
pub fn installDir(s: *Step, dest_path: []const u8) !std.fs.Dir.MakePathStatus {
const b = s.owner;
try handleVerbose(b, null, &.{ "install", "-d", dest_path });
return std.fs.cwd().makePathStatus(dest_path) catch |err| {
return s.fail("unable to create dir '{s}': {s}", .{
dest_path, @errorName(err),
});
};
}
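These two wrappers give the install steps below a single code path for verbose `install` echoing and failure reporting. A sketch of how a custom step's `make` might use them (the destination layout is hypothetical):

```zig
const std = @import("std");

fn make(step: *std.Build.Step, options: std.Build.Step.MakeOptions) !void {
    _ = options;
    const b = step.owner;
    const dest_dir = b.getInstallPath(.prefix, "share/example");
    _ = try step.installDir(dest_dir);
    const prev = try step.installFile(
        b.path("assets/example.txt"),
        b.pathJoin(&.{ dest_dir, "example.txt" }),
    );
    step.result_cached = prev == .fresh;
}
```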
fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
const b = s.owner;
const arena = b.allocator;
@ -714,8 +737,44 @@ pub fn allocPrintCmd2(
opt_env: ?*const std.process.EnvMap,
argv: []const []const u8,
) Allocator.Error![]u8 {
const shell = struct {
fn escape(writer: anytype, string: []const u8, is_argv0: bool) !void {
for (string) |c| {
if (switch (c) {
else => true,
'%', '+'...':', '@'...'Z', '_', 'a'...'z' => false,
'=' => is_argv0,
}) break;
} else return writer.writeAll(string);
try writer.writeByte('"');
for (string) |c| {
if (switch (c) {
std.ascii.control_code.nul => break,
'!', '"', '$', '\\', '`' => true,
else => !std.ascii.isPrint(c),
}) try writer.writeByte('\\');
switch (c) {
std.ascii.control_code.nul => unreachable,
std.ascii.control_code.bel => try writer.writeByte('a'),
std.ascii.control_code.bs => try writer.writeByte('b'),
std.ascii.control_code.ht => try writer.writeByte('t'),
std.ascii.control_code.lf => try writer.writeByte('n'),
std.ascii.control_code.vt => try writer.writeByte('v'),
std.ascii.control_code.ff => try writer.writeByte('f'),
std.ascii.control_code.cr => try writer.writeByte('r'),
std.ascii.control_code.esc => try writer.writeByte('E'),
' '...'~' => try writer.writeByte(c),
else => try writer.print("{o:0>3}", .{c}),
}
}
try writer.writeByte('"');
}
};
var buf: std.ArrayListUnmanaged(u8) = .empty;
if (opt_cwd) |cwd| try buf.writer(arena).print("cd {s} && ", .{cwd});
const writer = buf.writer(arena);
if (opt_cwd) |cwd| try writer.print("cd {s} && ", .{cwd});
if (opt_env) |env| {
const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
var it = env.iterator();
@ -725,11 +784,15 @@ pub fn allocPrintCmd2(
if (process_env_map.get(key)) |process_value| {
if (std.mem.eql(u8, value, process_value)) continue;
}
try buf.writer(arena).print("{s}={s} ", .{ key, value });
try writer.print("{s}=", .{key});
try shell.escape(writer, value, false);
try writer.writeByte(' ');
}
}
for (argv) |arg| {
try buf.writer(arena).print("{s} ", .{arg});
try shell.escape(writer, argv[0], true);
for (argv[1..]) |arg| {
try writer.writeByte(' ');
try shell.escape(writer, arg, false);
}
return buf.toOwnedSlice(arena);
}
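The net effect is that commands echoed for verbose output and failure reports are now valid POSIX shell: any argument or environment value containing characters outside the safe set is double-quoted, with control characters escaped. Illustratively (assuming the elided leading parameters are `arena: Allocator` and `opt_cwd: ?[]const u8`):

```zig
const std = @import("std");

pub fn main() !void {
    var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_state.deinit();
    const cmd = try std.Build.Step.allocPrintCmd2(arena_state.allocator(), null, null, &.{
        "echo", "hello world", "plain",
    });
    // Prints: echo "hello world" plain
    std.debug.print("{s}\n", .{cmd});
}
```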

View File

@ -292,6 +292,13 @@ pub const Kind = enum {
obj,
@"test",
test_obj,
pub fn isTest(kind: Kind) bool {
return switch (kind) {
.exe, .lib, .obj => false,
.@"test", .test_obj => true,
};
}
};
pub const HeaderInstallation = union(enum) {
@ -368,19 +375,16 @@ pub fn create(owner: *std.Build, options: Options) *Compile {
panic("invalid name: '{s}'. It looks like a file path, but it is supposed to be the library or application name.", .{name});
}
// Avoid the common case of the step name looking like "zig test test".
const name_adjusted = if ((options.kind == .@"test" or options.kind == .test_obj) and mem.eql(u8, name, "test"))
""
else
owner.fmt("{s} ", .{name});
const resolved_target = options.root_module.resolved_target orelse
@panic("the root Module of a Compile step must be created with a known 'target' field");
const target = resolved_target.result;
const target = &resolved_target.result;
const step_name = owner.fmt("compile {s} {s}{s} {s}", .{
@tagName(options.kind),
name_adjusted,
const step_name = owner.fmt("compile {s} {s} {s}", .{
// Avoid the common case of the step name looking like "compile test test".
if (options.kind.isTest() and mem.eql(u8, name, "test"))
@tagName(options.kind)
else
owner.fmt("{s} {s}", .{ @tagName(options.kind), name }),
@tagName(options.root_module.optimize orelse .Debug),
resolved_target.query.zigTriple(owner.allocator) catch @panic("OOM"),
});
@ -664,6 +668,7 @@ pub fn producesPdbFile(compile: *Compile) bool {
else => return false,
}
if (target.ofmt == .c) return false;
if (compile.use_llvm == false) return false;
if (compile.root_module.strip == true or
(compile.root_module.strip == null and compile.root_module.optimize == .ReleaseSmall))
{
@ -1861,7 +1866,7 @@ fn outputPath(c: *Compile, out_dir: std.Build.Cache.Path, ea: std.zig.EmitArtifa
const arena = c.step.owner.graph.arena;
const name = ea.cacheName(arena, .{
.root_name = c.name,
.target = c.root_module.resolved_target.?.result,
.target = &c.root_module.resolved_target.?.result,
.output_mode = switch (c.kind) {
.lib => .Lib,
.obj, .test_obj => .Obj,

View File

@ -119,18 +119,12 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const install_artifact: *InstallArtifact = @fieldParentPtr("step", step);
const b = step.owner;
const cwd = fs.cwd();
var all_cached = true;
if (install_artifact.dest_dir) |dest_dir| {
const full_dest_path = b.getInstallPath(dest_dir, install_artifact.dest_sub_path);
const src_path = install_artifact.emitted_bin.?.getPath3(b, step);
const p = fs.Dir.updateFile(src_path.root_dir.handle, src_path.sub_path, cwd, full_dest_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
src_path.sub_path, full_dest_path, @errorName(err),
});
};
const p = try step.installFile(install_artifact.emitted_bin.?, full_dest_path);
all_cached = all_cached and p == .fresh;
if (install_artifact.dylib_symlinks) |dls| {
@ -141,48 +135,28 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
}
if (install_artifact.implib_dir) |implib_dir| {
const src_path = install_artifact.emitted_implib.?.getPath3(b, step);
const full_implib_path = b.getInstallPath(implib_dir, fs.path.basename(src_path.sub_path));
const p = fs.Dir.updateFile(src_path.root_dir.handle, src_path.sub_path, cwd, full_implib_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
src_path.sub_path, full_implib_path, @errorName(err),
});
};
const full_implib_path = b.getInstallPath(implib_dir, install_artifact.emitted_implib.?.basename(b, step));
const p = try step.installFile(install_artifact.emitted_implib.?, full_implib_path);
all_cached = all_cached and p == .fresh;
}
if (install_artifact.pdb_dir) |pdb_dir| {
const src_path = install_artifact.emitted_pdb.?.getPath3(b, step);
const full_pdb_path = b.getInstallPath(pdb_dir, fs.path.basename(src_path.sub_path));
const p = fs.Dir.updateFile(src_path.root_dir.handle, src_path.sub_path, cwd, full_pdb_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
src_path.sub_path, full_pdb_path, @errorName(err),
});
};
const full_pdb_path = b.getInstallPath(pdb_dir, install_artifact.emitted_pdb.?.basename(b, step));
const p = try step.installFile(install_artifact.emitted_pdb.?, full_pdb_path);
all_cached = all_cached and p == .fresh;
}
if (install_artifact.h_dir) |h_dir| {
if (install_artifact.emitted_h) |emitted_h| {
const src_path = emitted_h.getPath3(b, step);
const full_h_path = b.getInstallPath(h_dir, fs.path.basename(src_path.sub_path));
const p = fs.Dir.updateFile(src_path.root_dir.handle, src_path.sub_path, cwd, full_h_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
src_path.sub_path, full_h_path, @errorName(err),
});
};
const full_h_path = b.getInstallPath(h_dir, emitted_h.basename(b, step));
const p = try step.installFile(emitted_h, full_h_path);
all_cached = all_cached and p == .fresh;
}
for (install_artifact.artifact.installed_headers.items) |installation| switch (installation) {
.file => |file| {
const src_path = file.source.getPath3(b, step);
const full_h_path = b.getInstallPath(h_dir, file.dest_rel_path);
const p = fs.Dir.updateFile(src_path.root_dir.handle, src_path.sub_path, cwd, full_h_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
src_path.sub_path, full_h_path, @errorName(err),
});
};
const p = try step.installFile(file.source, full_h_path);
all_cached = all_cached and p == .fresh;
},
.directory => |dir| {
@ -209,16 +183,15 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
}
}
const src_entry_path = src_dir_path.join(b.allocator, entry.path) catch @panic("OOM");
const full_dest_path = b.pathJoin(&.{ full_h_prefix, entry.path });
switch (entry.kind) {
.directory => try cwd.makePath(full_dest_path),
.directory => {
try Step.handleVerbose(b, null, &.{ "install", "-d", full_dest_path });
const p = try step.installDir(full_dest_path);
all_cached = all_cached and p == .existed;
},
.file => {
const p = fs.Dir.updateFile(src_entry_path.root_dir.handle, src_entry_path.sub_path, cwd, full_dest_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
src_entry_path.sub_path, full_dest_path, @errorName(err),
});
};
const p = try step.installFile(try dir.source.join(b.allocator, entry.path), full_dest_path);
all_cached = all_cached and p == .fresh;
},
else => continue,

View File

@ -74,31 +74,23 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
var all_cached = true;
next_entry: while (try it.next()) |entry| {
for (install_dir.options.exclude_extensions) |ext| {
if (mem.endsWith(u8, entry.path, ext)) {
if (mem.endsWith(u8, entry.path, ext)) continue :next_entry;
}
if (install_dir.options.include_extensions) |incs| {
for (incs) |inc| {
if (mem.endsWith(u8, entry.path, inc)) break;
} else {
continue :next_entry;
}
}
if (install_dir.options.include_extensions) |incs| {
var found = false;
for (incs) |inc| {
if (mem.endsWith(u8, entry.path, inc)) {
found = true;
break;
}
}
if (!found) continue :next_entry;
}
// relative to src build root
const src_sub_path = try src_dir_path.join(arena, entry.path);
const src_path = try install_dir.options.source_dir.join(b.allocator, entry.path);
const dest_path = b.pathJoin(&.{ dest_prefix, entry.path });
const cwd = fs.cwd();
switch (entry.kind) {
.directory => {
if (need_derived_inputs) try step.addDirectoryWatchInputFromPath(src_sub_path);
try cwd.makePath(dest_path);
// TODO: set result_cached=false if the directory did not already exist.
if (need_derived_inputs) _ = try step.addDirectoryWatchInput(src_path);
const p = try step.installDir(dest_path);
all_cached = all_cached and p == .existed;
},
.file => {
for (install_dir.options.blank_extensions) |ext| {
@ -108,18 +100,8 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
}
}
const prev_status = fs.Dir.updateFile(
src_sub_path.root_dir.handle,
src_sub_path.sub_path,
cwd,
dest_path,
.{},
) catch |err| {
return step.fail("unable to update file from '{}' to '{s}': {s}", .{
src_sub_path, dest_path, @errorName(err),
});
};
all_cached = all_cached and prev_status == .fresh;
const p = try step.installFile(src_path, dest_path);
all_cached = all_cached and p == .fresh;
},
else => continue,
}

View File

@ -41,13 +41,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const install_file: *InstallFile = @fieldParentPtr("step", step);
try step.singleUnchangingWatchInput(install_file.source);
const full_src_path = install_file.source.getPath2(b, step);
const full_dest_path = b.getInstallPath(install_file.dir, install_file.dest_rel_path);
const cwd = std.fs.cwd();
const prev = std.fs.Dir.updateFile(cwd, full_src_path, cwd, full_dest_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{s}': {s}", .{
full_src_path, full_dest_path, @errorName(err),
});
};
step.result_cached = prev == .fresh;
const p = try step.installFile(install_file.source, full_dest_path);
step.result_cached = p == .fresh;
}

View File

@ -209,7 +209,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
}
if (objcopy.add_section) |section| {
try argv.append("--add-section");
try argv.appendSlice(&.{b.fmt("{s}={s}", .{ section.section_name, section.file_path.getPath(b) })});
try argv.appendSlice(&.{b.fmt("{s}={s}", .{ section.section_name, section.file_path.getPath2(b, step) })});
}
if (objcopy.set_section_alignment) |set_align| {
try argv.append("--set-section-alignment");

View File

@ -456,11 +456,28 @@ pub fn addPathDir(run: *Run, search_path: []const u8) void {
const b = run.step.owner;
const env_map = getEnvMapInternal(run);
const key = "PATH";
const use_wine = b.enable_wine and b.graph.host.result.os.tag != .windows and use_wine: switch (run.argv.items[0]) {
.artifact => |p| p.artifact.rootModuleTarget().os.tag == .windows,
.lazy_path => |p| {
switch (p.lazy_path) {
.generated => |g| if (g.file.step.cast(Step.Compile)) |cs| break :use_wine cs.rootModuleTarget().os.tag == .windows,
else => {},
}
break :use_wine std.mem.endsWith(u8, p.lazy_path.basename(b, &run.step), ".exe");
},
.decorated_directory => false,
.bytes => |bytes| std.mem.endsWith(u8, bytes, ".exe"),
.output_file, .output_directory => false,
};
const key = if (use_wine) "WINEPATH" else "PATH";
const prev_path = env_map.get(key);
if (prev_path) |pp| {
const new_path = b.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
const new_path = b.fmt("{s}{c}{s}", .{
pp,
if (use_wine) fs.path.delimiter_windows else fs.path.delimiter,
search_path,
});
env_map.put(key, new_path) catch @panic("OOM");
} else {
env_map.put(key, b.dupePath(search_path)) catch @panic("OOM");
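When a Windows binary runs through Wine on a non-Windows host, prepending to the host `PATH` does nothing for DLL resolution inside Wine, so `addPathDir` now detects that case and appends to `WINEPATH` using the Windows `;` delimiter instead. From a build script's perspective nothing changes (paths here are illustrative):

```zig
const std = @import("std");

fn addWindowsTestRun(b: *std.Build, exe: *std.Build.Step.Compile) void {
    const run = b.addRunArtifact(exe);
    // For a Windows-target artifact with Wine enabled on a Linux host, this
    // directory now lands in WINEPATH rather than PATH.
    run.addPathDir(b.pathFromRoot("zig-out/bin"));
}
```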
@ -866,7 +883,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, prog_node, null);
const dep_file_dir = std.fs.cwd();
const dep_file_basename = dep_output_file.generated_file.getPath();
const dep_file_basename = dep_output_file.generated_file.getPath2(b, step);
if (has_side_effects)
try man.addDepFile(dep_file_dir, dep_file_basename)
else
@ -1091,7 +1108,7 @@ fn runCommand(
const need_cross_libc = exe.is_linking_libc and
(root_target.isGnuLibC() or (root_target.isMuslLibC() and exe.linkage == .dynamic));
const other_target = exe.root_module.resolved_target.?.result;
switch (std.zig.system.getExternalExecutor(b.graph.host.result, &other_target, .{
switch (std.zig.system.getExternalExecutor(&b.graph.host.result, &other_target, .{
.qemu_fixes_dl = need_cross_libc and b.libc_runtimes_dir != null,
.link_libc = exe.is_linking_libc,
})) {

View File

@ -1074,7 +1074,7 @@ pub const ObjectFormat = enum {
}
};
pub fn toElfMachine(target: Target) std.elf.EM {
pub fn toElfMachine(target: *const Target) std.elf.EM {
return switch (target.cpu.arch) {
.amdgcn => .AMDGPU,
.arc => .ARC_COMPACT,
@ -1115,7 +1115,7 @@ pub fn toElfMachine(target: Target) std.elf.EM {
};
}
pub fn toCoffMachine(target: Target) std.coff.MachineType {
pub fn toCoffMachine(target: *const Target) std.coff.MachineType {
return switch (target.cpu.arch) {
.arm => .ARM,
.thumb => .ARMNT,
@ -1999,7 +1999,7 @@ pub const Cpu = struct {
}
};
pub fn zigTriple(target: Target, allocator: Allocator) Allocator.Error![]u8 {
pub fn zigTriple(target: *const Target, allocator: Allocator) Allocator.Error![]u8 {
return Query.fromTarget(target).zigTriple(allocator);
}
@ -2007,7 +2007,7 @@ pub fn hurdTupleSimple(allocator: Allocator, arch: Cpu.Arch, abi: Abi) ![]u8 {
return std.fmt.allocPrint(allocator, "{s}-{s}", .{ @tagName(arch), @tagName(abi) });
}
pub fn hurdTuple(target: Target, allocator: Allocator) ![]u8 {
pub fn hurdTuple(target: *const Target, allocator: Allocator) ![]u8 {
return hurdTupleSimple(allocator, target.cpu.arch, target.abi);
}
@ -2015,63 +2015,63 @@ pub fn linuxTripleSimple(allocator: Allocator, arch: Cpu.Arch, os_tag: Os.Tag, a
return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(arch), @tagName(os_tag), @tagName(abi) });
}
pub fn linuxTriple(target: Target, allocator: Allocator) ![]u8 {
pub fn linuxTriple(target: *const Target, allocator: Allocator) ![]u8 {
return linuxTripleSimple(allocator, target.cpu.arch, target.os.tag, target.abi);
}
pub fn exeFileExt(target: Target) [:0]const u8 {
pub fn exeFileExt(target: *const Target) [:0]const u8 {
return target.os.tag.exeFileExt(target.cpu.arch);
}
pub fn staticLibSuffix(target: Target) [:0]const u8 {
pub fn staticLibSuffix(target: *const Target) [:0]const u8 {
return target.os.tag.staticLibSuffix(target.abi);
}
pub fn dynamicLibSuffix(target: Target) [:0]const u8 {
pub fn dynamicLibSuffix(target: *const Target) [:0]const u8 {
return target.os.tag.dynamicLibSuffix();
}
pub fn libPrefix(target: Target) [:0]const u8 {
pub fn libPrefix(target: *const Target) [:0]const u8 {
return target.os.tag.libPrefix(target.abi);
}
pub inline fn isMinGW(target: Target) bool {
pub inline fn isMinGW(target: *const Target) bool {
return target.os.tag == .windows and target.abi.isGnu();
}
pub inline fn isGnuLibC(target: Target) bool {
pub inline fn isGnuLibC(target: *const Target) bool {
return switch (target.os.tag) {
.hurd, .linux => target.abi.isGnu(),
else => false,
};
}
pub inline fn isMuslLibC(target: Target) bool {
pub inline fn isMuslLibC(target: *const Target) bool {
return target.os.tag == .linux and target.abi.isMusl();
}
pub inline fn isDarwinLibC(target: Target) bool {
pub inline fn isDarwinLibC(target: *const Target) bool {
return switch (target.abi) {
.none, .macabi, .simulator => target.os.tag.isDarwin(),
else => false,
};
}
pub inline fn isFreeBSDLibC(target: Target) bool {
pub inline fn isFreeBSDLibC(target: *const Target) bool {
return switch (target.abi) {
.none, .eabihf => target.os.tag == .freebsd,
else => false,
};
}
pub inline fn isNetBSDLibC(target: Target) bool {
pub inline fn isNetBSDLibC(target: *const Target) bool {
return switch (target.abi) {
.none, .eabi, .eabihf => target.os.tag == .netbsd,
else => false,
};
}
pub inline fn isWasiLibC(target: Target) bool {
pub inline fn isWasiLibC(target: *const Target) bool {
return target.os.tag == .wasi and target.abi.isMusl();
}
@ -2576,7 +2576,7 @@ pub const DynamicLinker = struct {
}
};
pub fn standardDynamicLinkerPath(target: Target) DynamicLinker {
pub fn standardDynamicLinkerPath(target: *const Target) DynamicLinker {
return DynamicLinker.standard(target.cpu, target.os, target.abi);
}
@ -2645,11 +2645,11 @@ pub fn ptrBitWidth_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) u16 {
};
}
pub fn ptrBitWidth(target: Target) u16 {
pub fn ptrBitWidth(target: *const Target) u16 {
return ptrBitWidth_cpu_abi(target.cpu, target.abi);
}
pub fn stackAlignment(target: Target) u16 {
pub fn stackAlignment(target: *const Target) u16 {
// Overrides for when the stack alignment is not equal to the pointer width.
switch (target.cpu.arch) {
.m68k,
@ -2697,7 +2697,7 @@ pub fn stackAlignment(target: Target) u16 {
/// Default signedness of `char` for the native C compiler for this target
/// Note that char signedness is implementation-defined and many compilers provide
/// an option to override the default signedness e.g. GCC's -funsigned-char / -fsigned-char
pub fn cCharSignedness(target: Target) std.builtin.Signedness {
pub fn cCharSignedness(target: *const Target) std.builtin.Signedness {
if (target.os.tag.isDarwin() or target.os.tag == .windows or target.os.tag == .uefi) return .signed;
return switch (target.cpu.arch) {
@ -2740,7 +2740,7 @@ pub const CType = enum {
longdouble,
};
pub fn cTypeByteSize(t: Target, c_type: CType) u16 {
pub fn cTypeByteSize(t: *const Target, c_type: CType) u16 {
return switch (c_type) {
.char,
.short,
@ -2766,7 +2766,7 @@ pub fn cTypeByteSize(t: Target, c_type: CType) u16 {
};
}
pub fn cTypeBitSize(target: Target, c_type: CType) u16 {
pub fn cTypeBitSize(target: *const Target, c_type: CType) u16 {
switch (target.os.tag) {
.freestanding, .other => switch (target.cpu.arch) {
.msp430 => switch (c_type) {
@ -3077,7 +3077,7 @@ pub fn cTypeBitSize(target: Target, c_type: CType) u16 {
}
}
pub fn cTypeAlignment(target: Target, c_type: CType) u16 {
pub fn cTypeAlignment(target: *const Target, c_type: CType) u16 {
// Overrides for unusual alignments
switch (target.cpu.arch) {
.avr => return 1,
@ -3172,7 +3172,7 @@ pub fn cTypeAlignment(target: Target, c_type: CType) u16 {
);
}
pub fn cTypePreferredAlignment(target: Target, c_type: CType) u16 {
pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 {
// Overrides for unusual alignments
switch (target.cpu.arch) {
.arc => switch (c_type) {
@ -3265,7 +3265,7 @@ pub fn cTypePreferredAlignment(target: Target, c_type: CType) u16 {
);
}
pub fn cMaxIntAlignment(target: std.Target) u16 {
pub fn cMaxIntAlignment(target: *const Target) u16 {
return switch (target.cpu.arch) {
.avr => 1,
@ -3328,7 +3328,7 @@ pub fn cMaxIntAlignment(target: std.Target) u16 {
};
}
pub fn cCallingConvention(target: Target) ?std.builtin.CallingConvention {
pub fn cCallingConvention(target: *const Target) ?std.builtin.CallingConvention {
return switch (target.cpu.arch) {
.x86_64 => switch (target.os.tag) {
.windows, .uefi => .{ .x86_64_win = .{} },
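All of these `std.Target` helpers switch from by-value `Target` parameters to `*const Target`. Method-call syntax auto-takes the address of a value receiver, so call sites of the form `target.foo()` keep compiling unchanged; only positional uses (as in the compiler_rt changes above) need an explicit `&`. For instance:

```zig
const std = @import("std");
const builtin = @import("builtin");

test "method syntax still works on a Target value" {
    // ptrBitWidth now takes *const Target; the call below auto-references
    // the builtin.target value, so no call-site change is needed.
    try std.testing.expect(builtin.target.ptrBitWidth() > 0);
}
```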

View File

@ -94,7 +94,7 @@ pub const OsVersion = union(enum) {
pub const SemanticVersion = std.SemanticVersion;
pub fn fromTarget(target: Target) Query {
pub fn fromTarget(target: *const Target) Query {
var result: Query = .{
.cpu_arch = target.cpu.arch,
.cpu_model = .{ .explicit = target.cpu.model },

View File

@ -9,7 +9,7 @@ const Arch = std.Target.Cpu.Arch;
///
/// See also `std.debug.SelfInfo.supportsUnwinding` which tells whether the Zig
/// standard library has a working implementation of unwinding for this target.
pub fn supportsUnwinding(target: std.Target) bool {
pub fn supportsUnwinding(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.amdgcn,
.nvptx,

View File

@ -1795,10 +1795,10 @@ fn spRegNum(reg_context: Dwarf.abi.RegisterContext) u8 {
const ip_reg_num = Dwarf.abi.ipRegNum(native_arch).?;
/// Tells whether unwinding for the host is implemented.
pub const supports_unwinding = supportsUnwinding(builtin.target);
pub const supports_unwinding = supportsUnwinding(&builtin.target);
comptime {
if (supports_unwinding) assert(Dwarf.abi.supportsUnwinding(builtin.target));
if (supports_unwinding) assert(Dwarf.abi.supportsUnwinding(&builtin.target));
}
/// Tells whether unwinding for this target is *implemented* here in the Zig
@ -1806,7 +1806,7 @@ comptime {
///
/// See also `Dwarf.abi.supportsUnwinding` which tells whether Dwarf supports
/// unwinding on that target *in theory*.
pub fn supportsUnwinding(target: std.Target) bool {
pub fn supportsUnwinding(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.x86 => switch (target.os.tag) {
.linux, .netbsd, .solaris, .illumos => true,

View File

@ -1146,6 +1146,7 @@ pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) MakeError!void {
/// On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
/// Fails on an empty path with `error.BadPathName` as that is not a path that can be created.
///
/// Paths containing `..` components are handled differently depending on the platform:
/// - On Windows, `..` are resolved before the path is passed to NtCreateFile, meaning
@ -1155,10 +1156,19 @@ pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) MakeError!void {
/// meaning a `sub_path` like "first/../second" will create both a `./first`
/// and a `./second` directory.
pub fn makePath(self: Dir, sub_path: []const u8) (MakeError || StatFileError)!void {
_ = try self.makePathStatus(sub_path);
}
pub const MakePathStatus = enum { existed, created };
/// Same as `makePath` except returns whether the path already existed or was successfully created.
pub fn makePathStatus(self: Dir, sub_path: []const u8) (MakeError || StatFileError)!MakePathStatus {
var it = try fs.path.componentIterator(sub_path);
var component = it.last() orelse return;
var status: MakePathStatus = .existed;
var component = it.last() orelse return error.BadPathName;
while (true) {
self.makeDir(component.path) catch |err| switch (err) {
if (self.makeDir(component.path)) |_| {
status = .created;
} else |err| switch (err) {
error.PathAlreadyExists => {
// stat the file and return an error if it's not a directory
// this is important because otherwise a dangling symlink
@ -1177,8 +1187,8 @@ pub fn makePath(self: Dir, sub_path: []const u8) (MakeError || StatFileError)!vo
continue;
},
else => |e| return e,
};
component = it.next() orelse return;
}
component = it.next() orelse return status;
}
}
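`makePathStatus` is the new primitive; `makePath` keeps its old signature by discarding the status. A usage sketch:

```zig
const std = @import("std");

test "makePathStatus reports creation vs. existence" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const first = try tmp.dir.makePathStatus("a/b/c");
    try std.testing.expect(first == .created);
    const second = try tmp.dir.makePathStatus("a/b/c");
    try std.testing.expect(second == .existed);
}
```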

View File

@ -27,8 +27,8 @@ const fs = std.fs;
const process = std.process;
const native_os = builtin.target.os.tag;
pub const sep_windows = '\\';
pub const sep_posix = '/';
pub const sep_windows: u8 = '\\';
pub const sep_posix: u8 = '/';
pub const sep = switch (native_os) {
.windows, .uefi => sep_windows,
else => sep_posix,
@ -41,8 +41,8 @@ pub const sep_str = switch (native_os) {
else => sep_str_posix,
};
pub const delimiter_windows = ';';
pub const delimiter_posix = ':';
pub const delimiter_windows: u8 = ';';
pub const delimiter_posix: u8 = ':';
pub const delimiter = if (native_os == .windows) delimiter_windows else delimiter_posix;
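Previously these constants were untyped `comptime_int`s; typing them as `u8` lets call sites format them with `{c}` (as the `addPathDir` change above does with `fs.path.delimiter_windows`) and pass them wherever a runtime byte is expected:

```zig
const std = @import("std");

test "delimiter_posix is a u8 and formats with {c}" {
    var buf: [1]u8 = undefined;
    const s = try std.fmt.bufPrint(&buf, "{c}", .{std.fs.path.delimiter_posix});
    try std.testing.expectEqualStrings(":", s);
}
```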
/// Returns if the given byte is a valid path separator

View File

@ -220,42 +220,61 @@ pub inline fn floatEpsAt(comptime T: type, x: T) T {
}
}
/// Returns the value inf for floating point type T.
pub inline fn inf(comptime T: type) T {
return reconstructFloat(T, floatExponentMax(T) + 1, mantissaOne(T));
/// Returns the inf value for a floating point `Type`.
pub inline fn inf(comptime Type: type) Type {
const RuntimeType = switch (Type) {
else => Type,
comptime_float => f128, // any float type will do
};
return reconstructFloat(RuntimeType, floatExponentMax(RuntimeType) + 1, mantissaOne(RuntimeType));
}
/// Returns the canonical quiet NaN representation for floating point type T.
pub inline fn nan(comptime T: type) T {
/// Returns the canonical quiet NaN representation for a floating point `Type`.
pub inline fn nan(comptime Type: type) Type {
const RuntimeType = switch (Type) {
else => Type,
comptime_float => f128, // any float type will do
};
return reconstructFloat(
T,
floatExponentMax(T) + 1,
mantissaOne(T) | 1 << (floatFractionalBits(T) - 1),
RuntimeType,
floatExponentMax(RuntimeType) + 1,
mantissaOne(RuntimeType) | 1 << (floatFractionalBits(RuntimeType) - 1),
);
}
/// Returns a signalling NaN representation for floating point type T.
/// Returns a signalling NaN representation for a floating point `Type`.
///
/// TODO: LLVM is known to miscompile on some architectures to quiet NaN -
/// this is tracked by https://github.com/ziglang/zig/issues/14366
pub inline fn snan(comptime T: type) T {
pub inline fn snan(comptime Type: type) Type {
const RuntimeType = switch (Type) {
else => Type,
comptime_float => f128, // any float type will do
};
return reconstructFloat(
T,
floatExponentMax(T) + 1,
mantissaOne(T) | 1 << (floatFractionalBits(T) - 2),
RuntimeType,
floatExponentMax(RuntimeType) + 1,
mantissaOne(RuntimeType) | 1 << (floatFractionalBits(RuntimeType) - 2),
);
}
test "float bits" {
inline for ([_]type{ f16, f32, f64, f80, f128, c_longdouble }) |T| {
// (1 +) for the sign bit, since it is separate from the other bits
const size = 1 + floatExponentBits(T) + floatMantissaBits(T);
try expect(@bitSizeOf(T) == size);
fn floatBits(comptime Type: type) !void {
// (1 +) for the sign bit, since it is separate from the other bits
const size = 1 + floatExponentBits(Type) + floatMantissaBits(Type);
try expect(@bitSizeOf(Type) == size);
try expect(floatFractionalBits(Type) <= floatMantissaBits(Type));
// for machine epsilon, assert expmin <= -prec <= expmax
try expect(floatExponentMin(T) <= -floatFractionalBits(T));
try expect(-floatFractionalBits(T) <= floatExponentMax(T));
}
// for machine epsilon, assert expmin <= -prec <= expmax
try expect(floatExponentMin(Type) <= -floatFractionalBits(Type));
try expect(-floatFractionalBits(Type) <= floatExponentMax(Type));
}
test floatBits {
try floatBits(f16);
try floatBits(f32);
try floatBits(f64);
try floatBits(f80);
try floatBits(f128);
try floatBits(c_longdouble);
}
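With the `RuntimeType` indirection, `inf`, `nan`, and `snan` now also accept `comptime_float` (computed internally as `f128`, per the comments above); the reworked `signbit` tests in the next file rely on exactly this. A small check of the new capability:

```zig
const std = @import("std");

test "inf and nan accept comptime_float" {
    const pos_inf = comptime std.math.inf(comptime_float);
    try std.testing.expect(pos_inf > std.math.floatMax(f128));
    // nan(comptime_float) also compiles now; NaN comparisons are avoided here.
    _ = comptime std.math.nan(comptime_float);
}
```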
test inf {

View File

@ -4,20 +4,47 @@ const expect = std.testing.expect;
/// Returns whether x is negative or negative 0.
pub fn signbit(x: anytype) bool {
const T = @TypeOf(x);
const TBits = std.meta.Int(.unsigned, @typeInfo(T).float.bits);
return @as(TBits, @bitCast(x)) >> (@bitSizeOf(T) - 1) != 0;
return switch (@typeInfo(@TypeOf(x))) {
.int, .comptime_int => x,
.float => |float| @as(@Type(.{ .int = .{
.signedness = .signed,
.bits = float.bits,
} }), @bitCast(x)),
.comptime_float => @as(i128, @bitCast(@as(f128, x))), // any float type will do
else => @compileError("std.math.signbit does not support " ++ @typeName(@TypeOf(x))),
} < 0;
}
test signbit {
inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| {
try expect(!signbit(@as(T, 0.0)));
try expect(!signbit(@as(T, 1.0)));
try expect(signbit(@as(T, -2.0)));
try expect(signbit(@as(T, -0.0)));
try expect(!signbit(math.inf(T)));
try expect(signbit(-math.inf(T)));
try expect(!signbit(math.nan(T)));
try expect(signbit(-math.nan(T)));
}
try testInts(i0);
try testInts(u0);
try testInts(i1);
try testInts(u1);
try testInts(i2);
try testInts(u2);
try testFloats(f16);
try testFloats(f32);
try testFloats(f64);
try testFloats(f80);
try testFloats(f128);
try testFloats(c_longdouble);
try testFloats(comptime_float);
}
fn testInts(comptime Type: type) !void {
try expect((std.math.minInt(Type) < 0) == signbit(@as(Type, std.math.minInt(Type))));
try expect(!signbit(@as(Type, 0)));
try expect(!signbit(@as(Type, std.math.maxInt(Type))));
}
fn testFloats(comptime Type: type) !void {
try expect(!signbit(@as(Type, 0.0)));
try expect(!signbit(@as(Type, 1.0)));
try expect(signbit(@as(Type, -2.0)));
try expect(signbit(@as(Type, -0.0)));
try expect(!signbit(math.inf(Type)));
try expect(signbit(-math.inf(Type)));
try expect(!signbit(math.nan(Type)));
try expect(signbit(-math.nan(Type)));
}

View File

@ -7571,7 +7571,10 @@ const lfs64_abi = native_os == .linux and builtin.link_libc and (builtin.abi.isG
/// If this happens the fix is to add the error code to the corresponding
/// switch expression, possibly introduce a new error in the error set, and
/// send a patch to Zig.
pub const unexpected_error_tracing = builtin.zig_backend == .stage2_llvm and builtin.mode == .Debug;
pub const unexpected_error_tracing = builtin.mode == .Debug and switch (builtin.zig_backend) {
.stage2_llvm, .stage2_x86_64 => true,
else => false,
};
pub const UnexpectedError = error{
/// The Operating System returned an undocumented error code.

View File

@ -485,6 +485,9 @@ fn _start() callconv(.naked) noreturn {
}
fn WinStartup() callconv(.withStackAlign(.c, 1)) noreturn {
// Switch from the x87 fpu state set by windows to the state expected by the gnu abi.
if (builtin.abi == .gnu) asm volatile ("fninit");
if (!builtin.single_threaded and !builtin.link_libc) {
_ = @import("os/windows/tls.zig");
}
@ -495,6 +498,9 @@ fn WinStartup() callconv(.withStackAlign(.c, 1)) noreturn {
}
fn wWinMainCRTStartup() callconv(.withStackAlign(.c, 1)) noreturn {
// Switch from the x87 fpu state set by windows to the state expected by the gnu abi.
if (builtin.abi == .gnu) asm volatile ("fninit");
if (!builtin.single_threaded and !builtin.link_libc) {
_ = @import("os/windows/tls.zig");
}

View File

@ -141,7 +141,7 @@ pub fn lineDelta(source: []const u8, start: usize, end: usize) isize {
pub const BinNameOptions = struct {
root_name: []const u8,
target: std.Target,
target: *const std.Target,
output_mode: std.builtin.OutputMode,
link_mode: ?std.builtin.LinkMode = null,
version: ?std.SemanticVersion = null,

View File

@ -15,7 +15,7 @@ pub const DarwinSdkLayout = enum {
pub fn detect(
arena: Allocator,
zig_lib_dir: []const u8,
target: std.Target,
target: *const std.Target,
is_native_abi: bool,
link_libc: bool,
libc_installation: ?*const LibCInstallation,
@ -88,7 +88,7 @@ pub fn detect(
};
}
fn detectFromInstallation(arena: Allocator, target: std.Target, lci: *const LibCInstallation) !LibCDirs {
fn detectFromInstallation(arena: Allocator, target: *const std.Target, lci: *const LibCInstallation) !LibCDirs {
var list = try std.ArrayList([]const u8).initCapacity(arena, 5);
var framework_list = std.ArrayList([]const u8).init(arena);
@ -146,7 +146,7 @@ fn detectFromInstallation(arena: Allocator, target: std.Target, lci: *const LibC
pub fn detectFromBuilding(
arena: Allocator,
zig_lib_dir: []const u8,
target: std.Target,
target: *const std.Target,
) !LibCDirs {
const s = std.fs.path.sep_str;
@ -224,7 +224,7 @@ pub fn detectFromBuilding(
};
}
fn libCGenericName(target: std.Target) [:0]const u8 {
fn libCGenericName(target: *const std.Target) [:0]const u8 {
switch (target.os.tag) {
.windows => return "mingw",
.macos, .ios, .tvos, .watchos, .visionos => return "darwin",

View File

@ -26,7 +26,7 @@ pub const FindError = error{
pub fn parse(
allocator: Allocator,
libc_file: []const u8,
target: std.Target,
target: *const std.Target,
) !LibCInstallation {
var self: LibCInstallation = .{};
@ -157,7 +157,7 @@ pub fn render(self: LibCInstallation, out: anytype) !void {
pub const FindNativeOptions = struct {
allocator: Allocator,
target: std.Target,
target: *const std.Target,
/// If enabled, will print human-friendly errors to stderr.
verbose: bool = false,
@ -700,7 +700,7 @@ pub const CrtBasenames = struct {
crtn: ?[]const u8 = null,
pub const GetArgs = struct {
target: std.Target,
target: *const std.Target,
link_libc: bool,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
@ -965,7 +965,7 @@ pub fn resolveCrtPaths(
lci: LibCInstallation,
arena: Allocator,
crt_basenames: CrtBasenames,
target: std.Target,
target: *const std.Target,
) error{ OutOfMemory, LibCInstallationMissingCrtDir }!CrtPaths {
const crt_dir_path: Path = .{
.root_dir = std.Build.Cache.Directory.cwd(),

View File

@ -66,7 +66,7 @@ pub const Options = struct {
allocator: Allocator,
strip: bool = true,
name: []const u8 = &.{},
target: std.Target = builtin.target,
target: *const std.Target = &builtin.target,
triple: []const u8 = &.{},
};

View File

@ -28,7 +28,7 @@ pub const GetExternalExecutorOptions = struct {
/// Return whether or not the given host is capable of running executables of
/// the other target.
pub fn getExternalExecutor(
host: std.Target,
host: *const std.Target,
candidate: *const std.Target,
options: GetExternalExecutorOptions,
) Executor {

View File

@ -13,7 +13,7 @@ framework_dirs: std.ArrayListUnmanaged([]const u8) = .empty,
rpaths: std.ArrayListUnmanaged([]const u8) = .empty,
warnings: std.ArrayListUnmanaged([]const u8) = .empty,
pub fn detect(arena: Allocator, native_target: std.Target) !NativePaths {
pub fn detect(arena: Allocator, native_target: *const std.Target) !NativePaths {
var self: NativePaths = .{ .arena = arena };
var is_nix = false;
if (process.getEnvVarOwned(arena, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {

View File

@ -34,7 +34,7 @@ pub fn isSdkInstalled(allocator: Allocator) bool {
/// Caller owns the memory.
/// stderr from xcrun is ignored.
/// If error.OutOfMemory occurs in Allocator, this function returns null.
pub fn getSdk(allocator: Allocator, target: Target) ?[]const u8 {
pub fn getSdk(allocator: Allocator, target: *const Target) ?[]const u8 {
const is_simulator_abi = target.abi == .simulator;
const sdk = switch (target.os.tag) {
.ios => switch (target.abi) {

View File

@ -116,7 +116,7 @@ pub const freebsd_libc_version: std.SemanticVersion = .{ .major = 14, .minor = 0
/// The version of Zig's bundled NetBSD libc used when linking libc statically.
pub const netbsd_libc_version: std.SemanticVersion = .{ .major = 10, .minor = 1, .patch = 0 };
pub fn canBuildLibC(target: std.Target) bool {
pub fn canBuildLibC(target: *const std.Target) bool {
for (available_libcs) |libc| {
if (target.cpu.arch == libc.arch and target.os.tag == libc.os and target.abi == libc.abi) {
if (libc.os_ver) |libc_os_ver| {
@ -176,7 +176,7 @@ pub fn muslRuntimeTriple(
return std.Target.linuxTripleSimple(allocator, arch, .linux, abi);
}
pub fn osArchName(target: std.Target) [:0]const u8 {
pub fn osArchName(target: *const std.Target) [:0]const u8 {
return switch (target.os.tag) {
.linux => switch (target.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => "arm",
@ -276,7 +276,7 @@ pub fn netbsdAbiNameHeaders(abi: std.Target.Abi) [:0]const u8 {
};
}
pub fn isLibCLibName(target: std.Target, name: []const u8) bool {
pub fn isLibCLibName(target: *const std.Target, name: []const u8) bool {
const ignore_case = target.os.tag.isDarwin() or target.os.tag == .windows;
if (eqlIgnoreCase(ignore_case, name, "c"))
@ -453,7 +453,7 @@ pub fn isLibCLibName(target: std.Target, name: []const u8) bool {
return false;
}
pub fn isLibCxxLibName(target: std.Target, name: []const u8) bool {
pub fn isLibCxxLibName(target: *const std.Target, name: []const u8) bool {
const ignore_case = target.os.tag.isDarwin() or target.os.tag == .windows;
return eqlIgnoreCase(ignore_case, name, "c++") or
@ -470,11 +470,11 @@ fn eqlIgnoreCase(ignore_case: bool, a: []const u8, b: []const u8) bool {
}
}
pub fn intByteSize(target: std.Target, bits: u16) u19 {
pub fn intByteSize(target: *const std.Target, bits: u16) u19 {
return std.mem.alignForward(u19, @intCast((@as(u17, bits) + 7) / 8), intAlignment(target, bits));
}
pub fn intAlignment(target: std.Target, bits: u16) u16 {
pub fn intAlignment(target: *const std.Target, bits: u16) u16 {
return switch (target.cpu.arch) {
.x86 => switch (bits) {
0 => 0,

View File

@ -122,8 +122,10 @@ pub const Feature = enum {
/// Legalize (shift lhs, (splat rhs)) -> (shift lhs, rhs)
unsplat_shift_rhs,
/// Legalize reduce of a one element vector to a bitcast
/// Legalize reduce of a one element vector to a bitcast.
reduce_one_elem_to_bitcast,
/// Legalize splat to a one element vector to a bitcast.
splat_one_elem_to_bitcast,
/// Replace `intcast_safe` with an explicit safety check which `call`s the panic function on failure.
/// Not compatible with `scalarize_intcast_safe`.
@ -628,7 +630,17 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
else => {},
}
},
.splat => {},
.splat => if (l.features.has(.splat_one_elem_to_bitcast)) {
const ty_op = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_op;
switch (ty_op.ty.toType().vectorLen(zcu)) {
0 => unreachable,
1 => continue :inst l.replaceInst(inst, .bitcast, .{ .ty_op = .{
.ty = ty_op.ty,
.operand = ty_op.operand,
} }),
else => {},
}
},
.shuffle_one => if (l.features.has(.scalarize_shuffle_one)) continue :inst try l.scalarize(inst, .shuffle_one),
.shuffle_two => if (l.features.has(.scalarize_shuffle_two)) continue :inst try l.scalarize(inst, .shuffle_two),
.select => if (l.features.has(.scalarize_select)) continue :inst try l.scalarize(inst, .select),
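The new legalization mirrors the existing `reduce_one_elem_to_bitcast`: splatting a scalar into a `@Vector(1, T)` moves no data, so backends that request the feature receive a plain bitcast instead. Observable semantics are unchanged:

```zig
const std = @import("std");

test "splat to a one-element vector is just the scalar" {
    const v: @Vector(1, u32) = @splat(42);
    // After legalization this splat is a bitcast; either way the single
    // element equals the splatted scalar.
    try std.testing.expectEqual(@as(u32, 42), v[0]);
}
```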

View File

@ -224,6 +224,8 @@ compiler_rt_lib: ?CrtFile = null,
/// Populated when we build the compiler_rt_obj object. A Job to build this is indicated
/// by setting `queued_jobs.compiler_rt_obj` and resolved before calling linker.flush().
compiler_rt_obj: ?CrtFile = null,
/// hack for stage2_x86_64 + coff
compiler_rt_dyn_lib: ?CrtFile = null,
/// Populated when we build the libfuzzer static library. A Job to build this
/// is indicated by setting `queued_jobs.fuzzer_lib` and resolved before
/// calling linker.flush().
@ -291,6 +293,8 @@ emit_llvm_bc: ?[]const u8,
emit_docs: ?[]const u8,
const QueuedJobs = struct {
/// hack for stage2_x86_64 + coff
compiler_rt_dyn_lib: bool = false,
compiler_rt_lib: bool = false,
compiler_rt_obj: bool = false,
ubsan_rt_lib: bool = false,
@ -1361,7 +1365,7 @@ pub const cache_helpers = struct {
hh: *Cache.HashHelper,
resolved_target: Package.Module.ResolvedTarget,
) void {
const target = resolved_target.result;
const target = &resolved_target.result;
hh.add(target.cpu.arch);
hh.addBytes(target.cpu.model.name);
hh.add(target.cpu.features.ints);
@ -1705,7 +1709,7 @@ pub const CreateOptions = struct {
assert(opts.cache_mode != .none);
return try ea.cacheName(arena, .{
.root_name = opts.root_name,
.target = opts.root_mod.resolved_target.result,
.target = &opts.root_mod.resolved_target.result,
.output_mode = opts.config.output_mode,
.link_mode = opts.config.link_mode,
.version = opts.version,
@ -1753,7 +1757,7 @@ fn addModuleTableToCacheHash(
}
}
const RtStrat = enum { none, lib, obj, zcu };
const RtStrat = enum { none, lib, obj, zcu, dyn_lib };
pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compilation {
const output_mode = options.config.output_mode;
@ -1772,14 +1776,14 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
}
const have_zcu = options.config.have_zcu;
const use_llvm = options.config.use_llvm;
const target = &options.root_mod.resolved_target.result;
const comp: *Compilation = comp: {
// We put the `Compilation` itself in the arena. Freeing the arena will free the module.
// It's initialized later after we prepare the initialization options.
const root_name = try arena.dupeZ(u8, options.root_name);
const use_llvm = options.config.use_llvm;
// The "any" values provided by resolved config only account for
// explicitly-provided settings. We now make them additionally account
// for default setting resolution.
@ -1804,7 +1808,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const libc_dirs = try std.zig.LibCDirs.detect(
arena,
options.dirs.zig_lib.path.?,
options.root_mod.resolved_target.result,
target,
options.root_mod.resolved_target.is_native_abi,
link_libc,
options.libc_installation,
@ -1816,7 +1820,11 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (options.skip_linker_dependencies) break :s .none;
const want = options.want_compiler_rt orelse is_exe_or_dyn_lib;
if (!want) break :s .none;
if (have_zcu and output_mode == .Obj) break :s .zcu;
if (have_zcu) {
if (output_mode == .Obj) break :s .zcu;
if (target.ofmt == .coff and target_util.zigBackend(target, use_llvm) == .stage2_x86_64)
break :s if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
}
if (is_exe_or_dyn_lib) break :s .lib;
break :s .obj;
};
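// Condensed sketch of the selection above (helper name and parameters are
// hypothetical; the branch order mirrors the labeled block):
fn pickCompilerRtStrat(
    want: bool,
    have_zcu: bool,
    obj_output: bool,
    coff_stage2_x86_64: bool,
    is_exe_or_dyn_lib: bool,
) RtStrat {
    if (!want) return .none;
    if (have_zcu) {
        if (obj_output) return .zcu;
        // hack for stage2_x86_64 + coff: ship compiler_rt as a DLL alongside
        // executables and shared libraries, fold it into the ZCU otherwise.
        if (coff_stage2_x86_64) return if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
    }
    return if (is_exe_or_dyn_lib) .lib else .obj;
}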
@ -1846,7 +1854,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// approach, since the ubsan runtime uses quite a lot of the standard library
// and this reduces unnecessary bloat.
const ubsan_rt_strat: RtStrat = s: {
const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(options.root_mod.resolved_target.result);
const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(target);
const want_ubsan_rt = options.want_ubsan_rt orelse (can_build_ubsan_rt and any_sanitize_c == .full and is_exe_or_dyn_lib);
if (!want_ubsan_rt) break :s .none;
if (options.skip_linker_dependencies) break :s .none;
@ -1872,7 +1880,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (options.verbose_llvm_cpu_features) {
if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: {
const target = options.root_mod.resolved_target.result;
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
const stderr = std.io.getStdErr().writer();
@ -1991,9 +1998,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
};
errdefer if (opt_zcu) |zcu| zcu.deinit();
var windows_libs = try std.StringArrayHashMapUnmanaged(void).init(gpa, options.windows_lib_names, &.{});
errdefer windows_libs.deinit(gpa);
comp.* = .{
.gpa = gpa,
.arena = arena,
@ -2038,7 +2042,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.incremental = options.incremental,
.root_name = root_name,
.sysroot = sysroot,
.windows_libs = windows_libs,
.windows_libs = .empty,
.version = options.version,
.libc_installation = libc_dirs.libc_installation,
.compiler_rt_strat = compiler_rt_strat,
@ -2066,6 +2070,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.emit_docs = try options.emit_docs.resolve(arena, &options, .docs),
};
errdefer {
for (comp.windows_libs.keys()) |windows_lib| gpa.free(windows_lib);
comp.windows_libs.deinit(gpa);
}
try comp.windows_libs.ensureUnusedCapacity(gpa, options.windows_lib_names.len);
for (options.windows_lib_names) |windows_lib| comp.windows_libs.putAssumeCapacity(try gpa.dupe(u8, windows_lib), {});
// Prevent some footguns by making the "any" fields of config reflect
// the default Module settings.
comp.config.any_unwind_tables = any_unwind_tables;
@ -2244,8 +2255,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
};
errdefer comp.destroy();
const target = comp.root_mod.resolved_target.result;
const can_build_compiler_rt = target_util.canBuildLibCompilerRt(target, comp.config.use_llvm, build_options.have_llvm);
const can_build_compiler_rt = target_util.canBuildLibCompilerRt(target, use_llvm, build_options.have_llvm);
// Add a `CObject` for each `c_source_files`.
try comp.c_object_table.ensureTotalCapacity(gpa, options.c_source_files.len);
@ -2344,7 +2354,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
comp.link_task_queue.pending_prelink_tasks += 1;
}
comp.queued_jobs.glibc_shared_objects = true;
comp.link_task_queue.pending_prelink_tasks += glibc.sharedObjectsCount(&target);
comp.link_task_queue.pending_prelink_tasks += glibc.sharedObjectsCount(target);
comp.queued_jobs.glibc_crt_file[@intFromEnum(glibc.CrtFile.libc_nonshared_a)] = true;
comp.link_task_queue.pending_prelink_tasks += 1;
@ -2389,7 +2399,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// When linking mingw-w64 there are some import libs we always need.
try comp.windows_libs.ensureUnusedCapacity(gpa, mingw.always_link_libs.len);
for (mingw.always_link_libs) |name| comp.windows_libs.putAssumeCapacity(name, {});
for (mingw.always_link_libs) |name| comp.windows_libs.putAssumeCapacity(try gpa.dupe(u8, name), {});
} else {
return error.LibCUnavailable;
}
@ -2439,6 +2449,11 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// for a compiler-rt object to put in it.
comp.queued_jobs.compiler_rt_obj = true;
comp.link_task_queue.pending_prelink_tasks += 1;
} else if (comp.compiler_rt_strat == .dyn_lib) {
// hack for stage2_x86_64 + coff
log.debug("queuing a job to build compiler_rt_dyn_lib", .{});
comp.queued_jobs.compiler_rt_dyn_lib = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
if (comp.ubsan_rt_strat == .lib) {
@ -2482,6 +2497,7 @@ pub fn destroy(comp: *Compilation) void {
comp.c_object_work_queue.deinit();
comp.win32_resource_work_queue.deinit();
for (comp.windows_libs.keys()) |windows_lib| gpa.free(windows_lib);
comp.windows_libs.deinit(gpa);
{
@ -2571,8 +2587,8 @@ pub fn clearMiscFailures(comp: *Compilation) void {
comp.misc_failures = .{};
}
pub fn getTarget(self: Compilation) Target {
return self.root_mod.resolved_target.result;
pub fn getTarget(self: *const Compilation) *const Target {
return &self.root_mod.resolved_target.result;
}
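// Call-site sketch (hypothetical helper): the accessor now hands out a
// pointer, so callers query fields through it without copying a `Target`.
fn targetIsWindows(comp: *const Compilation) bool {
    return comp.getTarget().os.tag == .windows;
}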
/// Only legal to call when cache mode is incremental and a link file is present.
@ -3210,7 +3226,7 @@ fn addNonIncrementalStuffToCacheManifest(
man.hash.addOptional(opts.image_base);
man.hash.addOptional(opts.gc_sections);
man.hash.add(opts.emit_relocs);
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
if (target.ofmt == .macho or target.ofmt == .coff) {
// TODO remove this, libraries need to be resolved by the frontend. this is already
// done by ELF.
@ -4251,6 +4267,7 @@ fn performAllTheWork(
"compiler_rt.zig",
"compiler_rt",
.Lib,
.static,
.compiler_rt,
main_progress_node,
RtOptions{
@ -4267,6 +4284,7 @@ fn performAllTheWork(
"compiler_rt.zig",
"compiler_rt",
.Obj,
.static,
.compiler_rt,
main_progress_node,
RtOptions{
@ -4277,12 +4295,31 @@ fn performAllTheWork(
});
}
// hack for stage2_x86_64 + coff
if (comp.queued_jobs.compiler_rt_dyn_lib and comp.compiler_rt_dyn_lib == null) {
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"compiler_rt.zig",
"compiler_rt",
.Lib,
.dynamic,
.compiler_rt,
main_progress_node,
RtOptions{
.checks_valgrind = true,
.allow_lto = false,
},
&comp.compiler_rt_dyn_lib,
});
}
if (comp.queued_jobs.fuzzer_lib and comp.fuzzer_lib == null) {
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"fuzzer.zig",
"fuzzer",
.Lib,
.static,
.libfuzzer,
main_progress_node,
RtOptions{},
@ -4296,6 +4333,7 @@ fn performAllTheWork(
"ubsan_rt.zig",
"ubsan_rt",
.Lib,
.static,
.libubsan,
main_progress_node,
RtOptions{
@ -4311,6 +4349,7 @@ fn performAllTheWork(
"ubsan_rt.zig",
"ubsan_rt",
.Obj,
.static,
.libubsan,
main_progress_node,
RtOptions{
@ -5387,6 +5426,7 @@ fn buildRt(
root_source_name: []const u8,
root_name: []const u8,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
misc_task: MiscTask,
prog_node: std.Progress.Node,
options: RtOptions,
@ -5396,6 +5436,7 @@ fn buildRt(
root_source_name,
root_name,
output_mode,
link_mode,
misc_task,
prog_node,
options,
@ -5551,6 +5592,7 @@ fn buildLibZigC(comp: *Compilation, prog_node: std.Progress.Node) void {
"c.zig",
"zigc",
.Lib,
.static,
.libzigc,
prog_node,
.{},
@ -6270,7 +6312,7 @@ pub fn addCCArgs(
out_dep_path: ?[]const u8,
mod: *Package.Module,
) !void {
const target = mod.resolved_target.result;
const target = &mod.resolved_target.result;
// As of Clang 16.x, it will by default read extra flags from /etc/clang.
// I'm sure the person who implemented this means well, but they have a lot
@ -6944,7 +6986,7 @@ pub const FileExt = enum {
};
}
pub fn canonicalName(ext: FileExt, target: Target) [:0]const u8 {
pub fn canonicalName(ext: FileExt, target: *const Target) [:0]const u8 {
return switch (ext) {
.c => ".c",
.cpp => ".cpp",
@ -7187,7 +7229,7 @@ pub fn dump_argv(argv: []const []const u8) void {
}
pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
return target_util.zigBackend(target, comp.config.use_llvm);
}
@ -7228,6 +7270,7 @@ fn buildOutputFromZig(
src_basename: []const u8,
root_name: []const u8,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
misc_task_tag: MiscTask,
prog_node: std.Progress.Node,
options: RtOptions,
@ -7248,7 +7291,7 @@ fn buildOutputFromZig(
const config = try Config.resolve(.{
.output_mode = output_mode,
.link_mode = .static,
.link_mode = link_mode,
.resolved_target = comp.root_mod.resolved_target,
.is_test = false,
.have_zcu = true,
@ -7371,7 +7414,7 @@ pub fn build_crt_file(
const basename = try std.zig.binNameAlloc(gpa, .{
.root_name = root_name,
.target = comp.root_mod.resolved_target.result,
.target = &comp.root_mod.resolved_target.result,
.output_mode = output_mode,
});
@ -7523,13 +7566,13 @@ pub fn getCrtPaths(
comp: *Compilation,
arena: Allocator,
) error{ OutOfMemory, LibCInstallationMissingCrtDir }!LibCInstallation.CrtPaths {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
return getCrtPathsInner(arena, target, comp.config, comp.libc_installation, &comp.crt_files);
}
fn getCrtPathsInner(
arena: Allocator,
target: std.Target,
target: *const std.Target,
config: Config,
libc_installation: ?*const LibCInstallation,
crt_files: *std.StringHashMapUnmanaged(CrtFile),
@ -7558,14 +7601,19 @@ pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void {
// then when we create a sub-Compilation for zig libc, it also tries to
// build kernel32.lib.
if (comp.skip_linker_dependencies) return;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
if (target.os.tag != .windows or target.ofmt == .c) return;
// This happens when an `extern "foo"` function is referenced.
// If we haven't seen this library yet and we're targeting Windows, we need
// to queue up a work item to produce the DLL import library for this.
const gop = try comp.windows_libs.getOrPut(comp.gpa, lib_name);
if (!gop.found_existing) try comp.queueJob(.{ .windows_import_lib = comp.windows_libs.count() - 1 });
if (gop.found_existing) return;
{
errdefer _ = comp.windows_libs.pop();
gop.key_ptr.* = try comp.gpa.dupe(u8, lib_name);
}
try comp.queueJob(.{ .windows_import_lib = gop.index });
}
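// General form of the owned-key insertion above (a sketch; the helper name is
// hypothetical, `std` as imported by this file): dupe the key only once
// getOrPut reports a new entry, and pop the entry again if the dupe fails so
// the map never retains a borrowed key.
fn putOwnedKey(
    gpa: std.mem.Allocator,
    map: *std.StringArrayHashMapUnmanaged(void),
    borrowed_key: []const u8,
) !bool {
    const gop = try map.getOrPut(gpa, borrowed_key);
    if (gop.found_existing) return false;
    errdefer _ = map.pop();
    gop.key_ptr.* = try gpa.dupe(u8, borrowed_key);
    return true;
}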
/// This decides the optimization mode for all zig-provided libraries, including
@ -7574,7 +7622,7 @@ pub fn compilerRtOptMode(comp: Compilation) std.builtin.OptimizeMode {
if (comp.debug_compiler_runtime_libs) {
return comp.root_mod.optimize_mode;
}
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
switch (comp.root_mod.optimize_mode) {
.Debug, .ReleaseSafe => return target_util.defaultCompilerRtOptimizeMode(target),
.ReleaseFast => return .ReleaseFast,

View File

@ -150,7 +150,7 @@ pub const ResolveError = error{
};
pub fn resolve(options: Options) ResolveError!Config {
const target = options.resolved_target.result;
const target = &options.resolved_target.result;
// WASI-only. Resolve the optional exec-model option, defaults to command.
if (target.os.tag != .wasi and options.wasi_exec_model != null)

View File

@ -102,7 +102,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
if (options.inherited.error_tracing == true) assert(options.global.any_error_tracing);
const resolved_target = options.inherited.resolved_target orelse options.parent.?.resolved_target;
const target = resolved_target.result;
const target = &resolved_target.result;
const optimize_mode = options.inherited.optimize_mode orelse
if (options.parent) |p| p.optimize_mode else options.global.root_optimize_mode;
@ -363,7 +363,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.root_src_path = options.paths.root_src_path,
.fully_qualified_name = options.fully_qualified_name,
.resolved_target = .{
.result = target,
.result = target.*,
.is_native_os = resolved_target.is_native_os,
.is_native_abi = resolved_target.is_native_abi,
.is_explicit_dynamic_linker = resolved_target.is_explicit_dynamic_linker,
@ -474,7 +474,7 @@ pub fn getBuiltinOptions(m: Module, global: Compilation.Config) Builtin {
assert(global.have_zcu);
return .{
.target = m.resolved_target.result,
.zig_backend = target_util.zigBackend(m.resolved_target.result, global.use_llvm),
.zig_backend = target_util.zigBackend(&m.resolved_target.result, global.use_llvm),
.output_mode = global.output_mode,
.link_mode = global.link_mode,
.unwind_tables = m.unwind_tables,

View File

@ -29912,7 +29912,7 @@ pub fn coerceInMemoryAllowed(
/// load from the `*Src` to effectively perform an in-memory coercion from `Dest` to `Src`.
/// Therefore, when `dest_is_mut`, the in-memory coercion must be valid in *both directions*.
dest_is_mut: bool,
target: std.Target,
target: *const std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
src_val: ?Value,
@ -30271,7 +30271,7 @@ fn coerceInMemoryAllowedFns(
src_ty: Type,
/// If set, the coercion must be valid in both directions.
dest_is_mut: bool,
target: std.Target,
target: *const std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {
@ -30380,7 +30380,7 @@ fn coerceInMemoryAllowedFns(
}
fn callconvCoerceAllowed(
target: std.Target,
target: *const std.Target,
src_cc: std.builtin.CallingConvention,
dest_cc: std.builtin.CallingConvention,
) bool {
@ -30426,7 +30426,7 @@ fn coerceInMemoryAllowedPtrs(
src_ptr_ty: Type,
/// If set, the coercion must be valid in both directions.
dest_is_mut: bool,
target: std.Target,
target: *const std.Target,
dest_src: LazySrcLoc,
src_src: LazySrcLoc,
) !InMemoryCoercionResult {

View File

@ -1602,7 +1602,7 @@ fn abiSizeInnerOptional(
};
}
pub fn ptrAbiAlignment(target: Target) Alignment {
pub fn ptrAbiAlignment(target: *const Target) Alignment {
return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8));
}
@ -2395,7 +2395,7 @@ pub fn isAnyFloat(ty: Type) bool {
/// Asserts the type is a fixed-size float or comptime_float.
/// Returns 128 for comptime_float types.
pub fn floatBits(ty: Type, target: Target) u16 {
pub fn floatBits(ty: Type, target: *const Target) u16 {
return switch (ty.toIntern()) {
.f16_type => 16,
.f32_type => 32,
@ -4188,6 +4188,6 @@ pub fn smallestUnsignedBits(max: u64) u16 {
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;
fn cTypeAlign(target: Target, c_type: Target.CType) Alignment {
fn cTypeAlign(target: *const Target, c_type: Target.CType) Alignment {
return Alignment.fromByteUnits(target.cTypeAlignment(c_type));
}

View File

@ -3773,8 +3773,8 @@ pub fn errNote(
/// Deprecated. There is no global target for a Zig Compilation Unit. Instead,
/// look up the target based on the Module that contains the source code being
/// analyzed.
pub fn getTarget(zcu: *const Zcu) Target {
return zcu.root_mod.resolved_target.result;
pub fn getTarget(zcu: *const Zcu) *const Target {
return &zcu.root_mod.resolved_target.result;
}
/// Deprecated. There is no global optimization mode for a Zig Compilation
@ -3863,7 +3863,7 @@ pub const Feature = enum {
};
pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {
const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
return target_util.backendSupportsFeature(backend, feature);
}

View File

@ -4382,7 +4382,7 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, ou
error.CodegenFail => zcu.assertCodegenFailed(zcu.funcInfo(func_index).owner_nav),
error.NoLinkFile => assert(zcu.comp.bin_file == null),
error.BackendDoesNotProduceMir => switch (target_util.zigBackend(
zcu.root_mod.resolved_target.result,
&zcu.root_mod.resolved_target.result,
zcu.comp.config.use_llvm,
)) {
else => unreachable, // assertion failure

View File

@ -6175,7 +6175,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
self.pt,
self.src_loc,
val,
self.target.*,
self.target,
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@ -6379,7 +6379,7 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
},
.stack_pointer => unreachable, // we can't store/load the sp
.floating_point => {
return switch (ty.floatBits(self.target.*)) {
return switch (ty.floatBits(self.target)) {
16 => reg.toH(),
32 => reg.toS(),
64 => reg.toD(),

View File

@ -6148,7 +6148,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
pt,
self.src_loc,
val,
self.target.*,
self.target,
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,

View File

@ -1881,7 +1881,7 @@ fn memSize(func: *Func, ty: Type) Memory.Size {
const pt = func.pt;
const zcu = pt.zcu;
return switch (ty.zigTypeTag(zcu)) {
.float => Memory.Size.fromBitSize(ty.floatBits(func.target.*)),
.float => Memory.Size.fromBitSize(ty.floatBits(func.target)),
else => Memory.Size.fromByteSize(ty.abiSize(zcu)),
};
}
@ -2401,7 +2401,7 @@ fn binOp(
const rhs_ty = func.typeOf(rhs_air);
if (lhs_ty.isRuntimeFloat()) libcall: {
const float_bits = lhs_ty.floatBits(func.target.*);
const float_bits = lhs_ty.floatBits(func.target);
const type_needs_libcall = switch (float_bits) {
16 => true,
32, 64 => false,
@ -5189,7 +5189,7 @@ fn airCmp(func: *Func, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
}
},
.float => {
const float_bits = lhs_ty.floatBits(func.target.*);
const float_bits = lhs_ty.floatBits(func.target);
const float_reg_size: u32 = if (func.hasFeature(.d)) 64 else 32;
if (float_bits > float_reg_size) {
return func.fail("TODO: airCmp float > 64/32 bits", .{});
@ -5962,10 +5962,14 @@ fn airBr(func: *Func, inst: Air.Inst.Index) !void {
if (first_br) break :result src_mcv;
try func.getValue(block_tracking.short, br.block_inst);
// .long = .none to avoid merging operand and block result stack frames.
const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv };
try current_tracking.materializeUnsafe(func, br.block_inst, block_tracking.*);
for (current_tracking.getRegs()) |src_reg| func.register_manager.freeReg(src_reg);
try InstTracking.materializeUnsafe(
// .long = .none to avoid merging operand and block result stack frames.
.{ .long = .none, .short = src_mcv },
func,
br.block_inst,
block_tracking.*,
);
try func.freeValue(src_mcv);
break :result block_tracking.short;
}
@ -8192,10 +8196,13 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
const lf = func.bin_file;
const src_loc = func.src_loc;
const result = if (val.isUndef(pt.zcu))
try lf.lowerUav(pt, val.toIntern(), .none, src_loc)
const result: codegen.GenResult = if (val.isUndef(pt.zcu))
switch (try lf.lowerUav(pt, val.toIntern(), .none, src_loc)) {
.sym_index => |sym_index| .{ .mcv = .{ .load_symbol = sym_index } },
.fail => |em| .{ .fail = em },
}
else
try codegen.genTypedValue(lf, pt, src_loc, val, func.target.*);
try codegen.genTypedValue(lf, pt, src_loc, val, func.target);
const mcv: MCValue = switch (result) {
.mcv => |mcv| switch (mcv) {
.none => .none,
@ -8484,7 +8491,7 @@ fn promoteInt(func: *Func, ty: Type) Type {
fn promoteVarArg(func: *Func, ty: Type) Type {
if (!ty.isRuntimeFloat()) return func.promoteInt(ty);
switch (ty.floatBits(func.target.*)) {
switch (ty.floatBits(func.target)) {
32, 64 => return Type.f64,
else => |float_bits| {
assert(float_bits == func.target.cTypeBitSize(.longdouble));

View File

@ -4088,7 +4088,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
pt,
self.src_loc,
val,
self.target.*,
self.target,
)) {
.mcv => |mcv| switch (mcv) {
.none => .none,

View File

@ -982,7 +982,7 @@ fn addExtraAssumeCapacity(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
pub fn typeToValtype(ty: Type, zcu: *const Zcu, target: *const std.Target) std.wasm.Valtype {
const ip = &zcu.intern_pool;
return switch (ty.zigTypeTag(zcu)) {
.float => switch (ty.floatBits(target.*)) {
.float => switch (ty.floatBits(target)) {
16 => .i32, // stored/loaded as u16
32 => .f32,
64 => .f64,
@ -1715,7 +1715,7 @@ fn isByRef(ty: Type, zcu: *const Zcu, target: *const std.Target) bool {
.vector => return determineSimdStoreStrategy(ty, zcu, target) == .unrolled,
.int => return ty.intInfo(zcu).bits > 64,
.@"enum" => return ty.intInfo(zcu).bits > 64,
.float => return ty.floatBits(target.*) > 64,
.float => return ty.floatBits(target) > 64,
.error_union => {
const pl_ty = ty.errorUnionPayload(zcu);
if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
@ -2904,7 +2904,7 @@ fn floatOp(cg: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) Inne
return cg.fail("TODO: Implement floatOps for vectors", .{});
}
const float_bits = ty.floatBits(cg.target.*);
const float_bits = ty.floatBits(cg.target);
if (float_op == .neg) {
return cg.floatNeg(ty, args[0]);
@ -2931,7 +2931,7 @@ fn floatOp(cg: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) Inne
/// NOTE: The result value remains on top of the stack.
fn floatNeg(cg: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
const float_bits = ty.floatBits(cg.target.*);
const float_bits = ty.floatBits(cg.target);
switch (float_bits) {
16 => {
try cg.emitWValue(arg);
@ -3300,7 +3300,7 @@ fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue {
33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
else => unreachable,
},
.float => switch (ty.floatBits(cg.target.*)) {
.float => switch (ty.floatBits(cg.target)) {
16 => return .{ .imm32 = 0xaaaaaaaa },
32 => return .{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) },
64 => return .{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) },
@ -3507,7 +3507,7 @@ fn cmp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOpe
/// Compares two floats.
/// NOTE: Leaves the result of the comparison on top of the stack.
fn cmpFloat(cg: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue {
const float_bits = ty.floatBits(cg.target.*);
const float_bits = ty.floatBits(cg.target);
const op: Op = switch (cmp_op) {
.lt => .lt,
@ -4919,7 +4919,7 @@ fn airIntFromFloat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try cg.resolveInst(ty_op.operand);
const op_ty = cg.typeOf(ty_op.operand);
const op_bits = op_ty.floatBits(cg.target.*);
const op_bits = op_ty.floatBits(cg.target);
const dest_ty = cg.typeOfIndex(inst);
const dest_info = dest_ty.intInfo(zcu);
@ -4973,7 +4973,7 @@ fn airFloatFromInt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const op_info = op_ty.intInfo(zcu);
const dest_ty = cg.typeOfIndex(inst);
const dest_bits = dest_ty.floatBits(cg.target.*);
const dest_bits = dest_ty.floatBits(cg.target);
if (op_info.bits > 128) {
return cg.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits});
@ -5567,8 +5567,8 @@ fn airFpext(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Extends a float from a given `Type` to a larger wanted `Type`, leaving the
/// result on the stack.
fn fpext(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const given_bits = given.floatBits(cg.target.*);
const wanted_bits = wanted.floatBits(cg.target.*);
const given_bits = given.floatBits(cg.target);
const wanted_bits = wanted.floatBits(cg.target);
const intrinsic: Mir.Intrinsic = switch (given_bits) {
16 => switch (wanted_bits) {
@ -5621,8 +5621,8 @@ fn airFptrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
/// Truncates a float from a given `Type` to its wanted `Type`, leaving the
/// result on the stack.
fn fptrunc(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
const given_bits = given.floatBits(cg.target.*);
const wanted_bits = wanted.floatBits(cg.target.*);
const given_bits = given.floatBits(cg.target);
const wanted_bits = wanted.floatBits(cg.target);
const intrinsic: Mir.Intrinsic = switch (given_bits) {
32 => switch (wanted_bits) {
@ -6231,7 +6231,7 @@ fn airMaxMin(
if (ty.zigTypeTag(zcu) == .float) {
const intrinsic = switch (op) {
inline .fmin, .fmax => |ct_op| switch (ty.floatBits(cg.target.*)) {
inline .fmin, .fmax => |ct_op| switch (ty.floatBits(cg.target)) {
inline 16, 32, 64, 80, 128 => |bits| @field(
Mir.Intrinsic,
libcFloatPrefix(bits) ++ @tagName(ct_op) ++ libcFloatSuffix(bits),
@ -6268,7 +6268,7 @@ fn airMulAdd(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const lhs = try cg.resolveInst(bin_op.lhs);
const rhs = try cg.resolveInst(bin_op.rhs);
const result = if (ty.floatBits(cg.target.*) == 16) fl_result: {
const result = if (ty.floatBits(cg.target) == 16) fl_result: {
const rhs_ext = try cg.fpext(rhs, ty, Type.f32);
const lhs_ext = try cg.fpext(lhs, ty, Type.f32);
const addend_ext = try cg.fpext(addend, ty, Type.f32);
@ -6667,7 +6667,7 @@ fn airDivFloor(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
_ = try cg.wrapOperand(.stack, ty);
}
} else {
const float_bits = ty.floatBits(cg.target.*);
const float_bits = ty.floatBits(cg.target);
if (float_bits > 64) {
return cg.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
}

File diff suppressed because it is too large

View File

@ -105,9 +105,9 @@ pub fn emitMir(emit: *Emit) Error!void {
emit.pt,
emit.lower.src_loc,
nav,
emit.lower.target.*,
emit.lower.target,
)) {
.mcv => |mcv| mcv.lea_symbol,
.sym_index => |sym_index| sym_index,
.fail => |em| {
assert(emit.lower.err_msg == null);
emit.lower.err_msg = em;
@ -151,7 +151,7 @@ pub fn emitMir(emit: *Emit) Error!void {
Type.fromInterned(uav.orig_ty).ptrAlignment(emit.pt.zcu),
emit.lower.src_loc,
)) {
.mcv => |mcv| mcv.load_symbol,
.sym_index => |sym_index| sym_index,
.fail => |em| {
assert(emit.lower.err_msg == null);
emit.lower.err_msg = em;
@ -186,7 +186,7 @@ pub fn emitMir(emit: *Emit) Error!void {
else if (emit.bin_file.cast(.macho)) |macho_file|
try macho_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
else if (emit.bin_file.cast(.coff)) |coff_file|
link.File.Coff.global_symbol_bit | try coff_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
try coff_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, "compiler_rt")
else
return emit.fail("external symbols unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
.is_extern = true,
@ -542,16 +542,13 @@ pub fn emitMir(emit: *Emit) Error!void {
emit.pt,
emit.lower.src_loc,
nav,
emit.lower.target.*,
emit.lower.target,
) catch |err| switch (err) {
error.CodegenFail,
=> return emit.fail("unable to codegen: {s}", .{@errorName(err)}),
else => |e| return e,
}) {
.mcv => |mcv| switch (mcv) {
else => unreachable,
.load_direct, .load_symbol => |sym_index| sym_index,
},
.sym_index => |sym_index| sym_index,
.fail => |em| {
assert(emit.lower.err_msg == null);
emit.lower.err_msg = em;
@ -564,10 +561,7 @@ pub fn emitMir(emit: *Emit) Error!void {
Type.fromInterned(uav.orig_ty).ptrAlignment(emit.pt.zcu),
emit.lower.src_loc,
)) {
.mcv => |mcv| switch (mcv) {
else => unreachable,
.load_direct, .load_symbol => |sym_index| sym_index,
},
.sym_index => |sym_index| sym_index,
.fail => |em| {
assert(emit.lower.err_msg == null);
emit.lower.err_msg = em;

View File

@ -598,7 +598,7 @@ pub const Op = enum {
.rax => .rax,
.cl => .cl,
.dx => .dx,
else => switch (reg.bitSize()) {
else => switch (reg.size().bitSize(target)) {
8 => .r8,
16 => .r16,
32 => .r32,
@ -615,7 +615,7 @@ pub const Op = enum {
.mmx => .mm,
.sse => switch (reg) {
.xmm0 => .xmm0,
else => switch (reg.bitSize()) {
else => switch (reg.size().bitSize(target)) {
128 => .xmm,
256 => .ymm,
else => unreachable,

View File

@ -12,6 +12,8 @@ extra: []const u32,
string_bytes: []const u8,
locals: []const Local,
table: []const Inst.Index,
/// Optional data which, when present, can be used to speed up encoding.
memoized_encodings: []const u0 = &.{},
frame_locs: std.MultiArrayList(FrameLoc).Slice,
pub const Inst = struct {
@ -1963,6 +1965,7 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
gpa.free(mir.string_bytes);
gpa.free(mir.locals);
gpa.free(mir.table);
gpa.free(mir.memoized_encodings);
mir.frame_locs.deinit(gpa);
mir.* = undefined;
}

View File

@ -1,20 +1,86 @@
pub const Class = enum {
/// INTEGER: This class consists of integral types that fit into one of the general
/// purpose registers.
integer,
/// SSE: The class consists of types that fit into a vector register.
sse,
/// SSEUP: The class consists of types that fit into a vector register and can be passed
/// and returned in the upper bytes of it.
sseup,
/// X87, X87UP: These classes consist of types that will be returned via the
/// x87 FPU.
x87,
/// The 15-bit exponent, 1-bit sign, and 6 bytes of padding of an `f80`.
x87up,
complex_x87,
memory,
/// NO_CLASS: This class is used as initializer in the algorithms. It will be used for
/// padding and empty structures and unions.
none,
/// MEMORY: This class consists of types that will be passed and returned in
/// memory via the stack.
memory,
/// Win64 passes 128-bit integers as `Class.memory` but returns them as `Class.sse`.
win_i128,
/// A `Class.sse` containing one `f32`.
float,
/// A `Class.sse` containing two `f32`s.
float_combine,
/// Clang passes each vector element in a separate `Class.integer`, but returns
/// them as `Class.memory`.
integer_per_element,
fn isX87(class: Class) bool {
pub const one_integer: [8]Class = .{
.integer, .none, .none, .none,
.none, .none, .none, .none,
};
pub const two_integers: [8]Class = .{
.integer, .integer, .none, .none,
.none, .none, .none, .none,
};
pub const three_integers: [8]Class = .{
.integer, .integer, .integer, .none,
.none, .none, .none, .none,
};
pub const four_integers: [8]Class = .{
.integer, .integer, .integer, .integer,
.none, .none, .none, .none,
};
pub const len_integers: [8]Class = .{
.integer_per_element, .none, .none, .none,
.none, .none, .none, .none,
};
pub const @"f16" = @"f64";
pub const @"f32": [8]Class = .{
.float, .none, .none, .none,
.none, .none, .none, .none,
};
pub const @"f64": [8]Class = .{
.sse, .none, .none, .none,
.none, .none, .none, .none,
};
pub const @"f80": [8]Class = .{
.x87, .x87up, .none, .none,
.none, .none, .none, .none,
};
pub const @"f128": [8]Class = .{
.sse, .sseup, .none, .none,
.none, .none, .none, .none,
};
/// COMPLEX_X87: This class consists of types that will be returned via the x87
/// FPU.
pub const complex_x87: [8]Class = .{
.x87, .x87up, .x87, .x87up,
.none, .none, .none, .none,
};
pub const stack: [8]Class = .{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
pub fn isX87(class: Class) bool {
return switch (class) {
.x87, .x87up, .complex_x87 => true,
.x87, .x87up => true,
else => false,
};
}
@ -44,7 +110,7 @@ pub const Class = enum {
}
};
pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
pub fn classifyWindows(ty: Type, zcu: *Zcu, target: *const std.Target) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8
@ -53,7 +119,7 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
// "All floating point operations are done using the 16 XMM registers."
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size."
switch (ty.zigTypeTag(zcu)) {
return switch (ty.zigTypeTag(zcu)) {
.pointer,
.int,
.bool,
@ -70,19 +136,23 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
.frame,
=> switch (ty.abiSize(zcu)) {
0 => unreachable,
1, 2, 4, 8 => return .integer,
1, 2, 4, 8 => .integer,
else => switch (ty.zigTypeTag(zcu)) {
.int => return .win_i128,
.@"struct", .@"union" => if (ty.containerLayout(zcu) == .@"packed") {
return .win_i128;
} else {
return .memory;
},
else => return .memory,
.int => .win_i128,
.@"struct", .@"union" => if (ty.containerLayout(zcu) == .@"packed")
.win_i128
else
.memory,
else => .memory,
},
},
.float, .vector => return .sse,
.float => switch (ty.floatBits(target)) {
16, 32, 64, 128 => .sse,
80 => .memory,
else => unreachable,
},
.vector => .sse,
.type,
.comptime_float,
@ -93,171 +163,109 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
.@"opaque",
.enum_literal,
=> unreachable,
}
};
}
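// Expected results under the rules above (a worked sketch, not tests from this
// diff): u64, *T, and bool classify as .integer; u128 as .win_i128; a
// non-packed extern struct of size 24 as .memory; f16/f32/f64/f128 as .sse;
// and f80 now lands in .memory, since Win64 has no register class for the
// 80-bit x87 format.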
pub const Context = enum { ret, arg, field, other };
pub const Context = enum { ret, arg, other };
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Context) [8]Class {
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag(zcu)) {
.pointer => switch (ty.ptrSize(zcu)) {
.slice => {
result[0] = .integer;
result[1] = .integer;
return result;
},
else => {
result[0] = .integer;
return result;
},
.slice => return Class.two_integers,
else => return Class.one_integer,
},
.int, .@"enum", .error_set => {
const bits = ty.intInfo(zcu).bits;
if (bits <= 64) {
result[0] = .integer;
return result;
}
if (bits <= 128) {
result[0] = .integer;
result[1] = .integer;
return result;
}
if (bits <= 192) {
result[0] = .integer;
result[1] = .integer;
result[2] = .integer;
return result;
}
if (bits <= 256) {
result[0] = .integer;
result[1] = .integer;
result[2] = .integer;
result[3] = .integer;
return result;
}
return memory_class;
if (bits <= 64 * 1) return Class.one_integer;
if (bits <= 64 * 2) return Class.two_integers;
if (bits <= 64 * 3) return Class.three_integers;
if (bits <= 64 * 4) return Class.four_integers;
return Class.stack;
},
.bool, .void, .noreturn => {
result[0] = .integer;
return result;
},
.float => switch (ty.floatBits(target.*)) {
.bool, .void, .noreturn => return Class.one_integer,
.float => switch (ty.floatBits(target)) {
16 => {
if (ctx == .field) {
result[0] = .memory;
} else {
// TODO clang doesn't allow __fp16 as .ret or .arg
result[0] = .sse;
}
return result;
},
32 => {
result[0] = .float;
return result;
},
64 => {
result[0] = .sse;
return result;
},
128 => {
// "Arguments of types __float128, _Decimal128 and __m128 are
// split into two halves. The least significant ones belong
// to class SSE, the most significant one to class SSEUP."
result[0] = .sse;
result[1] = .sseup;
return result;
},
80 => {
// "The 64-bit mantissa of arguments of type long double
// belongs to classX87, the 16-bit exponent plus 6 bytes
// of padding belongs to class X87UP."
result[0] = .x87;
result[1] = .x87up;
return result;
if (ctx == .other) return Class.stack;
// TODO clang doesn't allow __fp16 as .ret or .arg
return Class.f16;
},
32 => return Class.f32,
64 => return Class.f64,
// "Arguments of types __float128, _Decimal128 and __m128 are
// split into two halves. The least significant ones belong
// to class SSE, the most significant one to class SSEUP."
128 => return Class.f128,
// "The 64-bit mantissa of arguments of type long double
// belongs to class X87, the 16-bit exponent plus 6 bytes
// of padding belongs to class X87UP."
80 => return Class.f80,
else => unreachable,
},
.vector => {
const elem_ty = ty.childType(zcu);
const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
if (elem_ty.toIntern() == .bool_type) {
if (bits <= 32) return .{
.integer, .none, .none, .none,
.none, .none, .none, .none,
};
if (bits <= 64) return .{
.sse, .none, .none, .none,
.none, .none, .none, .none,
};
if (ctx == .arg) {
if (bits <= 128) return .{
.integer_per_element, .none, .none, .none,
.none, .none, .none, .none,
};
if (bits <= 256 and target.cpu.has(.x86, .avx)) return .{
.integer_per_element, .none, .none, .none,
.none, .none, .none, .none,
};
if (bits <= 512 and target.cpu.has(.x86, .avx512f)) return .{
.integer_per_element, .none, .none, .none,
.none, .none, .none, .none,
};
}
return memory_class;
if (bits <= 32) return Class.one_integer;
if (bits <= 64) return Class.f64;
if (ctx == .other) return Class.stack;
if (bits <= 128) return Class.len_integers;
if (bits <= 256 and target.cpu.has(.x86, .avx)) return Class.len_integers;
if (bits <= 512 and target.cpu.has(.x86, .avx512f)) return Class.len_integers;
return Class.stack;
}
if (bits <= 64) return .{
if (elem_ty.isRuntimeFloat() and elem_ty.floatBits(target) == 80) {
if (bits <= 80 * 1) return Class.f80;
if (bits <= 80 * 2) return Class.complex_x87;
return Class.stack;
}
if (bits <= 64 * 1) return .{
.sse, .none, .none, .none,
.none, .none, .none, .none,
};
if (bits <= 128) return .{
if (bits <= 64 * 2) return .{
.sse, .sseup, .none, .none,
.none, .none, .none, .none,
};
if (ctx == .arg and !target.cpu.has(.x86, .avx)) return memory_class;
if (bits <= 192) return .{
if (ctx == .arg and !target.cpu.has(.x86, .avx)) return Class.stack;
if (bits <= 64 * 3) return .{
.sse, .sseup, .sseup, .none,
.none, .none, .none, .none,
};
if (bits <= 256) return .{
if (bits <= 64 * 4) return .{
.sse, .sseup, .sseup, .sseup,
.none, .none, .none, .none,
};
if (ctx == .arg and !target.cpu.has(.x86, .avx512f)) return memory_class;
if (bits <= 320) return .{
if (ctx == .arg and !target.cpu.has(.x86, .avx512f)) return Class.stack;
if (bits <= 64 * 5) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .none, .none, .none,
};
if (bits <= 384) return .{
if (bits <= 64 * 6) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .sseup, .none, .none,
};
if (bits <= 448) return .{
if (bits <= 64 * 7) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .sseup, .sseup, .none,
};
if (bits <= 512 or (ctx == .ret and bits <= @as(u64, if (target.cpu.has(.x86, .avx512f))
2048
if (bits <= 64 * 8 or (ctx == .ret and bits <= @as(u64, if (target.cpu.has(.x86, .avx512f))
64 * 32
else if (target.cpu.has(.x86, .avx))
1024
64 * 16
else
512))) return .{
64 * 8))) return .{
.sse, .sseup, .sseup, .sseup,
.sseup, .sseup, .sseup, .sseup,
};
return memory_class;
return Class.stack;
},
.optional => {
if (ty.optionalReprIsPayload(zcu)) {
return classifySystemV(ty.optionalChild(zcu), zcu, target, ctx);
}
return memory_class;
return Class.stack;
},
.@"struct", .@"union" => {
// "If the size of an object is larger than eight eightbytes, or
@ -269,15 +277,14 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
.auto => unreachable,
.@"extern" => {},
.@"packed" => {
assert(ty_size <= 16);
result[0] = .integer;
if (ty_size > 8) result[1] = .integer;
return result;
if (ty_size <= 8) return Class.one_integer;
if (ty_size <= 16) return Class.two_integers;
unreachable; // frontend should not have allowed this type as extern
},
}
if (ty_size > 64)
return memory_class;
if (ty_size > 64) return Class.stack;
var result: [8]Class = @splat(.none);
_ = if (zcu.typeToStruct(ty)) |loaded_struct|
classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
else if (zcu.typeToUnion(ty)) |loaded_union|
@ -290,15 +297,15 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
// "If one of the classes is MEMORY, the whole argument is passed in memory"
// "If X87UP is not preceded by X87, the whole argument is passed in memory."
for (result, 0..) |class, i| switch (class) {
.memory => return memory_class,
.x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
.memory => return Class.stack,
.x87up => if (i == 0 or result[i - 1] != .x87) return Class.stack,
else => continue,
};
// "If the size of the aggregate exceeds two eightbytes and the first eight-
// byte isnt SSE or any other eightbyte isnt SSEUP, the whole argument
// is passed in memory."
if (ty_size > 16 and (result[0] != .sse or
std.mem.indexOfNone(Class, result[1..], &.{ .sseup, .none }) != null)) return memory_class;
std.mem.indexOfNone(Class, result[1..], &.{ .sseup, .none }) != null)) return Class.stack;
// "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
for (&result, 0..) |*item, i| {
@ -311,16 +318,9 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
},
.array => {
const ty_size = ty.abiSize(zcu);
if (ty_size <= 8) {
result[0] = .integer;
return result;
}
if (ty_size <= 16) {
result[0] = .integer;
result[1] = .integer;
return result;
}
return memory_class;
if (ty_size <= 8) return Class.one_integer;
if (ty_size <= 16) return Class.two_integers;
return Class.stack;
},
else => unreachable,
}
@ -363,7 +363,7 @@ fn classifySystemVStruct(
.@"packed" => {},
}
}
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .other), .none);
for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
byte_offset += field_ty.abiSize(zcu);
@ -406,7 +406,7 @@ fn classifySystemVUnion(
.@"packed" => {},
}
}
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .other), .none);
for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
result_class.* = result_class.combineSystemV(field_class);
}
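// Consumption sketch (hypothetical helper): as the two call sites above do,
// trim the fixed 8-slot result at the first .none to get the active classes.
fn usedClasses(classes: *const [8]Class) []const Class {
    return std.mem.sliceTo(classes, .none);
}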

View File

@ -465,25 +465,25 @@ pub const Register = enum(u8) {
return @intCast(@intFromEnum(reg) - base);
}
pub fn bitSize(reg: Register) u10 {
pub fn size(reg: Register) Memory.Size {
return switch (@intFromEnum(reg)) {
// zig fmt: off
@intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => 64,
@intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => 32,
@intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => 16,
@intFromEnum(Register.al) ... @intFromEnum(Register.r15b) => 8,
@intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => 8,
@intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => .qword,
@intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => .dword,
@intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => .word,
@intFromEnum(Register.al) ... @intFromEnum(Register.r15b) => .byte,
@intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => .byte,
@intFromEnum(Register.zmm0) ... @intFromEnum(Register.zmm15) => 512,
@intFromEnum(Register.ymm0) ... @intFromEnum(Register.ymm15) => 256,
@intFromEnum(Register.xmm0) ... @intFromEnum(Register.xmm15) => 128,
@intFromEnum(Register.mm0) ... @intFromEnum(Register.mm7) => 64,
@intFromEnum(Register.st0) ... @intFromEnum(Register.st7) => 80,
@intFromEnum(Register.zmm0) ... @intFromEnum(Register.zmm15) => .zword,
@intFromEnum(Register.ymm0) ... @intFromEnum(Register.ymm15) => .yword,
@intFromEnum(Register.xmm0) ... @intFromEnum(Register.xmm15) => .xword,
@intFromEnum(Register.mm0) ... @intFromEnum(Register.mm7) => .qword,
@intFromEnum(Register.st0) ... @intFromEnum(Register.st7) => .tbyte,
@intFromEnum(Register.es) ... @intFromEnum(Register.gs) => 16,
@intFromEnum(Register.es) ... @intFromEnum(Register.gs) => .word,
@intFromEnum(Register.cr0) ... @intFromEnum(Register.cr15) => 64,
@intFromEnum(Register.dr0) ... @intFromEnum(Register.dr15) => 64,
@intFromEnum(Register.cr0) ... @intFromEnum(Register.cr15) => .gpr,
@intFromEnum(Register.dr0) ... @intFromEnum(Register.dr15) => .gpr,
else => unreachable,
// zig fmt: on
@ -549,8 +549,8 @@ pub const Register = enum(u8) {
};
}
pub fn toSize(reg: Register, size: Memory.Size, target: *const std.Target) Register {
return switch (size) {
pub fn toSize(reg: Register, new_size: Memory.Size, target: *const std.Target) Register {
return switch (new_size) {
.none => unreachable,
.ptr => reg.toBitSize(target.ptrBitWidth()),
.gpr => switch (target.cpu.arch) {

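// Call-site sketch (hypothetical helper, assumes this file's imports): widths
// are now derived from the symbolic `Memory.Size`, which lets entries such as
// `.gpr` resolve against the target instead of a hardcoded bit count.
fn isWideSse(reg: Register, target: *const std.Target) bool {
    return switch (reg.size().bitSize(target)) {
        256, 512 => true, // ymm/zmm
        else => false,
    };
}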
View File

@ -65,7 +65,7 @@ fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*const Air.Legalize.Features {
const zcu = pt.zcu;
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
switch (target_util.zigBackend(target.*, zcu.comp.config.use_llvm)) {
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_llvm,
.stage2_c,
@ -114,7 +114,7 @@ pub const AnyMir = union {
pub fn deinit(mir: *AnyMir, zcu: *const Zcu) void {
const gpa = zcu.gpa;
const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
switch (backend) {
else => unreachable,
inline .stage2_aarch64,
@ -145,7 +145,7 @@ pub fn generateFunction(
) CodeGenError!AnyMir {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
inline .stage2_aarch64,
@ -183,7 +183,7 @@ pub fn emitFunction(
) CodeGenError!void {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_aarch64,
@ -210,7 +210,7 @@ pub fn generateLazyFunction(
) CodeGenError!void {
const zcu = pt.zcu;
const target = if (Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu)) |inst_index|
zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
&zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
else
zcu.getTarget();
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
@ -225,7 +225,7 @@ pub fn generateLazyFunction(
}
}
fn writeFloat(comptime F: type, f: F, target: std.Target, endian: std.builtin.Endian, code: []u8) void {
fn writeFloat(comptime F: type, f: F, target: *const std.Target, endian: std.builtin.Endian, code: []u8) void {
_ = target;
const bits = @typeInfo(F).float.bits;
const Int = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } });
@ -253,7 +253,7 @@ pub fn generateLazySymbol(
const gpa = comp.gpa;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const endian = target.cpu.arch.endian();
log.debug("generateLazySymbol: kind = {s}, ty = {}", .{
@ -810,7 +810,7 @@ fn lowerUavRef(
const uav_align = ip.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
switch (try lf.lowerUav(pt, uav_val, uav_align, src_loc)) {
.mcv => {},
.sym_index => {},
.fail => |em| std.debug.panic("TODO rework lowerUav. internal error: {s}", .{em.msg}),
}
@ -839,7 +839,7 @@ fn lowerNavRef(
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const is_obj = lf.comp.config.output_mode == .Obj;
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
@ -920,6 +920,90 @@ pub const LinkerLoad = struct {
sym_index: u32,
};
pub const SymbolResult = union(enum) { sym_index: u32, fail: *ErrorMsg };
pub fn genNavRef(
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
nav_index: InternPool.Nav.Index,
target: *const std.Target,
) CodeGenError!SymbolResult {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
log.debug("genNavRef({})", .{nav.fqn.fmt(ip)});
const lib_name, const linkage, const is_threadlocal = if (nav.getExtern(ip)) |e|
.{ e.lib_name, e.linkage, e.is_threadlocal and zcu.comp.config.any_non_single_threaded }
else
.{ .none, .internal, false };
if (lf.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
switch (linkage) {
.internal => {
const sym_index = try zo.getOrCreateMetadataForNav(zcu, nav_index);
if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
return .{ .sym_index = sym_index };
},
.strong, .weak => {
const sym_index = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
switch (linkage) {
.internal => unreachable,
.strong => {},
.weak => zo.symbol(sym_index).flags.weak = true,
.link_once => unreachable,
}
if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
return .{ .sym_index = sym_index };
},
.link_once => unreachable,
}
} else if (lf.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
switch (linkage) {
.internal => {
const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index);
if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
return .{ .sym_index = sym_index };
},
.strong, .weak => {
const sym_index = try macho_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
switch (linkage) {
.internal => unreachable,
.strong => {},
.weak => zo.symbols.items[sym_index].flags.weak = true,
.link_once => unreachable,
}
if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
return .{ .sym_index = sym_index };
},
.link_once => unreachable,
}
} else if (lf.cast(.coff)) |coff_file| {
// TODO audit this
switch (linkage) {
.internal => {
const atom_index = try coff_file.getOrCreateAtomForNav(nav_index);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return .{ .sym_index = sym_index };
},
.strong, .weak => {
const global_index = try coff_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
try coff_file.need_got_table.put(zcu.gpa, global_index, {}); // needs GOT
return .{ .sym_index = global_index };
},
.link_once => unreachable,
}
} else if (lf.cast(.plan9)) |p9| {
return .{ .sym_index = try p9.seeNav(pt, nav_index) };
} else {
const msg = try ErrorMsg.create(zcu.gpa, src_loc, "TODO genNavRef for target {}", .{target});
return .{ .fail = msg };
}
}
/// deprecated legacy type
pub const GenResult = union(enum) {
mcv: MCValue,
fail: *ErrorMsg,
@ -951,121 +1035,36 @@ pub const GenResult = union(enum) {
};
};
pub fn genNavRef(
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
nav_index: InternPool.Nav.Index,
target: std.Target,
) CodeGenError!GenResult {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
log.debug("genNavRef({})", .{nav.fqn.fmt(ip)});
const lib_name, const linkage, const is_threadlocal = if (nav.getExtern(ip)) |e|
.{ e.lib_name, e.linkage, e.is_threadlocal and zcu.comp.config.any_non_single_threaded }
else
.{ .none, .internal, false };
if (lf.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
switch (linkage) {
.internal => {
const sym_index = try zo.getOrCreateMetadataForNav(zcu, nav_index);
if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.strong, .weak => {
const sym_index = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
switch (linkage) {
.internal => unreachable,
.strong => {},
.weak => zo.symbol(sym_index).flags.weak = true,
.link_once => unreachable,
}
if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.link_once => unreachable,
}
} else if (lf.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
switch (linkage) {
.internal => {
const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index);
if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.strong, .weak => {
const sym_index = try macho_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
switch (linkage) {
.internal => unreachable,
.strong => {},
.weak => zo.symbols.items[sym_index].flags.weak = true,
.link_once => unreachable,
}
if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.link_once => unreachable,
}
} else if (lf.cast(.coff)) |coff_file| {
// TODO audit this
switch (linkage) {
.internal => {
const atom_index = try coff_file.getOrCreateAtomForNav(nav_index);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return .{ .mcv = .{ .lea_symbol = sym_index } };
},
.strong, .weak => {
const global_index = try coff_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
try coff_file.need_got_table.put(zcu.gpa, global_index, {}); // needs GOT
return .{ .mcv = .{ .lea_symbol = global_index } };
},
.link_once => unreachable,
}
} else if (lf.cast(.plan9)) |p9| {
const atom_index = try p9.seeNav(pt, nav_index);
const atom = p9.getAtom(atom_index);
return .{ .mcv = .{ .memory = atom.getOffsetTableAddress(p9) } };
} else {
const msg = try ErrorMsg.create(zcu.gpa, src_loc, "TODO genNavRef for target {}", .{target});
return .{ .fail = msg };
}
}
/// deprecated legacy code path
pub fn genTypedValue(
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
val: Value,
target: std.Target,
target: *const std.Target,
) CodeGenError!GenResult {
return switch (try lowerValue(pt, val, &target)) {
const res = try lowerValue(pt, val, target);
return switch (res) {
.none => .{ .mcv = .none },
.undef => .{ .mcv = .undef },
.immediate => |imm| .{ .mcv = .{ .immediate = imm } },
.lea_nav => |nav| genNavRef(lf, pt, src_loc, nav, target),
.lea_uav => |uav| switch (try lf.lowerUav(
.lea_nav => |nav| switch (try genNavRef(lf, pt, src_loc, nav, target)) {
.sym_index => |sym_index| .{ .mcv = .{ .lea_symbol = sym_index } },
.fail => |em| .{ .fail = em },
},
.load_uav, .lea_uav => |uav| switch (try lf.lowerUav(
pt,
uav.val,
Type.fromInterned(uav.orig_ty).ptrAlignment(pt.zcu),
src_loc,
)) {
.mcv => |mcv| .{ .mcv = switch (mcv) {
.sym_index => |sym_index| .{ .mcv = switch (res) {
else => unreachable,
.load_direct => |sym_index| .{ .lea_direct = sym_index },
.load_symbol => |sym_index| .{ .lea_symbol = sym_index },
.load_uav => .{ .load_symbol = sym_index },
.lea_uav => .{ .lea_symbol = sym_index },
} },
.fail => |em| .{ .fail = em },
},
.load_uav => |uav| lf.lowerUav(
pt,
uav.val,
Type.fromInterned(uav.orig_ty).ptrAlignment(pt.zcu),
src_loc,
),
};
}
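// Mapping sketch for the reworked path above (cases as defined in this file):
//   .lea_nav  -> genNavRef -> .{ .mcv = .{ .lea_symbol = sym_index } }
//   .load_uav -> lowerUav  -> .{ .mcv = .{ .load_symbol = sym_index } }
//   .lea_uav  -> lowerUav  -> .{ .mcv = .{ .lea_symbol = sym_index } }
// Backends that want a raw symbol index branch on `SymbolResult` directly and
// decide lea vs. load themselves instead of receiving a pre-baked `MCValue`.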
@ -1076,8 +1075,8 @@ const LowerResult = union(enum) {
/// such as ARM, the immediate will never exceed 32-bits.
immediate: u64,
lea_nav: InternPool.Nav.Index,
lea_uav: InternPool.Key.Ptr.BaseAddr.Uav,
load_uav: InternPool.Key.Ptr.BaseAddr.Uav,
lea_uav: InternPool.Key.Ptr.BaseAddr.Uav,
};
pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allocator.Error!LowerResult {

View File

@ -1080,7 +1080,7 @@ pub const DeclGen = struct {
},
.enum_tag => |enum_tag| try dg.renderValue(writer, Value.fromInterned(enum_tag.int), location),
.float => {
const bits = ty.floatBits(target.*);
const bits = ty.floatBits(target);
const f128_val = val.toFloat(f128, zcu);
// All unsigned ints matching float types are pre-allocated.
@ -1608,7 +1608,7 @@ pub const DeclGen = struct {
.f80_type,
.f128_type,
=> {
const bits = ty.floatBits(target.*);
const bits = ty.floatBits(target);
// All unsigned ints matching float types are pre-allocated.
const repr_ty = dg.pt.intType(.unsigned, bits) catch unreachable;
@ -6543,7 +6543,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
const scalar_ty = operand_ty.scalarType(zcu);
const target = &f.object.dg.mod.resolved_target.result;
const operation = if (inst_scalar_ty.isRuntimeFloat() and scalar_ty.isRuntimeFloat())
if (inst_scalar_ty.floatBits(target.*) < scalar_ty.floatBits(target.*)) "trunc" else "extend"
if (inst_scalar_ty.floatBits(target) < scalar_ty.floatBits(target)) "trunc" else "extend"
else if (inst_scalar_ty.isInt(zcu) and scalar_ty.isRuntimeFloat())
if (inst_scalar_ty.isSignedInt(zcu)) "fix" else "fixuns"
else if (inst_scalar_ty.isRuntimeFloat() and scalar_ty.isInt(zcu))
@ -6565,8 +6565,8 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
}
try writer.writeAll("zig_");
try writer.writeAll(operation);
try writer.writeAll(compilerRtAbbrev(scalar_ty, zcu, target.*));
try writer.writeAll(compilerRtAbbrev(inst_scalar_ty, zcu, target.*));
try writer.writeAll(compilerRtAbbrev(scalar_ty, zcu, target));
try writer.writeAll(compilerRtAbbrev(inst_scalar_ty, zcu, target));
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
try v.elem(f, writer);
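// (annotation, not in the original source) For an f64 -> f32 @floatCast this
// assembles a helper name like zig_truncdfsf(...): "zig_" ++ "trunc" ++
// abbrev(f64) ++ abbrev(f32), assuming compilerRtAbbrev follows the usual
// compiler-rt float abbreviations ("sf" for f32, "df" for f64) alongside the
// "si"/"di" integer abbreviations shown below.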
@ -8073,7 +8073,7 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 {
};
}
fn compilerRtAbbrev(ty: Type, zcu: *Zcu, target: std.Target) []const u8 {
fn compilerRtAbbrev(ty: Type, zcu: *Zcu, target: *const std.Target) []const u8 {
return if (ty.isInt(zcu)) switch (ty.intInfo(zcu).bits) {
1...32 => "si",
33...64 => "di",

View File

@ -1319,9 +1319,9 @@ pub const Pool = struct {
},
else => {
const target = &mod.resolved_target.result;
const abi_align_bytes = std.zig.target.intAlignment(target.*, int_info.bits);
const abi_align_bytes = std.zig.target.intAlignment(target, int_info.bits);
const array_ctype = try pool.getArray(allocator, .{
.len = @divExact(std.zig.target.intByteSize(target.*, int_info.bits), abi_align_bytes),
.len = @divExact(std.zig.target.intByteSize(target, int_info.bits), abi_align_bytes),
.elem_ctype = try pool.fromIntInfo(allocator, .{
.signedness = .unsigned,
.bits = @intCast(abi_align_bytes * 8),
@ -1438,13 +1438,13 @@ pub const Pool = struct {
.elem_ctype = .u8,
.@"const" = true,
}),
.alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target.*)),
.alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)),
},
.{
.name = .{ .index = .len },
.ctype = .usize,
.alignas = AlignAs.fromAbiAlignment(
.fromByteUnits(std.zig.target.intAlignment(target.*, target.ptrBitWidth())),
.fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())),
),
},
};
@ -2246,13 +2246,13 @@ pub const Pool = struct {
mod,
kind,
),
.alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target.*)),
.alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)),
},
.{
.name = .{ .index = .len },
.ctype = .usize,
.alignas = AlignAs.fromAbiAlignment(
.fromByteUnits(std.zig.target.intAlignment(target.*, target.ptrBitWidth())),
.fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())),
),
},
};
@ -2372,7 +2372,7 @@ pub const Pool = struct {
.name = .{ .index = .@"error" },
.ctype = error_set_ctype,
.alignas = AlignAs.fromAbiAlignment(
.fromByteUnits(std.zig.target.intAlignment(target.*, error_set_bits)),
.fromByteUnits(std.zig.target.intAlignment(target, error_set_bits)),
),
},
.{

View File

@ -43,7 +43,7 @@ pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
});
}
fn subArchName(target: std.Target, comptime family: std.Target.Cpu.Arch.Family, mappings: anytype) ?[]const u8 {
fn subArchName(target: *const std.Target, comptime family: std.Target.Cpu.Arch.Family, mappings: anytype) ?[]const u8 {
inline for (mappings) |mapping| {
if (target.cpu.has(family, mapping[0])) return mapping[1];
}
@ -51,7 +51,7 @@ fn subArchName(target: std.Target, comptime family: std.Target.Cpu.Arch.Family,
return null;
}
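Since `mappings` is an anytype tuple of `.{ feature, name }` pairs walked with `inline for`, a call looks like the following sketch (the SPIR-V feature tags mirror the `target.cpu.has(.spirv, .v1_6)` check elsewhere in this diff; the string values are illustrative):

    // Returns the first matching name, or null if no listed feature is set.
    const sub_arch = subArchName(target, .spirv, .{
        .{ .v1_6, "1.6" },
        .{ .v1_5, "1.5" },
    });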
pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8 {
var llvm_triple = std.ArrayList(u8).init(allocator);
defer llvm_triple.deinit();
@ -309,7 +309,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
return llvm_triple.toOwnedSlice();
}
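The triple is assembled piecewise into the ArrayList and returned as an owned slice, which callers free with the same allocator. A usage sketch (allocator and target assumed in scope), where the result follows the usual arch-vendor-os-abi LLVM form:

    const triple = try targetTriple(allocator, target);
    defer allocator.free(triple);
    // e.g. "x86_64-unknown-linux-gnu" for a typical Linux target.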
pub fn supportsTailCall(target: std.Target) bool {
pub fn supportsTailCall(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.wasm32, .wasm64 => target.cpu.has(.wasm, .tail_call),
// Although these ISAs support tail calls, LLVM does not support tail calls on them.
@ -319,7 +319,7 @@ pub fn supportsTailCall(target: std.Target) bool {
};
}
pub fn dataLayout(target: std.Target) []const u8 {
pub fn dataLayout(target: *const std.Target) []const u8 {
// These data layouts should match Clang.
return switch (target.cpu.arch) {
.arc => "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-f32:32:32-i64:32-f64:32-a:0:32-n32",
@ -475,7 +475,7 @@ const CodeModel = enum {
large,
};
fn codeModel(model: std.builtin.CodeModel, target: std.Target) CodeModel {
fn codeModel(model: std.builtin.CodeModel, target: *const std.Target) CodeModel {
// Roughly match Clang's mapping of GCC code models to LLVM code models.
return switch (model) {
.default => .default,
@ -508,7 +508,7 @@ pub const Object = struct {
debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata),
target: std.Target,
target: *const std.Target,
/// Ideally we would use `llvm_module.getNamedFunction` to go from *Decl to LLVM function,
/// but that has some downsides:
/// * we have to compute the fully qualified name every time we want to do the lookup
@ -562,7 +562,7 @@ pub const Object = struct {
pub fn create(arena: Allocator, comp: *Compilation) !Ptr {
dev.check(.llvm_backend);
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const llvm_target_triple = try targetTriple(arena, target);
var builder = try Builder.init(.{
@ -827,7 +827,7 @@ pub const Object = struct {
const behavior_max = try o.builder.metadataConstant(try o.builder.intConst(.i32, 7));
const behavior_min = try o.builder.metadataConstant(try o.builder.intConst(.i32, 8));
if (target_util.llvmMachineAbi(comp.root_mod.resolved_target.result)) |abi| {
if (target_util.llvmMachineAbi(&comp.root_mod.resolved_target.result)) |abi| {
module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
behavior_error,
try o.builder.metadataString("target-abi"),
@ -837,7 +837,7 @@ pub const Object = struct {
));
}
const pic_level = target_util.picLevel(comp.root_mod.resolved_target.result);
const pic_level = target_util.picLevel(&comp.root_mod.resolved_target.result);
if (comp.root_mod.pic) {
module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
behavior_min,
@ -860,7 +860,7 @@ pub const Object = struct {
try o.builder.metadataString("Code Model"),
try o.builder.metadataConstant(try o.builder.intConst(.i32, @as(
i32,
switch (codeModel(comp.root_mod.code_model, comp.root_mod.resolved_target.result)) {
switch (codeModel(comp.root_mod.code_model, &comp.root_mod.resolved_target.result)) {
.default => unreachable,
.tiny => 0,
.small => 1,
@ -906,7 +906,7 @@ pub const Object = struct {
}
}
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
if (target.os.tag == .windows and (target.cpu.arch == .x86_64 or target.cpu.arch == .x86)) {
// Add the "RegCallv4" flag so that any functions using `x86_regcallcc` use regcall
// v4, which is essentially a requirement on Windows. See corresponding logic in
@ -1020,7 +1020,7 @@ pub const Object = struct {
else
.Static;
const code_model: llvm.CodeModel = switch (codeModel(comp.root_mod.code_model, comp.root_mod.resolved_target.result)) {
const code_model: llvm.CodeModel = switch (codeModel(comp.root_mod.code_model, &comp.root_mod.resolved_target.result)) {
.default => .Default,
.tiny => .Tiny,
.small => .Small,
@ -1045,7 +1045,7 @@ pub const Object = struct {
comp.function_sections,
comp.data_sections,
float_abi,
if (target_util.llvmMachineAbi(comp.root_mod.resolved_target.result)) |s| s.ptr else null,
if (target_util.llvmMachineAbi(&comp.root_mod.resolved_target.result)) |s| s.ptr else null,
);
errdefer target_machine.dispose();
@ -1137,7 +1137,7 @@ pub const Object = struct {
const owner_mod = zcu.fileByIndex(file_scope).mod.?;
const fn_ty = Type.fromInterned(func.ty);
const fn_info = zcu.typeToFunc(fn_ty).?;
const target = owner_mod.resolved_target.result;
const target = &owner_mod.resolved_target.result;
var ng: NavGen = .{
.object = o,
@ -2699,7 +2699,7 @@ pub const Object = struct {
if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function;
const fn_info = zcu.typeToFunc(ty).?;
const target = owner_mod.resolved_target.result;
const target = &owner_mod.resolved_target.result;
const sret = firstParamSRet(fn_info, zcu, target);
const is_extern, const lib_name = if (nav.getExtern(ip)) |@"extern"|
@ -2913,7 +2913,7 @@ pub const Object = struct {
try attributes.addFnAttr(.minsize, &o.builder);
try attributes.addFnAttr(.optsize, &o.builder);
}
const target = owner_mod.resolved_target.result;
const target = &owner_mod.resolved_target.result;
if (target.cpu.model.llvm_name) |s| {
try attributes.addFnAttr(.{ .string = .{
.kind = try o.builder.string("target-cpu"),
@ -4445,7 +4445,7 @@ pub const Object = struct {
if (o.builder.getGlobal(name)) |llvm_fn| return llvm_fn.ptrConst(&o.builder).kind.function;
const zcu = o.pt.zcu;
const target = zcu.root_mod.resolved_target.result;
const target = &zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(.i1, &.{try o.errorIntType()}, .normal),
name,
@ -4474,7 +4474,7 @@ pub const Object = struct {
const usize_ty = try o.lowerType(Type.usize);
const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0);
const target = zcu.root_mod.resolved_target.result;
const target = &zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
try o.builder.strtabStringFmt("__zig_tag_name_{}", .{enum_type.name.fmt(ip)}),
@ -10372,7 +10372,7 @@ pub const FuncGen = struct {
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(o.named_enum_map.remove(enum_ty.toIntern()));
const target = zcu.root_mod.resolved_target.result;
const target = &zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal),
try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{enum_type.name.fmt(ip)}),
@ -11834,7 +11834,7 @@ const CallingConventionInfo = struct {
inreg_param_count: u2 = 0,
};
pub fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) ?CallingConventionInfo {
pub fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: *const std.Target) ?CallingConventionInfo {
const llvm_cc = toLlvmCallConvTag(cc, target) orelse return null;
const incoming_stack_alignment: ?u64, const register_params: u2 = switch (cc) {
inline else => |pl| switch (@TypeOf(pl)) {
@ -11858,7 +11858,7 @@ pub fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) ?Ca
.inreg_param_count = register_params,
};
}
fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: std.Target) ?Builder.CallConv {
fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: *const std.Target) ?Builder.CallConv {
if (target.cCallingConvention()) |default_c| {
if (cc_tag == default_c) {
return .ccc;
@ -11972,7 +11972,7 @@ fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: std.Targ
}
/// Convert a zig-address space to an llvm address space.
fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace {
fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: *const std.Target) Builder.AddrSpace {
for (llvmAddrSpaceInfo(target)) |info| if (info.zig == address_space) return info.llvm;
unreachable;
}
@ -11987,7 +11987,7 @@ const AddrSpaceInfo = struct {
idx: ?u16 = null,
force_in_data_layout: bool = false,
};
fn llvmAddrSpaceInfo(target: std.Target) []const AddrSpaceInfo {
fn llvmAddrSpaceInfo(target: *const std.Target) []const AddrSpaceInfo {
return switch (target.cpu.arch) {
.x86, .x86_64 => &.{
.{ .zig = .generic, .llvm = .default },
@ -12063,7 +12063,7 @@ fn llvmAddrSpaceInfo(target: std.Target) []const AddrSpaceInfo {
/// different address space, and then cast back to the generic address space.
/// For example, on GPUs, local variable declarations must be generated into the local address space.
/// This function returns the address space local values should be generated into.
fn llvmAllocaAddressSpace(target: std.Target) Builder.AddrSpace {
fn llvmAllocaAddressSpace(target: *const std.Target) Builder.AddrSpace {
return switch (target.cpu.arch) {
// On amdgcn, locals should be generated into the private address space.
// To make Zig not impossible to use, these are then converted to addresses in the
@ -12075,7 +12075,7 @@ fn llvmAllocaAddressSpace(target: std.Target) Builder.AddrSpace {
/// On some targets, global values that are in the generic address space must be generated into a
/// different address space, and then cast back to the generic address space.
fn llvmDefaultGlobalAddressSpace(target: std.Target) Builder.AddrSpace {
fn llvmDefaultGlobalAddressSpace(target: *const std.Target) Builder.AddrSpace {
return switch (target.cpu.arch) {
// On amdgcn, globals must be explicitly allocated and uploaded so that the program can access
// them.
@ -12086,14 +12086,14 @@ fn llvmDefaultGlobalAddressSpace(target: std.Target) Builder.AddrSpace {
/// Return the actual address space that a value should be stored in if it's a global address space.
/// When a value is placed in the resulting address space, it needs to be cast back into wanted_address_space.
fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: std.Target) Builder.AddrSpace {
fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, target: *const std.Target) Builder.AddrSpace {
return switch (wanted_address_space) {
.generic => llvmDefaultGlobalAddressSpace(target),
else => |as| toLlvmAddressSpace(as, target),
};
}
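Taken together with the comments above, the address-space helpers implement the documented dance for targets like amdgcn: pick a concrete address space for the alloca or global, then cast the pointer back to generic so the rest of codegen stays uniform. A sketch of how the two queries compose (illustrative, not literal backend code):

    // Locals: the comments above say amdgcn allocas live in the private
    // address space and are cast back to generic after allocation.
    const alloca_as = llvmAllocaAddressSpace(target);
    // Globals: a .generic request is redirected to the target's default
    // global address space; anything else goes through the plain mapping.
    const global_as = toLlvmGlobalAddressSpace(.generic, target);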
fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
fn returnTypeByRef(zcu: *Zcu, target: *const std.Target, ty: Type) bool {
if (isByRef(ty, zcu)) {
return true;
} else if (target.cpu.arch.isX86() and
@ -12108,14 +12108,14 @@ fn returnTypeByRef(zcu: *Zcu, target: std.Target, ty: Type) bool {
}
}
fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Target) bool {
fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: *const std.Target) bool {
const return_type = Type.fromInterned(fn_info.return_type);
if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) return false;
return switch (fn_info.cc) {
.auto => returnTypeByRef(zcu, target, return_type),
.x86_64_sysv => firstParamSRetSystemV(return_type, zcu, target),
.x86_64_win => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
.x86_64_win => x86_64_abi.classifyWindows(return_type, zcu, target) == .memory,
.x86_sysv, .x86_win => isByRef(return_type, zcu),
.x86_stdcall => !isScalar(zcu, return_type),
.wasm_mvp => wasm_c_abi.classifyType(return_type, zcu) == .indirect,
@ -12137,8 +12137,8 @@ fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Targe
};
}
fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: std.Target) bool {
const class = x86_64_abi.classifySystemV(ty, zcu, &target, .ret);
fn firstParamSRetSystemV(ty: Type, zcu: *Zcu, target: *const std.Target) bool {
const class = x86_64_abi.classifySystemV(ty, zcu, target, .ret);
if (class[0] == .memory) return true;
if (class[0] == .x87 and class[2] != .none) return true;
return false;
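firstParamSRetSystemV is the sret decision for SysV x86-64: a first eightbyte classified .memory, or an .x87 first class with a non-empty third eightbyte, forces the return value through a hidden pointer parameter. An illustrative expectation (big_ty, zcu, and target assumed in scope), following standard SysV rules under which a 32-byte aggregate exceeds the two-eightbyte register budget:

    // struct { a: u64, b: u64, c: u64, d: u64 } classifies as .memory,
    // so the call is lowered with an sret pointer as its first parameter.
    std.debug.assert(firstParamSRetSystemV(big_ty, zcu, target));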
@ -12215,7 +12215,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const zcu = o.pt.zcu;
const return_type = Type.fromInterned(fn_info.return_type);
switch (x86_64_abi.classifyWindows(return_type, zcu)) {
switch (x86_64_abi.classifyWindows(return_type, zcu, zcu.getTarget())) {
.integer => {
if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
@ -12238,9 +12238,7 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
if (isScalar(zcu, return_type)) {
return o.lowerType(return_type);
}
const target = zcu.getTarget();
const classes = x86_64_abi.classifySystemV(return_type, zcu, &target, .ret);
if (classes[0] == .memory) return .void;
const classes = x86_64_abi.classifySystemV(return_type, zcu, zcu.getTarget(), .ret);
var types_index: u32 = 0;
var types_buffer: [8]Builder.Type = undefined;
for (classes) |class| {
@ -12275,15 +12273,9 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
types_index += 1;
},
.x87up => continue,
.complex_x87 => {
@panic("TODO");
},
.memory => unreachable, // handled above
.win_i128 => unreachable, // windows only
.none => break,
.integer_per_element => {
@panic("TODO");
},
.memory, .integer_per_element => return .void,
.win_i128 => unreachable, // windows only
}
}
const first_non_integer = std.mem.indexOfNone(x86_64_abi.Class, &classes, &.{.integer});
@ -12493,7 +12485,7 @@ const ParamTypeIterator = struct {
fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
const zcu = it.object.pt.zcu;
switch (x86_64_abi.classifyWindows(ty, zcu)) {
switch (x86_64_abi.classifyWindows(ty, zcu, zcu.getTarget())) {
.integer => {
if (isScalar(zcu, ty)) {
it.zig_index += 1;
@ -12527,8 +12519,7 @@ const ParamTypeIterator = struct {
fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const zcu = it.object.pt.zcu;
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
const classes = x86_64_abi.classifySystemV(ty, zcu, &target, .arg);
const classes = x86_64_abi.classifySystemV(ty, zcu, zcu.getTarget(), .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
it.llvm_index += 1;
@ -12575,12 +12566,9 @@ const ParamTypeIterator = struct {
return .byref;
},
.x87up => unreachable,
.complex_x87 => {
@panic("TODO");
},
.none => break,
.memory => unreachable, // handled above
.win_i128 => unreachable, // windows only
.none => break,
.integer_per_element => {
@panic("TODO");
},
@ -12794,7 +12782,7 @@ fn isScalar(zcu: *Zcu, ty: Type) bool {
/// This function returns true if we expect LLVM to lower x86_fp80 correctly
/// and false if we expect LLVM to crash if it encounters an x86_fp80 type,
/// or if it produces miscompilations.
fn backendSupportsF80(target: std.Target) bool {
fn backendSupportsF80(target: *const std.Target) bool {
return switch (target.cpu.arch) {
.x86, .x86_64 => !target.cpu.has(.x86, .soft_float),
else => false,
@ -12804,7 +12792,7 @@ fn backendSupportsF80(target: std.Target) bool {
/// This function returns true if we expect LLVM to lower f16 correctly
/// and false if we expect LLVM to crash if it encounters an f16 type,
/// or if it produces miscompilations.
fn backendSupportsF16(target: std.Target) bool {
fn backendSupportsF16(target: *const std.Target) bool {
return switch (target.cpu.arch) {
// https://github.com/llvm/llvm-project/issues/97981
.csky,
@ -12840,7 +12828,7 @@ fn backendSupportsF16(target: std.Target) bool {
/// This function returns true if we expect LLVM to lower f128 correctly,
/// and false if we expect LLVM to crash if it encounters an f128 type,
/// or if it produces miscompilations.
fn backendSupportsF128(target: std.Target) bool {
fn backendSupportsF128(target: *const std.Target) bool {
return switch (target.cpu.arch) {
// https://github.com/llvm/llvm-project/issues/121122
.amdgcn,
@ -12870,7 +12858,7 @@ fn backendSupportsF128(target: std.Target) bool {
/// LLVM does not support all relevant intrinsics for all targets, so we
/// may need to manually generate a compiler-rt call.
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
fn intrinsicsAllowed(scalar_ty: Type, target: *const std.Target) bool {
return switch (scalar_ty.toIntern()) {
.f16_type => backendSupportsF16(target),
.f80_type => (target.cTypeBitSize(.longdouble) == 80) and backendSupportsF80(target),
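// (annotation, not in the original source) e.g. on x86_64-linux the C long
// double is the 80-bit x87 format, so cTypeBitSize(.longdouble) == 80 and f80
// intrinsics remain allowed unless the soft_float feature is enabled.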
@ -12907,7 +12895,7 @@ fn buildAllocaInner(
wip: *Builder.WipFunction,
llvm_ty: Builder.Type,
alignment: Builder.Alignment,
target: std.Target,
target: *const std.Target,
) Allocator.Error!Builder.Value {
const address_space = llvmAllocaAddressSpace(target);

View File

@ -185,7 +185,7 @@ pub const Object = struct {
/// related to that.
error_buffer: ?SpvModule.Decl.Index = null,
pub fn init(gpa: Allocator, target: std.Target) Object {
pub fn init(gpa: Allocator, target: *const std.Target) Object {
return .{
.gpa = gpa,
.spv = SpvModule.init(gpa, target),

View File

@ -107,7 +107,7 @@ gpa: Allocator,
arena: std.heap.ArenaAllocator,
/// Target info
target: std.Target,
target: *const std.Target,
/// The target SPIR-V version
version: spec.Version,
@ -187,7 +187,7 @@ decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty,
/// The list of entry points that should be exported from this module.
entry_points: std.AutoArrayHashMapUnmanaged(IdRef, EntryPoint) = .empty,
pub fn init(gpa: Allocator, target: std.Target) Module {
pub fn init(gpa: Allocator, target: *const std.Target) Module {
const version_minor: u8 = blk: {
// Prefer higher versions
if (target.cpu.has(.spirv, .v1_6)) break :blk 6;

View File

@ -66,7 +66,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
// In all cases in this function, we add the C compiler flags to
// cache_exempt_flags rather than extra_flags, because these arguments
@ -407,7 +407,7 @@ pub const BuiltSharedObjects = struct {
const all_map_basename = "all.map";
fn wordDirective(target: std.Target) []const u8 {
fn wordDirective(target: *const std.Target) []const u8 {
// Based on its description in the GNU `as` manual, you might assume that `.word` is sized
// according to the target word size. But no; that would just make too much sense.
return if (target.ptrBitWidth() == 64) ".quad" else ".long";
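The returned directive is spliced into generated assembly for pointer-sized data. A sketch with a hypothetical writer (not from this diff):

    // Emits ".quad some_symbol" on 64-bit targets, ".long some_symbol" on
    // 32-bit ones.
    try writer.print("{s} {s}\n", .{ wordDirective(target), "some_symbol" });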

View File

@ -172,7 +172,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const target_ver = target.os.versionRange().gnuLibCVersion().?;
const nonshared_stat = target_ver.order(.{ .major = 2, .minor = 32, .patch = 0 }) != .gt;
const start_old_init_fini = target_ver.order(.{ .major = 2, .minor = 33, .patch = 0 }) != .gt;
@ -485,7 +485,7 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
fn add_include_dirs_arch(
arena: Allocator,
args: *std.ArrayList([]const u8),
target: std.Target,
target: *const std.Target,
opt_nptl: ?[]const u8,
dir: []const u8,
) error{OutOfMemory}!void {
@ -649,7 +649,7 @@ pub const BuiltSharedObjects = struct {
const all_map_basename = "all.map";
fn wordDirective(target: std.Target) []const u8 {
fn wordDirective(target: *const std.Target) []const u8 {
// Based on its description in the GNU `as` manual, you might assume that `.word` is sized
// according to the target word size. But no; that would just make too much sense.
return if (target.ptrBitWidth() == 64) ".quad" else ".long";

View File

@ -121,7 +121,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
const root_name = "c++";
const output_mode = .Lib;
const link_mode = .static;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const cxxabi_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", "include" });
const cxx_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "include" });
@ -314,7 +314,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
const root_name = "c++abi";
const output_mode = .Lib;
const link_mode = .static;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const cxxabi_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", "include" });
const cxx_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "include" });

View File

@ -324,7 +324,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
comp.tsan_lib = crt_file;
}
fn addCcArgs(target: std.Target, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
fn addCcArgs(target: *const std.Target, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-nostdinc++",
"-fvisibility=hidden",

View File

@ -27,7 +27,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
const arena = arena_allocator.allocator();
const output_mode = .Lib;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const unwind_tables: std.builtin.UnwindTables =
if (target.cpu.arch == .x86 and target.os.tag == .windows) .none else .@"async";
const config = Compilation.Config.resolve(.{

View File

@ -299,7 +299,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
var aro_comp = aro.Compilation.init(gpa, std.fs.cwd());
defer aro_comp.deinit();
aro_comp.target = target;
aro_comp.target = target.*;
const include_dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "def-include" });
@ -373,7 +373,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
pub fn libExists(
allocator: Allocator,
target: std.Target,
target: *const std.Target,
zig_lib_directory: Cache.Directory,
lib_name: []const u8,
) !bool {
@ -389,7 +389,7 @@ pub fn libExists(
/// see if a .def file exists.
fn findDef(
allocator: Allocator,
target: std.Target,
target: *const std.Target,
zig_lib_directory: Cache.Directory,
lib_name: []const u8,
) ![]u8 {

View File

@ -193,7 +193,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
.link_libc = false,
});
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const arch_name = std.zig.target.muslArchName(target.cpu.arch, target.abi);
const time32 = for (time32_compat_arch_list) |time32_compat_arch| {
if (mem.eql(u8, arch_name, time32_compat_arch)) break true;

View File

@ -58,7 +58,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const target_version = target.os.version_range.semver.min;
// In all cases in this function, we add the C compiler flags to
@ -353,7 +353,7 @@ pub const BuiltSharedObjects = struct {
}
};
fn wordDirective(target: std.Target) []const u8 {
fn wordDirective(target: *const std.Target) []const u8 {
// Based on its description in the GNU `as` manual, you might assume that `.word` is sized
// according to the target word size. But no; that would just make too much sense.
return if (target.ptrBitWidth() == 64) ".quad" else ".long";

View File

@ -920,7 +920,7 @@ pub const File = struct {
decl_val: InternPool.Index,
decl_align: InternPool.Alignment,
src_loc: Zcu.LazySrcLoc,
) !codegen.GenResult {
) !codegen.SymbolResult {
assert(base.comp.zcu.?.llvm_object == null);
switch (base.tag) {
.lld => unreachable,
@ -1321,7 +1321,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
const prog_node = comp.link_prog_node.start("Parse Host libc", 0);
defer prog_node.end();
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const flags = target_util.libcFullLinkFlags(target);
const crt_dir = comp.libc_installation.?.crt_dir.?;
const sep = std.fs.path.sep_str;
@ -1670,7 +1670,7 @@ pub fn hashInputs(man: *Cache.Manifest, link_inputs: []const Input) !void {
pub fn resolveInputs(
gpa: Allocator,
arena: Allocator,
target: std.Target,
target: *const std.Target,
/// This function mutates this array but does not take ownership.
/// Allocated with `gpa`.
unresolved_inputs: *std.ArrayListUnmanaged(UnresolvedInput),
@ -1914,7 +1914,7 @@ fn resolveLibInput(
ld_script_bytes: *std.ArrayListUnmanaged(u8),
lib_directory: Directory,
name_query: UnresolvedInput.NameQuery,
target: std.Target,
target: *const std.Target,
link_mode: std.builtin.LinkMode,
color: std.zig.Color,
) Allocator.Error!ResolveLibInputResult {
@ -2028,7 +2028,7 @@ fn resolvePathInput(
resolved_inputs: *std.ArrayListUnmanaged(Input),
/// Allocated via `gpa`.
ld_script_bytes: *std.ArrayListUnmanaged(u8),
target: std.Target,
target: *const std.Target,
pq: UnresolvedInput.PathQuery,
color: std.zig.Color,
) Allocator.Error!?ResolveLibInputResult {
@ -2070,7 +2070,7 @@ fn resolvePathInputLib(
resolved_inputs: *std.ArrayListUnmanaged(Input),
/// Allocated via `gpa`.
ld_script_bytes: *std.ArrayListUnmanaged(u8),
target: std.Target,
target: *const std.Target,
pq: UnresolvedInput.PathQuery,
link_mode: std.builtin.LinkMode,
color: std.zig.Color,

View File

@ -116,7 +116,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*C {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .c);
const optimize_mode = comp.root_mod.optimize_mode;
const use_lld = build_options.have_llvm and comp.config.use_lld;
@ -331,7 +331,7 @@ pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedIn
_ = ti_id;
}
fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) {
fn abiDefines(self: *C, target: *const std.Target) !std.ArrayList(u8) {
const gpa = self.base.comp.gpa;
var defines = std.ArrayList(u8).init(gpa);
errdefer defines.deinit();

View File

@ -208,7 +208,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Coff {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .coff);
const optimize_mode = comp.root_mod.optimize_mode;
const output_mode = comp.config.output_mode;
@ -752,7 +752,7 @@ fn shrinkAtom(coff: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
// capacity, insert a free list node for it.
}
fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8) !void {
fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8, resolve_relocs: bool) !void {
const atom = coff.getAtom(atom_index);
const sym = atom.getSymbol(coff);
const section = coff.sections.get(@intFromEnum(sym.section_number) - 1);
@ -774,11 +774,13 @@ fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8) !void {
var relocs = std.ArrayList(*Relocation).init(gpa);
defer relocs.deinit();
if (coff.relocs.getPtr(atom_index)) |rels| {
try relocs.ensureTotalCapacityPrecise(rels.items.len);
for (rels.items) |*reloc| {
if (reloc.isResolvable(coff) and reloc.dirty) {
relocs.appendAssumeCapacity(reloc);
if (resolve_relocs) {
if (coff.relocs.getPtr(atom_index)) |rels| {
try relocs.ensureTotalCapacityPrecise(rels.items.len);
for (rels.items) |*reloc| {
if (reloc.isResolvable(coff) and reloc.dirty) {
relocs.appendAssumeCapacity(reloc);
}
}
}
}
@ -812,12 +814,15 @@ fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8) !void {
}
}
coff.resolveRelocs(atom_index, relocs.items, code, coff.image_base);
if (resolve_relocs) {
coff.resolveRelocs(atom_index, relocs.items, code, coff.image_base);
}
try coff.pwriteAll(code, file_offset);
// Now we can mark the relocs as resolved.
while (relocs.pop()) |reloc| {
reloc.dirty = false;
if (resolve_relocs) {
// Now we can mark the relocs as resolved.
while (relocs.pop()) |reloc| {
reloc.dirty = false;
}
}
}
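writeAtom now threads a resolve_relocs flag so a non-incremental COFF link can write atom bytes without running the relocation bookkeeping at all; the same gating appears just below, where markRelocsDirtyByTarget and markRelocsDirtyByAddress return early when the compilation is not incremental. The updated call sites in this diff follow one pattern:

    // Incremental builds resolve relocations eagerly; one-shot builds defer
    // them until flushInner, which calls writeAtom(..., true) for the final pass.
    try coff.writeAtom(atom_index, code, coff.base.comp.incremental);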
@ -914,6 +919,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
}
fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
if (!coff.base.comp.incremental) return;
// TODO: reverse-lookup might come in handy here
for (coff.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
@ -924,6 +930,7 @@ fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
}
fn markRelocsDirtyByAddress(coff: *Coff, addr: u32) void {
if (!coff.base.comp.incremental) return;
const got_moved = blk: {
const sect_id = coff.got_section_index orelse break :blk false;
break :blk coff.sections.items(.header)[sect_id].virtual_address >= addr;
@ -1129,7 +1136,7 @@ fn lowerConst(
log.debug("allocated atom for {s} at 0x{x}", .{ name, atom.getSymbol(coff).value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
try coff.writeAtom(atom_index, code);
try coff.writeAtom(atom_index, code, coff.base.comp.incremental);
return .{ .ok = atom_index };
}
@ -1212,8 +1219,7 @@ fn updateLazySymbolAtom(
});
defer gpa.free(name);
const atom = coff.getAtomPtr(atom_index);
const local_sym_index = atom.getSymbolIndex().?;
const local_sym_index = coff.getAtomPtr(atom_index).getSymbolIndex().?;
const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
try codegen.generateLazySymbol(
@ -1228,12 +1234,13 @@ fn updateLazySymbolAtom(
);
const code = code_buffer.items;
const code_len: u32 = @intCast(code.len);
const atom = coff.getAtomPtr(atom_index);
const symbol = atom.getSymbolPtr(coff);
try coff.setSymbolName(symbol, name);
symbol.section_number = @enumFromInt(section_index + 1);
symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
const code_len: u32 = @intCast(code.len);
const vaddr = try coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
errdefer coff.freeAtom(atom_index);
@ -1244,7 +1251,7 @@ fn updateLazySymbolAtom(
symbol.value = vaddr;
try coff.addGotEntry(.{ .sym_index = local_sym_index });
try coff.writeAtom(atom_index, code);
try coff.writeAtom(atom_index, code, coff.base.comp.incremental);
}
pub fn getOrCreateAtomForLazySymbol(
@ -1328,7 +1335,7 @@ fn updateNavCode(
log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
@ -1392,7 +1399,7 @@ fn updateNavCode(
};
}
coff.writeAtom(atom_index, code) catch |err| switch (err) {
coff.writeAtom(atom_index, code, coff.base.comp.incremental) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| return coff.base.cgFail(nav_index, "failed to write atom: {s}", .{@errorName(e)}),
};
@ -1430,7 +1437,7 @@ pub fn updateExports(
const first_exp = export_indices[0].ptr(zcu);
const res = try coff.lowerUav(pt, uav, .none, first_exp.src);
switch (res) {
.mcv => {},
.sym_index => {},
.fail => |em| {
// TODO maybe it's enough to return an error here and let Module.processExportsInner
// handle the error?
@ -1677,7 +1684,7 @@ fn flushInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void {
const amt = try coff.base.file.?.preadAll(code.items, file_offset);
if (amt != code.items.len) return error.InputOutput;
try coff.writeAtom(atom_index, code.items);
try coff.writeAtom(atom_index, code.items, true);
}
// Update GOT if it got moved in memory.
@ -1715,6 +1722,21 @@ fn flushInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void {
}
assert(!coff.imports_count_dirty);
// hack for stage2_x86_64 + coff
if (comp.compiler_rt_dyn_lib) |crt_file| {
const compiler_rt_sub_path = try std.fs.path.join(gpa, &.{
std.fs.path.dirname(coff.base.emit.sub_path) orelse "",
std.fs.path.basename(crt_file.full_object_path.sub_path),
});
defer gpa.free(compiler_rt_sub_path);
try crt_file.full_object_path.root_dir.handle.copyFile(
crt_file.full_object_path.sub_path,
coff.base.emit.root_dir.handle,
compiler_rt_sub_path,
.{},
);
}
}
pub fn getNavVAddr(
@ -1755,7 +1777,7 @@ pub fn lowerUav(
uav: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Zcu.LazySrcLoc,
) !codegen.GenResult {
) !codegen.SymbolResult {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const val = Value.fromInterned(uav);
@ -1767,7 +1789,7 @@ pub fn lowerUav(
const atom = coff.getAtom(metadata.atom);
const existing_addr = atom.getSymbol(coff).value;
if (uav_alignment.check(existing_addr))
return .{ .mcv = .{ .load_symbol = atom.getSymbolIndex().? } };
return .{ .sym_index = atom.getSymbolIndex().? };
}
var name_buf: [32]u8 = undefined;
@ -1798,9 +1820,7 @@ pub fn lowerUav(
.atom = atom_index,
.section = coff.rdata_section_index.?,
});
return .{ .mcv = .{
.load_symbol = coff.getAtom(atom_index).getSymbolIndex().?,
} };
return .{ .sym_index = coff.getAtom(atom_index).getSymbolIndex().? };
}
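These lowerUav/lowerConst paths now return codegen.SymbolResult instead of the MCV-carrying codegen.GenResult. Judging from the .sym_index and .fail arms used throughout this diff, and from the per-file LowerConstResult unions it deletes, the type is essentially the following reconstruction (field types are an assumption, not quoted from the source):

    pub const SymbolResult = union(enum) {
        sym_index: u32, // the lowered symbol in the owning object file
        fail: *Zcu.ErrorMsg, // lowering failed; the caller reports the error
    };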
pub fn getUavVAddr(
@ -2153,7 +2173,7 @@ fn writeDataDirectoriesHeaders(coff: *Coff) !void {
}
fn writeHeader(coff: *Coff) !void {
const target = coff.base.comp.root_mod.resolved_target.result;
const target = &coff.base.comp.root_mod.resolved_target.result;
const gpa = coff.base.comp.gpa;
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
@ -2464,11 +2484,6 @@ const GetOrPutGlobalPtrResult = struct {
value_ptr: *SymbolWithLoc,
};
/// Used only for disambiguating local from global at relocation level.
/// TODO this must go away.
pub const global_symbol_bit: u32 = 0x80000000;
pub const global_symbol_mask: u32 = 0x7fffffff;
/// Return pointer to the global entry for `name` if one exists.
/// Puts a new global entry for `name` if one doesn't exist, and
/// returns a pointer to it.
@ -2800,7 +2815,7 @@ pub const Relocation = struct {
.ptr_width = coff.ptr_width,
};
const target = coff.base.comp.root_mod.resolved_target.result;
const target = &coff.base.comp.root_mod.resolved_target.result;
switch (target.cpu.arch) {
.aarch64 => reloc.resolveAarch64(ctx),
.x86, .x86_64 => reloc.resolveX86(ctx),

View File

@ -92,7 +92,7 @@ const DebugFrame = struct {
};
fn headerBytes(dwarf: *Dwarf) u32 {
const target = dwarf.bin_file.comp.root_mod.resolved_target.result;
const target = &dwarf.bin_file.comp.root_mod.resolved_target.result;
return @intCast(switch (dwarf.debug_frame.header.format) {
.none => return 0,
.debug_frame => dwarf.unitLengthBytes() + dwarf.sectionOffsetBytes() + 1 + "\x00".len + 1 + 1,
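// (annotation, not in the original source) With 32-bit DWARF, unitLengthBytes()
// and sectionOffsetBytes() are both 4, so this .debug_frame header totals
// 4 + 4 + 1 + 1 + 1 + 1 = 12 bytes.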
@ -2140,7 +2140,7 @@ fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
pub fn init(lf: *link.File, format: DW.Format) Dwarf {
const comp = lf.comp;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
return .{
.gpa = gpa,
.bin_file = lf,
@ -2573,7 +2573,7 @@ fn initWipNavInner(
try wip_nav.infoAddrSym(sym_index, 0);
wip_nav.func_high_pc = @intCast(wip_nav.debug_info.items.len);
try diw.writeInt(u32, 0, dwarf.endian);
const target = mod.resolved_target.result;
const target = &mod.resolved_target.result;
try uleb128(diw, switch (nav.status.fully_resolved.alignment) {
.none => target_info.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_info.minFunctionAlignment(target)),
@ -4529,7 +4529,7 @@ pub fn flush(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
dwarf.debug_aranges.section.dirty = false;
}
if (dwarf.debug_frame.section.dirty) {
const target = dwarf.bin_file.comp.root_mod.resolved_target.result;
const target = &dwarf.bin_file.comp.root_mod.resolved_target.result;
switch (dwarf.debug_frame.header.format) {
.none => {},
.debug_frame => unreachable,

View File

@ -196,7 +196,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Elf {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .elf);
const use_llvm = comp.config.use_llvm;
@ -473,7 +473,7 @@ pub fn lowerUav(
uav: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Zcu.LazySrcLoc,
) !codegen.GenResult {
) !codegen.SymbolResult {
return self.zigObjectPtr().?.lowerUav(self, pt, uav, explicit_alignment, src_loc);
}
@ -1073,7 +1073,7 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void {
const gpa = self.base.comp.gpa;
const diags = &self.base.comp.link_diags;
const target = self.base.comp.root_mod.resolved_target.result;
const target = &self.base.comp.root_mod.resolved_target.result;
const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
const default_sym_version = self.default_sym_version;
const file_handles = &self.file_handles;
@ -1104,7 +1104,7 @@ fn parseArchive(
diags: *Diags,
file_handles: *std.ArrayListUnmanaged(File.Handle),
files: *std.MultiArrayList(File.Entry),
target: std.Target,
target: *const std.Target,
debug_fmt_strip: bool,
default_sym_version: elf.Versym,
objects: *std.ArrayListUnmanaged(File.Index),
@ -1139,7 +1139,7 @@ fn parseDso(
dso: link.Input.Dso,
shared_objects: *std.StringArrayHashMapUnmanaged(File.Index),
files: *std.MultiArrayList(File.Entry),
target: std.Target,
target: *const std.Target,
) !void {
const tracy = trace(@src());
defer tracy.end();
@ -4121,8 +4121,8 @@ pub fn lsearch(comptime T: type, haystack: []const T, predicate: anytype) usize
return i;
}
pub fn getTarget(self: Elf) std.Target {
return self.base.comp.root_mod.resolved_target.result;
pub fn getTarget(self: *const Elf) *const std.Target {
return &self.base.comp.root_mod.resolved_target.result;
}
fn requiresThunks(self: Elf) bool {

View File

@ -69,7 +69,7 @@ pub fn parse(
/// For error reporting purposes only.
path: Path,
handle: fs.File,
target: std.Target,
target: *const std.Target,
debug_fmt_strip: bool,
default_sym_version: elf.Versym,
) !void {
@ -98,7 +98,7 @@ pub fn parseCommon(
diags: *Diags,
path: Path,
handle: fs.File,
target: std.Target,
target: *const std.Target,
) !void {
const offset = if (self.archive) |ar| ar.offset else 0;
const file_size = (try handle.stat()).size;
@ -182,7 +182,7 @@ pub fn parseCommon(
pub fn validateEFlags(
diags: *Diags,
path: Path,
target: std.Target,
target: *const std.Target,
e_flags: elf.Word,
) !void {
switch (target.cpu.arch) {
@ -263,7 +263,7 @@ fn initAtoms(
path: Path,
handle: fs.File,
debug_fmt_strip: bool,
target: std.Target,
target: *const std.Target,
) !void {
const shdrs = self.shdrs.items;
try self.atoms.ensureTotalCapacityPrecise(gpa, shdrs.len);
@ -420,7 +420,7 @@ fn parseEhFrame(
gpa: Allocator,
handle: fs.File,
shndx: u32,
target: std.Target,
target: *const std.Target,
) !void {
const relocs_shndx = for (self.shdrs.items, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_RELA => if (shdr.sh_info == shndx) break @as(u32, @intCast(i)),

View File

@ -96,7 +96,7 @@ pub fn parseHeader(
file_path: Path,
fs_file: std.fs.File,
stat: Stat,
target: std.Target,
target: *const std.Target,
) !Header {
var ehdr: elf.Elf64_Ehdr = undefined;
{

View File

@ -997,7 +997,7 @@ pub fn lowerUav(
uav: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Zcu.LazySrcLoc,
) !codegen.GenResult {
) !codegen.SymbolResult {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const val = Value.fromInterned(uav);
@ -1010,7 +1010,7 @@ pub fn lowerUav(
const sym = self.symbol(metadata.symbol_index);
const existing_alignment = sym.atom(elf_file).?.alignment;
if (uav_alignment.order(existing_alignment).compare(.lte))
return .{ .mcv = .{ .load_symbol = metadata.symbol_index } };
return .{ .sym_index = metadata.symbol_index };
}
const osec = if (self.data_relro_index) |sym_index|
@ -1047,12 +1047,11 @@ pub fn lowerUav(
.{@errorName(e)},
) },
};
const sym_index = switch (res) {
.ok => |sym_index| sym_index,
.fail => |em| return .{ .fail = em },
};
try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index, .allocated = true });
return .{ .mcv = .{ .load_symbol = sym_index } };
switch (res) {
.sym_index => |sym_index| try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index, .allocated = true }),
.fail => {},
}
return res;
}
pub fn getOrCreateMetadataForLazySymbol(
@ -1271,7 +1270,7 @@ fn updateNavCode(
log.debug("updateNavCode {}({d})", .{ nav.fqn.fmt(ip), nav_index });
const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
@ -1692,11 +1691,6 @@ fn updateLazySymbol(
try elf_file.pwriteAll(code, atom_ptr.offset(elf_file));
}
const LowerConstResult = union(enum) {
ok: Symbol.Index,
fail: *Zcu.ErrorMsg,
};
fn lowerConst(
self: *ZigObject,
elf_file: *Elf,
@ -1706,7 +1700,7 @@ fn lowerConst(
required_alignment: InternPool.Alignment,
output_section_index: u32,
src_loc: Zcu.LazySrcLoc,
) !LowerConstResult {
) !codegen.SymbolResult {
const gpa = pt.zcu.gpa;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
@ -1740,7 +1734,7 @@ fn lowerConst(
try elf_file.pwriteAll(code, atom_ptr.offset(elf_file));
return .{ .ok = sym_index };
return .{ .sym_index = sym_index };
}
pub fn updateExports(
@ -1764,7 +1758,7 @@ pub fn updateExports(
const first_exp = export_indices[0].ptr(zcu);
const res = try self.lowerUav(elf_file, pt, uav, .none, first_exp.src);
switch (res) {
.mcv => {},
.sym_index => {},
.fail => |em| {
// TODO maybe it's enough to return an error here and let Zcu.processExportsInner
// handle the error?

View File

@ -26,7 +26,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Goff {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const use_llvm = comp.config.use_llvm;
@ -59,7 +59,7 @@ pub fn open(
emit: Path,
options: link.File.OpenOptions,
) !*Goff {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .goff);
return createEmpty(arena, comp, emit, options);
}

View File

@ -30,7 +30,7 @@ const Coff = struct {
dllmain_crt_startup: bool,
},
fn init(comp: *Compilation, options: link.File.OpenOptions) !Coff {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const output_mode = comp.config.output_mode;
return .{
.image_base = options.image_base orelse switch (output_mode) {
@ -103,7 +103,7 @@ pub const Elf = struct {
fn init(comp: *Compilation, options: link.File.OpenOptions) !Elf {
const PtrWidth = enum { p32, p64 };
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const output_mode = comp.config.output_mode;
const is_dyn_lib = output_mode == .Lib and comp.config.link_mode == .dynamic;
const ptr_width: PtrWidth = switch (target.ptrBitWidth()) {
@ -202,7 +202,7 @@ pub fn createEmpty(
emit: Cache.Path,
options: link.File.OpenOptions,
) !*Lld {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const output_mode = comp.config.output_mode;
const optimize_mode = comp.root_mod.optimize_mode;
const is_native_os = comp.root_mod.resolved_target.is_native_os;
@ -342,7 +342,7 @@ fn linkAsArchive(lld: *Lld, arena: Allocator) !void {
const llvm_bindings = @import("../codegen/llvm/bindings.zig");
const llvm = @import("../codegen/llvm.zig");
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
llvm.initializeLLVMTarget(target.cpu.arch);
const bad = llvm_bindings.WriteArchive(
full_out_path_z,
@ -374,7 +374,7 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
const is_dyn_lib = comp.config.link_mode == .dynamic and is_lib;
const is_exe_or_dyn_lib = is_dyn_lib or comp.config.output_mode == .Exe;
const link_in_crt = comp.config.link_libc and is_exe_or_dyn_lib;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const optimize_mode = comp.root_mod.optimize_mode;
const entry_name: ?[]const u8 = switch (coff.entry) {
// This logic isn't quite right for disabled or enabled. No point in fixing it
@ -811,7 +811,7 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
const is_dyn_lib = link_mode == .dynamic and is_lib;
const is_exe_or_dyn_lib = is_dyn_lib or output_mode == .Exe;
const have_dynamic_linker = link_mode == .dynamic and is_exe_or_dyn_lib;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const compiler_rt_path: ?Cache.Path = blk: {
if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
@ -1281,7 +1281,7 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
try spawnLld(comp, arena, argv.items);
}
}
fn getLDMOption(target: std.Target) ?[]const u8 {
fn getLDMOption(target: *const std.Target) ?[]const u8 {
// This should only return emulations understood by LLD's parseEmulation().
return switch (target.cpu.arch) {
.aarch64 => switch (target.os.tag) {
@ -1364,7 +1364,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
const shared_memory = comp.config.shared_memory;
const export_memory = comp.config.export_memory;
const import_memory = comp.config.import_memory;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const base = &lld.base;
const wasm = &lld.ofmt.wasm;

View File

@ -163,7 +163,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*MachO {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
assert(target.ofmt == .macho);
const gpa = comp.gpa;
@ -3092,7 +3092,7 @@ pub fn lowerUav(
uav: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Zcu.LazySrcLoc,
) !codegen.GenResult {
) !codegen.SymbolResult {
return self.getZigObject().?.lowerUav(self, pt, uav, explicit_alignment, src_loc);
}
@ -3545,8 +3545,8 @@ pub fn markDirty(self: *MachO, sect_index: u8) void {
}
}
pub fn getTarget(self: MachO) std.Target {
return self.base.comp.root_mod.resolved_target.result;
pub fn getTarget(self: *const MachO) *const std.Target {
return &self.base.comp.root_mod.resolved_target.result;
}
/// XNU, starting with Big Sur on arm64, caches inodes of running binaries.
@ -4233,7 +4233,7 @@ pub const Platform = struct {
}
}
pub fn fromTarget(target: std.Target) Platform {
pub fn fromTarget(target: *const std.Target) Platform {
return .{
.os_tag = target.os.tag,
.abi = target.abi,

View File

@ -704,7 +704,7 @@ pub fn lowerUav(
uav: InternPool.Index,
explicit_alignment: Atom.Alignment,
src_loc: Zcu.LazySrcLoc,
) !codegen.GenResult {
) !codegen.SymbolResult {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const val = Value.fromInterned(uav);
@ -716,7 +716,7 @@ pub fn lowerUav(
const sym = self.symbols.items[metadata.symbol_index];
const existing_alignment = sym.getAtom(macho_file).?.alignment;
if (uav_alignment.order(existing_alignment).compare(.lte))
return .{ .mcv = .{ .load_symbol = sym.nlist_idx } };
return .{ .sym_index = metadata.symbol_index };
}
var name_buf: [32]u8 = undefined;
@ -740,14 +740,11 @@ pub fn lowerUav(
.{@errorName(e)},
) },
};
const sym_index = switch (res) {
.ok => |sym_index| sym_index,
.fail => |em| return .{ .fail = em },
};
try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index });
return .{ .mcv = .{
.load_symbol = self.symbols.items[sym_index].nlist_idx,
} };
switch (res) {
.sym_index => |sym_index| try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index }),
.fail => {},
}
return res;
}
fn freeNavMetadata(self: *ZigObject, macho_file: *MachO, sym_index: Symbol.Index) void {
@ -948,7 +945,7 @@ fn updateNavCode(
log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
@ -1187,11 +1184,6 @@ fn getNavOutputSection(
return macho_file.zig_data_sect_index.?;
}
const LowerConstResult = union(enum) {
ok: Symbol.Index,
fail: *Zcu.ErrorMsg,
};
fn lowerConst(
self: *ZigObject,
macho_file: *MachO,
@ -1201,7 +1193,7 @@ fn lowerConst(
required_alignment: Atom.Alignment,
output_section_index: u8,
src_loc: Zcu.LazySrcLoc,
) !LowerConstResult {
) !codegen.SymbolResult {
const gpa = macho_file.base.comp.gpa;
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
@ -1241,7 +1233,7 @@ fn lowerConst(
const file_offset = sect.offset + atom.value;
try macho_file.pwriteAll(code, file_offset);
return .{ .ok = sym_index };
return .{ .sym_index = sym_index };
}
pub fn updateExports(
@ -1265,7 +1257,7 @@ pub fn updateExports(
const first_exp = export_indices[0].ptr(zcu);
const res = try self.lowerUav(macho_file, pt, uav, .none, first_exp.src);
switch (res) {
.mcv => {},
.sym_index => {},
.fail => |em| {
// TODO maybe it's enough to return an error here and let Zcu.processExportsInner
// handle the error?

View File

@ -184,7 +184,7 @@ pub const Atom = struct {
// asserts that self.got_index != null
pub fn getOffsetTableAddress(self: Atom, plan9: *Plan9) u64 {
const target = plan9.base.comp.root_mod.resolved_target.result;
const target = &plan9.base.comp.root_mod.resolved_target.result;
const ptr_bytes = @divExact(target.ptrBitWidth(), 8);
const got_addr = plan9.bases.data;
const got_index = self.got_index.?;
@ -278,7 +278,7 @@ pub fn createEmpty(
emit: Path,
options: link.File.OpenOptions,
) !*Plan9 {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const gpa = comp.gpa;
const optimize_mode = comp.root_mod.optimize_mode;
const output_mode = comp.config.output_mode;
@ -394,7 +394,7 @@ pub fn updateFunc(
const zcu = pt.zcu;
const gpa = zcu.gpa;
const target = self.base.comp.root_mod.resolved_target.result;
const target = &self.base.comp.root_mod.resolved_target.result;
const func = zcu.funcInfo(func_index);
const atom_idx = try self.seeNav(pt, func.owner_nav);
@ -583,7 +583,7 @@ pub fn flush(
const comp = self.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
switch (comp.config.output_mode) {
.Exe => {},
@ -1153,7 +1153,7 @@ pub fn open(
emit: Path,
options: link.File.OpenOptions,
) !*Plan9 {
const target = comp.root_mod.resolved_target.result;
const target = &comp.root_mod.resolved_target.result;
const use_lld = build_options.have_llvm and comp.config.use_lld;
const use_llvm = comp.config.use_llvm;
@ -1358,7 +1358,7 @@ pub fn lowerUav(
uav: InternPool.Index,
explicit_alignment: InternPool.Alignment,
src_loc: Zcu.LazySrcLoc,
) !codegen.GenResult {
) !codegen.SymbolResult {
_ = explicit_alignment;
// example:
// const ty = mod.intern_pool.typeOf(decl_val).toType();
@ -1370,7 +1370,7 @@ pub fn lowerUav(
// ...
const gpa = self.base.comp.gpa;
const gop = try self.uavs.getOrPut(gpa, uav);
if (gop.found_existing) return .{ .mcv = .{ .load_direct = gop.value_ptr.* } };
if (gop.found_existing) return .{ .sym_index = gop.value_ptr.* };
const val = Value.fromInterned(uav);
const name = try std.fmt.allocPrint(gpa, "__anon_{d}", .{@intFromEnum(uav)});
@ -1395,7 +1395,7 @@ pub fn lowerUav(
.value = undefined,
.name = name,
};
return .{ .mcv = .{ .load_direct = index } };
return .{ .sym_index = index };
}
pub fn getUavVAddr(self: *Plan9, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {

Some files were not shown because too many files have changed in this diff.