x86_64: implement switch jump tables

Jacob Young 2025-01-02 03:10:19 -05:00
parent ac1a975f9b
commit e5d5a8bc4e
33 changed files with 476 additions and 199 deletions
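For orientation: with this change, a switch whose case values form a dense enough range lowers to a single indexed jump through a per-function table of code addresses instead of a chain of compares. A hypothetical switch that qualifies under the heuristics added in the CodeGen.zig hunks below, with the rough shape of the emitted code in comments:

// eight prong items over the range 0..7: an 8-entry table with no waste,
// which passes the 4..256 prong-item and at-most-75%-waste checks below
fn dispatch(op: u8) u8 {
    return switch (op) {
        0 => 10, 1 => 11, 2 => 12, 3 => 13,
        4 => 14, 5 => 15, 6 => 16, 7 => 17,
        else => 0,
    };
}
// emitted roughly as (min is 0 here, so no bias `sub` is needed):
//   cmp  <op>, 7                  ; range check against table_len - 1
//   ja   .else_prong              ; out-of-range values take the else prong
//   jmp  qword ptr [table + <op>*8]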

@@ -161,17 +161,17 @@ const WindowsImpl = struct {
 }
 }
-if (comptime builtin.mode == .Debug) {
+if (builtin.mode == .Debug) {
 // The internal state of the DebugMutex needs to be handled here as well.
 mutex.impl.locking_thread.store(0, .unordered);
 }
 const rc = os.windows.kernel32.SleepConditionVariableSRW(
 &self.condition,
-if (comptime builtin.mode == .Debug) &mutex.impl.impl.srwlock else &mutex.impl.srwlock,
+if (builtin.mode == .Debug) &mutex.impl.impl.srwlock else &mutex.impl.srwlock,
 timeout_ms,
 0, // the srwlock was assumed to acquired in exclusive mode not shared
 );
-if (comptime builtin.mode == .Debug) {
+if (builtin.mode == .Debug) {
 // The internal state of the DebugMutex needs to be handled here as well.
 mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .unordered);
 }
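Most of the std hunks in this commit, like the one above, just drop a redundant `comptime` keyword: `builtin.mode`, `builtin.target`, and friends are comptime-known, so the branch folds at compile time either way and the dead arm is never analyzed. A minimal sketch of why the keyword adds nothing here (hypothetical function, not from the commit):

const builtin = @import("builtin");

fn debugOnlyBookkeeping() void {
    // `builtin.mode` is comptime-known, so this `if` is resolved during
    // semantic analysis with or without a `comptime` keyword; in release
    // modes the branch body is never analyzed or emitted.
    if (builtin.mode == .Debug) {
        // debug-only work would go here
    }
}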

@@ -158,7 +158,7 @@ const FutexImpl = struct {
 // On x86, use `lock bts` instead of `lock cmpxchg` as:
 // - they both seem to mark the cache-line as modified regardless: https://stackoverflow.com/a/63350048
 // - `lock bts` is smaller instruction-wise which makes it better for inlining
-if (comptime builtin.target.cpu.arch.isX86()) {
+if (builtin.target.cpu.arch.isX86()) {
 const locked_bit = @ctz(locked);
 return self.state.bitSet(locked_bit, .acquire) == 0;
 }
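As a standalone illustration of the fast path the comment describes (a hypothetical SpinBit type, not the real std.Thread.Mutex API): `std.atomic.Value.bitSet` lowers to `lock bts` on x86, and the returned previous bit tells us whether we won the lock.

const std = @import("std");

const SpinBit = struct {
    state: std.atomic.Value(u32) = .init(0),

    fn tryLock(self: *SpinBit) bool {
        // Atomically set bit 0; the previous value is 0 iff the lock was free.
        return self.state.bitSet(0, .acquire) == 0;
    }

    fn unlock(self: *SpinBit) void {
        self.state.store(0, .release);
    }
};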

@@ -179,7 +179,7 @@ pub fn dumpHexFallible(bytes: []const u8) !void {
 /// TODO multithreaded awareness
 pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
 nosuspend {
-if (comptime builtin.target.isWasm()) {
+if (builtin.target.isWasm()) {
 if (native_os == .wasi) {
 const stderr = io.getStdErr().writer();
 stderr.print("Unable to dump stack trace: not implemented for Wasm\n", .{}) catch return;
@@ -267,7 +267,7 @@ pub inline fn getContext(context: *ThreadContext) bool {
 /// TODO multithreaded awareness
 pub fn dumpStackTraceFromBase(context: *ThreadContext) void {
 nosuspend {
-if (comptime builtin.target.isWasm()) {
+if (builtin.target.isWasm()) {
 if (native_os == .wasi) {
 const stderr = io.getStdErr().writer();
 stderr.print("Unable to dump stack trace: not implemented for Wasm\n", .{}) catch return;
@@ -365,7 +365,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT
 /// TODO multithreaded awareness
 pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void {
 nosuspend {
-if (comptime builtin.target.isWasm()) {
+if (builtin.target.isWasm()) {
 if (native_os == .wasi) {
 const stderr = io.getStdErr().writer();
 stderr.print("Unable to dump stack trace: not implemented for Wasm\n", .{}) catch return;

@@ -121,13 +121,13 @@ pub fn deinit(self: *SelfInfo) void {
 }
 pub fn getModuleForAddress(self: *SelfInfo, address: usize) !*Module {
-if (comptime builtin.target.isDarwin()) {
+if (builtin.target.isDarwin()) {
 return self.lookupModuleDyld(address);
 } else if (native_os == .windows) {
 return self.lookupModuleWin32(address);
 } else if (native_os == .haiku) {
 return self.lookupModuleHaiku(address);
-} else if (comptime builtin.target.isWasm()) {
+} else if (builtin.target.isWasm()) {
 return self.lookupModuleWasm(address);
 } else {
 return self.lookupModuleDl(address);
@@ -138,13 +138,13 @@ pub fn getModuleForAddress(self: *SelfInfo, address: usize) !*Module {
 // This can be called when getModuleForAddress fails, so implementations should provide
 // a path that doesn't rely on any side-effects of a prior successful module lookup.
 pub fn getModuleNameForAddress(self: *SelfInfo, address: usize) ?[]const u8 {
-if (comptime builtin.target.isDarwin()) {
+if (builtin.target.isDarwin()) {
 return self.lookupModuleNameDyld(address);
 } else if (native_os == .windows) {
 return self.lookupModuleNameWin32(address);
 } else if (native_os == .haiku) {
 return null;
-} else if (comptime builtin.target.isWasm()) {
+} else if (builtin.target.isWasm()) {
 return null;
 } else {
 return self.lookupModuleNameDl(address);

@@ -890,7 +890,7 @@ test {
 _ = @import("heap/memory_pool.zig");
 _ = ArenaAllocator;
 _ = GeneralPurposeAllocator;
-if (comptime builtin.target.isWasm()) {
+if (builtin.target.isWasm()) {
 _ = WasmAllocator;
 _ = WasmPageAllocator;
 }

@@ -2523,7 +2523,7 @@ pub const Const = struct {
 /// Returns the number of leading zeros in twos-complement form.
 pub fn clz(a: Const, bits: Limb) Limb {
 // Limbs are stored in little-endian order but we need to iterate big-endian.
-if (!a.positive) return 0;
+if (!a.positive and !a.eqlZero()) return 0;
 var total_limb_lz: Limb = 0;
 var i: usize = a.limbs.len;
 const bits_per_limb = @bitSizeOf(Limb);
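Unlike the surrounding hunks, this one is a behavior fix rather than a `comptime` cleanup: a big int can hold zero with its sign flag unset (`positive == false`), and zero must fall through to the limb scan so that all `bits` leading zeros are counted. The twos-complement intuition, sketched with fixed-width integers:

const std = @import("std");

test "twos-complement clz: negative values have none, zero has all" {
    // any negative value has its sign bit set, so there are no leading zeros...
    try std.testing.expectEqual(@as(u4, 0), @clz(@as(u8, @bitCast(@as(i8, -1)))));
    // ...while zero is nothing but leading zeros, regardless of a stored sign flag
    try std.testing.expectEqual(@as(u4, 8), @clz(@as(u8, 0)));
}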

@@ -157,7 +157,7 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.
 return target;
 },
 .freebsd => {
-if (comptime builtin.os.isAtLeast(.freebsd, .{ .major = 13, .minor = 0, .patch = 0 }) orelse false) {
+if (builtin.os.isAtLeast(.freebsd, .{ .major = 13, .minor = 0, .patch = 0 }) orelse false) {
 var kfile: std.c.kinfo_file = undefined;
 kfile.structsize = std.c.KINFO_FILE_SIZE;
 switch (posix.errno(std.c.fcntl(fd, std.c.F.KINFO, @intFromPtr(&kfile)))) {

@@ -1061,7 +1061,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
 // us INVALID_PARAMETER.
 // The same reasoning for win10_rs5 as in os.renameatW() applies (FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE requires >= win10_rs5).
 var need_fallback = true;
-if (comptime builtin.target.os.version_range.windows.min.isAtLeast(.win10_rs5)) {
+if (builtin.target.os.version_range.windows.min.isAtLeast(.win10_rs5)) {
 // Deletion with posix semantics if the filesystem supports it.
 var info = FILE_DISPOSITION_INFORMATION_EX{
 .Flags = FILE_DISPOSITION_DELETE |

@@ -6819,7 +6819,7 @@ pub fn memfd_createZ(name: [*:0]const u8, flags: u32) MemFdCreateError!fd_t {
 }
 },
 .freebsd => {
-if (comptime builtin.os.version_range.semver.max.order(.{ .major = 13, .minor = 0, .patch = 0 }) == .lt)
+if (builtin.os.version_range.semver.max.order(.{ .major = 13, .minor = 0, .patch = 0 }) == .lt)
 @compileError("memfd_create is unavailable on FreeBSD < 13.0");
 const rc = system.memfd_create(name, flags);
 switch (errno(rc)) {

@@ -804,7 +804,7 @@ test "getrlimit and setrlimit" {
 //
 // This happens for example if RLIMIT_MEMLOCK is bigger than ~2GiB.
 // In that case the following the limit would be RLIM_INFINITY and the following setrlimit fails with EPERM.
-if (comptime builtin.cpu.arch.isMIPS() and builtin.link_libc) {
+if (builtin.cpu.arch.isMIPS() and builtin.link_libc) {
 if (limit.cur != linux.RLIM.INFINITY) {
 try posix.setrlimit(resource, limit);
 }

@@ -163,7 +163,7 @@ pub fn interlace(vecs: anytype) @Vector(vectorLength(@TypeOf(vecs[0])) * vecs.le
 // The indices are correct. The problem seems to be with the @shuffle builtin.
 // On MIPS, the test that interlaces small_base gives { 0, 2, 0, 0, 64, 255, 248, 200, 0, 0 }.
 // Calling this with two inputs seems to work fine, but I'll let the compile error trigger for all inputs, just to be safe.
-comptime if (builtin.cpu.arch.isMIPS()) @compileError("TODO: Find out why interlace() doesn't work on MIPS");
+if (builtin.cpu.arch.isMIPS()) @compileError("TODO: Find out why interlace() doesn't work on MIPS");
 const VecType = @TypeOf(vecs[0]);
 const vecs_arr = @as([vecs.len]VecType, vecs);
@@ -248,7 +248,7 @@ test "vector patterns" {
 try std.testing.expectEqual([8]u32{ 10, 20, 30, 40, 55, 66, 77, 88 }, join(base, other_base));
 try std.testing.expectEqual([2]u32{ 20, 30 }, extract(base, 1, 2));
-if (comptime !builtin.cpu.arch.isMIPS()) {
+if (!builtin.cpu.arch.isMIPS()) {
 try std.testing.expectEqual([8]u32{ 10, 55, 20, 66, 30, 77, 40, 88 }, interlace(.{ base, other_base }));
 const small_braid = interlace(small_bases);
@@ -390,7 +390,7 @@ pub fn prefixScanWithFunc(
 comptime identity: std.meta.Child(@TypeOf(vec)),
 ) if (ErrorType == void) @TypeOf(vec) else ErrorType!@TypeOf(vec) {
 // I haven't debugged this, but it might be a cousin of sorts to what's going on with interlace.
-comptime if (builtin.cpu.arch.isMIPS()) @compileError("TODO: Find out why prefixScan doesn't work on MIPS");
+if (builtin.cpu.arch.isMIPS()) @compileError("TODO: Find out why prefixScan doesn't work on MIPS");
 const len = vectorLength(@TypeOf(vec));
@@ -465,9 +465,7 @@ test "vector prefix scan" {
 if ((builtin.cpu.arch == .armeb or builtin.cpu.arch == .thumbeb) and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/22060
 if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21893
-if (comptime builtin.cpu.arch.isMIPS()) {
-return error.SkipZigTest;
-}
+if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
 const int_base = @Vector(4, i32){ 11, 23, 9, -21 };
 const float_base = @Vector(4, f32){ 2, 0.5, -10, 6.54321 };

@@ -83,7 +83,7 @@ pub fn detect(arena: Allocator, native_target: std.Target) !NativePaths {
 // TODO: consider also adding homebrew paths
 // TODO: consider also adding macports paths
-if (comptime builtin.target.isDarwin()) {
+if (builtin.target.isDarwin()) {
 if (std.zig.system.darwin.isSdkInstalled(arena)) sdk: {
 const sdk = std.zig.system.darwin.getSdk(arena, native_target) orelse break :sdk;
 try self.addLibDir(try std.fs.path.join(arena, &.{ sdk, "usr/lib" }));

@@ -719,32 +719,25 @@ pub const SwitchBrTable = struct {
 /// Caller owns the memory.
 pub fn getSwitchBr(l: Liveness, gpa: Allocator, inst: Air.Inst.Index, cases_len: u32) Allocator.Error!SwitchBrTable {
-var index: usize = l.special.get(inst) orelse return SwitchBrTable{
-.deaths = &.{},
-};
+var index: usize = l.special.get(inst) orelse return .{ .deaths = &.{} };
 const else_death_count = l.extra[index];
 index += 1;
-var deaths = std.ArrayList([]const Air.Inst.Index).init(gpa);
-defer deaths.deinit();
-try deaths.ensureTotalCapacity(cases_len + 1);
+var deaths = try gpa.alloc([]const Air.Inst.Index, cases_len);
+errdefer gpa.free(deaths);
 var case_i: u32 = 0;
 while (case_i < cases_len - 1) : (case_i += 1) {
 const case_death_count: u32 = l.extra[index];
 index += 1;
-const case_deaths: []const Air.Inst.Index = @ptrCast(l.extra[index..][0..case_death_count]);
+deaths[case_i] = @ptrCast(l.extra[index..][0..case_death_count]);
 index += case_death_count;
-deaths.appendAssumeCapacity(case_deaths);
 }
 {
 // Else
-const else_deaths: []const Air.Inst.Index = @ptrCast(l.extra[index..][0..else_death_count]);
-deaths.appendAssumeCapacity(else_deaths);
+deaths[case_i] = @ptrCast(l.extra[index..][0..else_death_count]);
 }
-return SwitchBrTable{
-.deaths = try deaths.toOwnedSlice(),
-};
+return .{ .deaths = deaths };
 }
 /// Note that this information is technically redundant, but is useful for
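The rewrite above drops the ArrayList in favor of a single exact-size allocation, since the prong count (including the else prong) is known up front. A self-contained sketch of the same flat walk it performs over `extra` (hypothetical decodeDeaths helper; layout: else death count first, then each case's count followed by its payload, with the else payload last):

const std = @import("std");

fn decodeDeaths(gpa: std.mem.Allocator, extra: []const u32, cases_len: u32) ![][]const u32 {
    var index: usize = 0;
    const else_death_count = extra[index];
    index += 1;
    const deaths = try gpa.alloc([]const u32, cases_len);
    errdefer gpa.free(deaths);
    // every case but the last stores its own count before its payload
    for (deaths[0 .. cases_len - 1]) |*case_deaths| {
        const case_death_count = extra[index];
        index += 1;
        case_deaths.* = extra[index..][0..case_death_count];
        index += case_death_count;
    }
    // the else payload reuses the count read at the very start
    deaths[cases_len - 1] = extra[index..][0..else_death_count];
    return deaths;
}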

@@ -61,9 +61,10 @@ src_loc: Zcu.LazySrcLoc,
 eflags_inst: ?Air.Inst.Index = null,
 /// MIR Instructions
-mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
+mir_instructions: std.MultiArrayList(Mir.Inst) = .empty,
 /// MIR extra data
 mir_extra: std.ArrayListUnmanaged(u32) = .empty,
+mir_table: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,
 /// Byte offset within the source file of the ending curly.
 end_di_line: u32,
@@ -75,8 +76,8 @@ end_di_column: u32,
 exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,
 reused_operands: std.StaticBitSet(Liveness.bpi - 1) = undefined,
-const_tracking: ConstTrackingMap = .{},
-inst_tracking: InstTrackingMap = .{},
+const_tracking: ConstTrackingMap = .empty,
+inst_tracking: InstTrackingMap = .empty,
 // Key is the block instruction
 blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,
@@ -86,16 +87,26 @@ register_manager: RegisterManager = .{},
 /// Generation of the current scope, increments by 1 for every entered scope.
 scope_generation: u32 = 0,
-frame_allocs: std.MultiArrayList(FrameAlloc) = .{},
+frame_allocs: std.MultiArrayList(FrameAlloc) = .empty,
 free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .empty,
-frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{},
+frame_locs: std.MultiArrayList(Mir.FrameLoc) = .empty,
 loops: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
 /// The state to restore before branching.
 state: State,
 /// The branch target.
 target: Mir.Inst.Index,
-}) = .{},
+}) = .empty,
+loop_switches: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
+start: u31,
+len: u11,
+min: Value,
+else_relocs: union(enum) {
+@"unreachable",
+forward: std.ArrayListUnmanaged(Mir.Inst.Index),
+backward: Mir.Inst.Index,
+},
+}) = .empty,
 next_temp_index: Temp.Index = @enumFromInt(0),
 temp_type: [Temp.Index.max]Type = undefined,
@@ -904,6 +915,7 @@ pub fn generate(
 function.free_frame_indices.deinit(gpa);
 function.frame_locs.deinit(gpa);
 function.loops.deinit(gpa);
+function.loop_switches.deinit(gpa);
 var block_it = function.blocks.valueIterator();
 while (block_it.next()) |block| block.deinit(gpa);
 function.blocks.deinit(gpa);
@@ -912,6 +924,7 @@ pub fn generate(
 function.exitlude_jump_relocs.deinit(gpa);
 function.mir_instructions.deinit(gpa);
 function.mir_extra.deinit(gpa);
+function.mir_table.deinit(gpa);
 }
 try function.inst_tracking.ensureTotalCapacity(gpa, Temp.Index.max);
 for (0..Temp.Index.max) |temp_index| {
@@ -978,6 +991,7 @@ pub fn generate(
 var mir: Mir = .{
 .instructions = function.mir_instructions.toOwnedSlice(),
 .extra = try function.mir_extra.toOwnedSlice(gpa),
+.table = try function.mir_table.toOwnedSlice(gpa),
 .frame_locs = function.frame_locs.toOwnedSlice(),
 };
 defer mir.deinit(gpa);
@@ -1012,7 +1026,6 @@ pub fn generate(
 },
 .prev_di_pc = 0,
 };
-defer emit.deinit();
 emit.emitMir() catch |err| switch (err) {
 error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
@@ -1056,6 +1069,7 @@ pub fn generateLazy(
 defer {
 function.mir_instructions.deinit(gpa);
 function.mir_extra.deinit(gpa);
+function.mir_table.deinit(gpa);
 }
 function.genLazy(lazy_sym) catch |err| switch (err) {
@@ -1067,6 +1081,7 @@ pub fn generateLazy(
 var mir: Mir = .{
 .instructions = function.mir_instructions.toOwnedSlice(),
 .extra = try function.mir_extra.toOwnedSlice(gpa),
+.table = try function.mir_table.toOwnedSlice(gpa),
 .frame_locs = function.frame_locs.toOwnedSlice(),
 };
 defer mir.deinit(gpa);
@@ -1093,7 +1108,6 @@ pub fn generateLazy(
 .prev_di_loc = undefined, // no debug info yet
 .prev_di_pc = undefined, // no debug info yet
 };
-defer emit.deinit();
 emit.emitMir() catch |err| switch (err) {
 error.LowerFail, error.EmitFail => return function.failMsg(emit.lower.err_msg.?),
 error.InvalidInstruction => return function.fail("failed to find a viable x86 instruction (Zig compiler bug)", .{}),
@@ -1161,6 +1175,7 @@ fn formatWipMir(
 .mir = .{
 .instructions = data.self.mir_instructions.slice(),
 .extra = data.self.mir_extra.items,
+.table = data.self.mir_table.items,
 .frame_locs = (std.MultiArrayList(Mir.FrameLoc){}).slice(),
 },
 .cc = .auto,
@@ -20748,25 +20763,195 @@ fn lowerBlock(self: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index
 self.getValueIfFree(tracking.short, inst);
 }
-fn lowerSwitchBr(self: *CodeGen, inst: Air.Inst.Index, switch_br: Air.UnwrappedSwitch, condition: MCValue) !void {
+fn lowerSwitchBr(
+self: *CodeGen,
+inst: Air.Inst.Index,
+switch_br: Air.UnwrappedSwitch,
+condition: MCValue,
+condition_dies: bool,
+is_loop: bool,
+) !void {
 const zcu = self.pt.zcu;
 const condition_ty = self.typeOf(switch_br.operand);
-const liveness = try self.liveness.getSwitchBr(self.gpa, inst, switch_br.cases_len + 1);
-defer self.gpa.free(liveness.deaths);
-const signedness = switch (condition_ty.zigTypeTag(zcu)) {
-.bool, .pointer => .unsigned,
-.int, .@"enum", .error_set => condition_ty.intInfo(zcu).signedness,
-else => unreachable,
-};
+const ExpectedContents = extern struct {
+liveness_deaths: [1 << 8 | 1]Air.Inst.Index,
+bigint_limbs: [std.math.big.int.calcTwosCompLimbCount(1 << 8)]std.math.big.Limb,
+relocs: [1 << 6]Mir.Inst.Index,
+};
+var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) =
+std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa);
+const allocator = stack.get();
 self.scope_generation += 1;
 const state = try self.saveState();
-var it = switch_br.iterateCases();
-while (it.next()) |case| {
-var relocs = try self.gpa.alloc(Mir.Inst.Index, case.items.len + case.ranges.len);
-defer self.gpa.free(relocs);
+const liveness = try self.liveness.getSwitchBr(allocator, inst, switch_br.cases_len + 1);
+defer allocator.free(liveness.deaths);
+if (!self.mod.pic and self.target.ofmt == .elf) table: {
+var prong_items: u32 = 0;
+var min: ?Value = null;
+var max: ?Value = null;
+{
+var cases_it = switch_br.iterateCases();
+while (cases_it.next()) |case| {
+prong_items += @intCast(case.items.len + case.ranges.len);
+for (case.items) |item| {
+const val = Value.fromInterned(item.toInterned().?);
+if (min == null or val.compareHetero(.lt, min.?, zcu)) min = val;
+if (max == null or val.compareHetero(.gt, max.?, zcu)) max = val;
+}
+for (case.ranges) |range| {
+const low = Value.fromInterned(range[0].toInterned().?);
+if (min == null or low.compareHetero(.lt, min.?, zcu)) min = low;
+const high = Value.fromInterned(range[1].toInterned().?);
+if (max == null or high.compareHetero(.gt, max.?, zcu)) max = high;
+}
+}
+}
+// This condition also triggers for switches with no non-else prongs and switches on bool.
+if (prong_items < 1 << 2 or prong_items > 1 << 8) break :table;
+var min_space: Value.BigIntSpace = undefined;
+const min_bigint = min.?.toBigInt(&min_space, zcu);
+var max_space: Value.BigIntSpace = undefined;
+const max_bigint = max.?.toBigInt(&max_space, zcu);
+const limbs = try allocator.alloc(
+std.math.big.Limb,
+@max(min_bigint.limbs.len, max_bigint.limbs.len) + 1,
+);
+defer allocator.free(limbs);
+const table_len = table_len: {
+var table_len_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
+table_len_bigint.sub(max_bigint, min_bigint);
+assert(table_len_bigint.positive); // min <= max
+break :table_len @as(u11, table_len_bigint.toConst().to(u10) catch break :table) + 1; // no more than a 1024 entry table
+};
+assert(prong_items <= table_len); // each prong item introduces at least one unique integer to the range
+if (prong_items < table_len >> 2) break :table; // no more than 75% waste
+const condition_index = if (condition_dies and condition.isModifiable()) condition else condition_index: {
+const condition_index = try self.allocTempRegOrMem(condition_ty, true);
+try self.genCopy(condition_ty, condition_index, condition, .{});
+break :condition_index condition_index;
+};
+try self.spillEflagsIfOccupied();
+if (min.?.orderAgainstZero(zcu).compare(.neq)) try self.genBinOpMir(
+.{ ._, .sub },
+condition_ty,
+condition_index,
+.{ .air_ref = Air.internedToRef(min.?.toIntern()) },
+);
+const else_reloc = if (switch_br.else_body_len > 0) else_reloc: {
+try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition_index, .{ .immediate = table_len - 1 });
+break :else_reloc try self.asmJccReloc(.a, undefined);
+} else undefined;
+const table_start: u31 = @intCast(self.mir_table.items.len);
+{
+const condition_index_reg = if (condition_index.isRegister())
+condition_index.getReg().?
+else
+try self.copyToTmpRegister(.usize, condition_index);
+const condition_index_lock = self.register_manager.lockReg(condition_index_reg);
+defer if (condition_index_lock) |lock| self.register_manager.unlockReg(lock);
+try self.truncateRegister(condition_ty, condition_index_reg);
+const ptr_size = @divExact(self.target.ptrBitWidth(), 8);
+try self.asmMemory(.{ ._, .jmp }, .{
+.base = .table,
+.mod = .{ .rm = .{
+.size = .ptr,
+.index = registerAlias(condition_index_reg, ptr_size),
+.scale = .fromFactor(@intCast(ptr_size)),
+.disp = table_start * ptr_size,
+} },
+});
+}
+const else_reloc_marker: u32 = 0;
+assert(self.mir_instructions.len > else_reloc_marker);
+try self.mir_table.appendNTimes(self.gpa, else_reloc_marker, table_len);
+if (is_loop) try self.loop_switches.putNoClobber(self.gpa, inst, .{
+.start = table_start,
+.len = table_len,
+.min = min.?,
+.else_relocs = if (switch_br.else_body_len > 0) .{ .forward = .empty } else .@"unreachable",
+});
+defer if (is_loop) {
+var loop_switch_data = self.loop_switches.fetchRemove(inst).?.value;
+switch (loop_switch_data.else_relocs) {
+.@"unreachable", .backward => {},
+.forward => |*else_relocs| else_relocs.deinit(self.gpa),
+}
+};
+var cases_it = switch_br.iterateCases();
+while (cases_it.next()) |case| {
+{
+const table = self.mir_table.items[table_start..][0..table_len];
+for (case.items) |item| {
+const val = Value.fromInterned(item.toInterned().?);
+var val_space: Value.BigIntSpace = undefined;
+const val_bigint = val.toBigInt(&val_space, zcu);
+var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
+index_bigint.sub(val_bigint, min_bigint);
+table[index_bigint.toConst().to(u10) catch unreachable] = @intCast(self.mir_instructions.len);
+}
+for (case.ranges) |range| {
+var low_space: Value.BigIntSpace = undefined;
+const low_bigint = Value.fromInterned(range[0].toInterned().?).toBigInt(&low_space, zcu);
+var high_space: Value.BigIntSpace = undefined;
+const high_bigint = Value.fromInterned(range[1].toInterned().?).toBigInt(&high_space, zcu);
+var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
+index_bigint.sub(low_bigint, min_bigint);
+const start = index_bigint.toConst().to(u10) catch unreachable;
+index_bigint.sub(high_bigint, min_bigint);
+const end = @as(u11, index_bigint.toConst().to(u10) catch unreachable) + 1;
+@memset(table[start..end], @intCast(self.mir_instructions.len));
+}
+}
+for (liveness.deaths[case.idx]) |operand| try self.processDeath(operand);
+try self.genBodyBlock(case.body);
+try self.restoreState(state, &.{}, .{
+.emit_instructions = false,
+.update_tracking = true,
+.resurrect = true,
+.close_scope = true,
+});
+}
+if (switch_br.else_body_len > 0) {
+const else_body = cases_it.elseBody();
+const else_deaths = liveness.deaths.len - 1;
+for (liveness.deaths[else_deaths]) |operand| try self.processDeath(operand);
+self.performReloc(else_reloc);
+if (is_loop) {
+const loop_switch_data = self.loop_switches.getPtr(inst).?;
+for (loop_switch_data.else_relocs.forward.items) |reloc| self.performReloc(reloc);
+loop_switch_data.else_relocs.forward.deinit(self.gpa);
+loop_switch_data.else_relocs = .{ .backward = @intCast(self.mir_instructions.len) };
+}
+for (self.mir_table.items[table_start..][0..table_len]) |*entry| if (entry.* == else_reloc_marker) {
+entry.* = @intCast(self.mir_instructions.len);
+};
+try self.genBodyBlock(else_body);
+try self.restoreState(state, &.{}, .{
+.emit_instructions = false,
+.update_tracking = true,
+.resurrect = true,
+.close_scope = true,
+});
+}
+return;
+}
+const signedness = if (condition_ty.isAbiInt(zcu)) condition_ty.intInfo(zcu).signedness else .unsigned;
+var cases_it = switch_br.iterateCases();
+while (cases_it.next()) |case| {
+var relocs = try allocator.alloc(Mir.Inst.Index, case.items.len + case.ranges.len);
+defer allocator.free(relocs);
 try self.spillEflagsIfOccupied();
 for (case.items, relocs[0..case.items.len]) |item, *reloc| {
@@ -20849,9 +21034,8 @@ fn lowerSwitchBr(self: *CodeGen, inst: Air.Inst.Index, switch_br: Air.UnwrappedS
 // Relocate the "skip" branch to fall through to the next case.
 self.performReloc(skip_case_reloc);
 }
 if (switch_br.else_body_len > 0) {
-const else_body = it.elseBody();
+const else_body = cases_it.elseBody();
 const else_deaths = liveness.deaths.len - 1;
 for (liveness.deaths[else_deaths]) |operand| try self.processDeath(operand);
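Pulling the qualification logic above out into plain numbers: the table path needs 4 to 256 prong items (each case item or range counts once, so wide ranges do not improve the density metric), a value range of at most 1024 entries, and at least a quarter of the entries used; it is also only attempted for non-PIC ELF output. A standalone sketch with min/max as plain i64 instead of big-int Values:

const std = @import("std");

fn wantsJumpTable(prong_items: u32, min: i64, max: i64) bool {
    // between 4 and 256 case items/ranges, otherwise a compare chain wins
    if (prong_items < 1 << 2 or prong_items > 1 << 8) return false;
    const table_len = @as(u64, @intCast(max - min)) + 1;
    if (table_len > 1 << 10) return false; // no more than a 1024 entry table
    return prong_items >= table_len >> 2; // no more than 75% waste
}

test wantsJumpTable {
    try std.testing.expect(wantsJumpTable(8, 0, 7)); // dense opcode dispatch
    try std.testing.expect(!wantsJumpTable(5, 32, 122)); // a few wide ranges: too sparse by this metric
}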
@@ -20873,11 +21057,11 @@ fn airSwitchBr(self: *CodeGen, inst: Air.Inst.Index) !void {
 // If the condition dies here in this switch instruction, process
 // that death now instead of later as this has an effect on
 // whether it needs to be spilled in the branches
-if (self.liveness.operandDies(inst, 0)) {
+const condition_dies = self.liveness.operandDies(inst, 0);
+if (condition_dies) {
 if (switch_br.operand.toIndex()) |op_inst| try self.processDeath(op_inst);
 }
-try self.lowerSwitchBr(inst, switch_br, condition);
+try self.lowerSwitchBr(inst, switch_br, condition, condition_dies, false);
 // We already took care of pl_op.operand earlier, so there's nothing left to do
 }
@@ -20915,7 +21099,7 @@ fn airLoopSwitchBr(self: *CodeGen, inst: Air.Inst.Index) !void {
 // Stop tracking block result without forgetting tracking info
 try self.freeValue(mat_cond);
-try self.lowerSwitchBr(inst, switch_br, mat_cond);
+try self.lowerSwitchBr(inst, switch_br, mat_cond, true, true);
 try self.processDeath(inst);
 }
@@ -20924,8 +21108,67 @@ fn airSwitchDispatch(self: *CodeGen, inst: Air.Inst.Index) !void {
 const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br;
 const block_ty = self.typeOfIndex(br.block_inst);
-const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
 const loop_data = self.loops.getPtr(br.block_inst).?;
+if (self.loop_switches.getPtr(br.block_inst)) |table| {
+// Process operand death so that it is properly accounted for in the State below.
+const condition_dies = self.liveness.operandDies(inst, 0);
+try self.restoreState(loop_data.state, &.{}, .{
+.emit_instructions = true,
+.update_tracking = false,
+.resurrect = false,
+.close_scope = false,
+});
+const condition_ty = self.typeOf(br.operand);
+const condition = try self.resolveInst(br.operand);
+const condition_index = if (condition_dies and condition.isModifiable()) condition else condition_index: {
+const condition_index = try self.allocTempRegOrMem(condition_ty, true);
+try self.genCopy(condition_ty, condition_index, condition, .{});
+break :condition_index condition_index;
+};
+try self.spillEflagsIfOccupied();
+if (table.min.orderAgainstZero(self.pt.zcu).compare(.neq)) try self.genBinOpMir(
+.{ ._, .sub },
+condition_ty,
+condition_index,
+.{ .air_ref = Air.internedToRef(table.min.toIntern()) },
+);
+switch (table.else_relocs) {
+.@"unreachable" => {},
+.forward => |*else_relocs| {
+try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition_index, .{ .immediate = table.len - 1 });
+try else_relocs.append(self.gpa, try self.asmJccReloc(.a, undefined));
+},
+.backward => |else_reloc| {
+try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition_index, .{ .immediate = table.len - 1 });
+_ = try self.asmJccReloc(.a, else_reloc);
+},
+}
+{
+const condition_index_reg = if (condition_index.isRegister())
+condition_index.getReg().?
+else
+try self.copyToTmpRegister(.usize, condition_index);
+const condition_index_lock = self.register_manager.lockReg(condition_index_reg);
+defer if (condition_index_lock) |lock| self.register_manager.unlockReg(lock);
+try self.truncateRegister(condition_ty, condition_index_reg);
+const ptr_size = @divExact(self.target.ptrBitWidth(), 8);
+try self.asmMemory(.{ ._, .jmp }, .{
+.base = .table,
+.mod = .{ .rm = .{
+.size = .ptr,
+.index = registerAlias(condition_index_reg, ptr_size),
+.scale = .fromFactor(@intCast(ptr_size)),
+.disp = @intCast(table.start * ptr_size),
+} },
+});
+}
+return self.finishAir(inst, .none, .{ br.operand, .none, .none });
+}
+const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
 done: {
 try self.getValue(block_tracking.short, null);
 const src_mcv = try self.resolveInst(br.operand);
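airSwitchDispatch is the lowering of `continue :sw value` in a labeled switch: when the enclosing loop switch got a table, the dispatch above re-biases the new operand, re-checks the range (jumping forward or backward to the else prong), and indexes the same table again. The user-level construct, as a small hypothetical example (assumes a non-empty slice):

fn countLeadingSpaces(bytes: []const u8) usize {
    var i: usize = 0;
    sw: switch (bytes[i]) {
        ' ', '\t' => {
            i += 1;
            // each dispatch re-enters the switch through airSwitchDispatch
            if (i < bytes.len) continue :sw bytes[i];
        },
        else => {},
    }
    return i;
}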
@@ -22543,6 +22786,7 @@ fn genSetMem(
 .none => .{ .immediate = @bitCast(@as(i64, disp)) },
 .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
 .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } },
+.table => unreachable,
 .reloc => |sym_index| .{ .lea_symbol = .{ .sym_index = sym_index, .off = disp } },
 };
 switch (src_mcv) {
@@ -22652,6 +22896,7 @@ fn genSetMem(
 .index = frame_index,
 .off = disp,
 }).compare(.gte, src_align),
+.table => unreachable,
 .reloc => false,
 })).write(
 self,
@@ -23260,6 +23505,7 @@ fn airCmpxchg(self: *CodeGen, inst: Air.Inst.Index) !void {
 const ptr_lock = switch (ptr_mem.base) {
 .none, .frame, .reloc => null,
 .reg => |reg| self.register_manager.lockReg(reg),
+.table => unreachable,
 };
 defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
@@ -23327,6 +23573,7 @@ fn atomicOp(
 const mem_lock = switch (ptr_mem.base) {
 .none, .frame, .reloc => null,
 .reg => |reg| self.register_manager.lockReg(reg),
+.table => unreachable,
 };
 defer if (mem_lock) |lock| self.register_manager.unlockReg(lock);

@@ -10,22 +10,21 @@ prev_di_loc: Loc,
 /// Relative to the beginning of `code`.
 prev_di_pc: usize,
-code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,
-relocs: std.ArrayListUnmanaged(Reloc) = .empty,
 pub const Error = Lower.Error || error{
 EmitFail,
 } || link.File.UpdateDebugInfoError;
 pub fn emitMir(emit: *Emit) Error!void {
 const gpa = emit.lower.bin_file.comp.gpa;
+const code_offset_mapping = try emit.lower.allocator.alloc(u32, emit.lower.mir.instructions.len);
+defer emit.lower.allocator.free(code_offset_mapping);
+var relocs: std.ArrayListUnmanaged(Reloc) = .empty;
+defer relocs.deinit(emit.lower.allocator);
+var table_relocs: std.ArrayListUnmanaged(TableReloc) = .empty;
+defer table_relocs.deinit(emit.lower.allocator);
 for (0..emit.lower.mir.instructions.len) |mir_i| {
 const mir_index: Mir.Inst.Index = @intCast(mir_i);
-try emit.code_offset_mapping.putNoClobber(
-emit.lower.allocator,
-mir_index,
-@intCast(emit.code.items.len),
-);
+code_offset_mapping[mir_index] = @intCast(emit.code.items.len);
 const lowered = try emit.lower.lowerMir(mir_index);
 var lowered_relocs = lowered.relocs;
 for (lowered.insts, 0..) |lowered_inst, lowered_index| {
@@ -89,13 +88,17 @@ pub fn emitMir(emit: *Emit) Error!void {
 lowered_relocs[0].lowered_inst_index == lowered_index) : ({
 lowered_relocs = lowered_relocs[1..];
 }) switch (lowered_relocs[0].target) {
-.inst => |target| try emit.relocs.append(emit.lower.allocator, .{
+.inst => |target| try relocs.append(emit.lower.allocator, .{
 .source = start_offset,
 .source_offset = end_offset - 4,
 .target = target,
 .target_offset = lowered_relocs[0].off,
 .length = @intCast(end_offset - start_offset),
 }),
+.table => try table_relocs.append(emit.lower.allocator, .{
+.source_offset = end_offset - 4,
+.target_offset = lowered_relocs[0].off,
+}),
 .linker_extern_fn => |sym_index| if (emit.lower.bin_file.cast(.elf)) |elf_file| {
 // Add relocation to the decl.
 const zo = elf_file.zigObjectPtr().?;
@@ -103,7 +106,7 @@ pub fn emitMir(emit: *Emit) Error!void {
 const r_type = @intFromEnum(std.elf.R_X86_64.PLT32);
 try atom_ptr.addReloc(gpa, .{
 .r_offset = end_offset - 4,
-.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
+.r_info = @as(u64, sym_index) << 32 | r_type,
 .r_addend = lowered_relocs[0].off - 4,
 }, zo);
 } else if (emit.lower.bin_file.cast(.macho)) |macho_file| {
@@ -150,7 +153,7 @@ pub fn emitMir(emit: *Emit) Error!void {
 const r_type = @intFromEnum(std.elf.R_X86_64.TLSLD);
 try atom.addReloc(gpa, .{
 .r_offset = end_offset - 4,
-.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
+.r_info = @as(u64, sym_index) << 32 | r_type,
 .r_addend = lowered_relocs[0].off - 4,
 }, zo);
 },
@@ -161,7 +164,7 @@ pub fn emitMir(emit: *Emit) Error!void {
 const r_type = @intFromEnum(std.elf.R_X86_64.DTPOFF32);
 try atom.addReloc(gpa, .{
 .r_offset = end_offset - 4,
-.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
+.r_info = @as(u64, sym_index) << 32 | r_type,
 .r_addend = lowered_relocs[0].off,
 }, zo);
 },
@@ -176,7 +179,7 @@ pub fn emitMir(emit: *Emit) Error!void {
 @intFromEnum(std.elf.R_X86_64.PC32);
 try atom.addReloc(gpa, .{
 .r_offset = end_offset - 4,
-.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
+.r_info = @as(u64, sym_index) << 32 | r_type,
 .r_addend = lowered_relocs[0].off - 4,
 }, zo);
 } else {
@@ -186,7 +189,7 @@ pub fn emitMir(emit: *Emit) Error!void {
 @intFromEnum(std.elf.R_X86_64.@"32");
 try atom.addReloc(gpa, .{
 .r_offset = end_offset - 4,
-.r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type,
+.r_info = @as(u64, sym_index) << 32 | r_type,
 .r_addend = lowered_relocs[0].off,
 }, zo);
 }
@@ -412,7 +415,7 @@ pub fn emitMir(emit: *Emit) Error!void {
 loc_buf[0] = switch (mem.base()) {
 .none => .{ .constu = 0 },
 .reg => |reg| .{ .breg = reg.dwarfNum() },
-.frame => unreachable,
+.frame, .table => unreachable,
 .reloc => |sym_index| .{ .addr = .{ .sym = sym_index } },
 };
 break :base &loc_buf[0];
@@ -463,13 +466,40 @@ pub fn emitMir(emit: *Emit) Error!void {
 }
 }
 }
-try emit.fixupRelocs();
-}
-
-pub fn deinit(emit: *Emit) void {
-emit.relocs.deinit(emit.lower.allocator);
-emit.code_offset_mapping.deinit(emit.lower.allocator);
-emit.* = undefined;
+{
+// TODO this function currently assumes all relocs via JMP/CALL instructions are 32bit in size.
+// This should be reversed like it is done in aarch64 MIR emit code: start with the smallest
+// possible resolution, i.e., 8bit, and iteratively converge on the minimum required resolution
+// until the entire decl is correctly emitted with all JMP/CALL instructions within range.
+for (relocs.items) |reloc| {
+const target = code_offset_mapping[reloc.target];
+const disp = @as(i64, @intCast(target)) - @as(i64, @intCast(reloc.source + reloc.length)) + reloc.target_offset;
+std.mem.writeInt(i32, emit.code.items[reloc.source_offset..][0..4], @intCast(disp), .little);
+}
+}
+if (emit.lower.mir.table.len > 0) {
+if (emit.lower.bin_file.cast(.elf)) |elf_file| {
+const zo = elf_file.zigObjectPtr().?;
+const atom = zo.symbol(emit.atom_index).atom(elf_file).?;
+const ptr_size = @divExact(emit.lower.target.ptrBitWidth(), 8);
+var table_offset = std.mem.alignForward(u32, @intCast(emit.code.items.len), ptr_size);
+for (table_relocs.items) |table_reloc| try atom.addReloc(gpa, .{
+.r_offset = table_reloc.source_offset,
+.r_info = @as(u64, emit.atom_index) << 32 | @intFromEnum(std.elf.R_X86_64.@"32"),
+.r_addend = @as(i64, table_offset) + table_reloc.target_offset,
+}, zo);
+for (emit.lower.mir.table) |entry| {
+try atom.addReloc(gpa, .{
+.r_offset = table_offset,
+.r_info = @as(u64, emit.atom_index) << 32 | @intFromEnum(std.elf.R_X86_64.@"64"),
+.r_addend = code_offset_mapping[entry],
+}, zo);
+table_offset += ptr_size;
+}
+try emit.code.appendNTimes(gpa, 0, table_offset - emit.code.items.len);
+} else unreachable;
+}
 }

 fn fail(emit: *Emit, comptime format: []const u8, args: anytype) Error {
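The new tail of emitMir above also answers where the table lives: it is appended after all code bytes, padded up to pointer alignment, with one R_X86_64_64 entry per resolved target and an R_X86_64_32 relocation patching each jmp's displacement to point into it. The offset math, as a tiny sketch with a made-up code length:

const std = @import("std");

test "jump table lands after the code, pointer-aligned" {
    const code_len: u32 = 37; // hypothetical machine code length
    const ptr_size: u32 = 8; // x86_64
    const table_offset = std.mem.alignForward(u32, code_len, ptr_size);
    try std.testing.expectEqual(@as(u32, 40), table_offset);
    // entry i then sits at table_offset + i * ptr_size, and its
    // R_X86_64_64 addend is the code offset of the target instruction
}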
@@ -481,7 +511,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) Error {
 const Reloc = struct {
 /// Offset of the instruction.
-source: usize,
+source: u32,
 /// Offset of the relocation within the instruction.
 source_offset: u32,
 /// Target of the relocation.
@@ -492,18 +522,12 @@ const Reloc = struct {
 length: u5,
 };
-fn fixupRelocs(emit: *Emit) Error!void {
-// TODO this function currently assumes all relocs via JMP/CALL instructions are 32bit in size.
-// This should be reversed like it is done in aarch64 MIR emit code: start with the smallest
-// possible resolution, i.e., 8bit, and iteratively converge on the minimum required resolution
-// until the entire decl is correctly emitted with all JMP/CALL instructions within range.
-for (emit.relocs.items) |reloc| {
-const target = emit.code_offset_mapping.get(reloc.target) orelse
-return emit.fail("JMP/CALL relocation target not found!", .{});
-const disp = @as(i64, @intCast(target)) - @as(i64, @intCast(reloc.source + reloc.length)) + reloc.target_offset;
-std.mem.writeInt(i32, emit.code.items[reloc.source_offset..][0..4], @intCast(disp), .little);
-}
-}
+const TableReloc = struct {
+/// Offset of the relocation.
+source_offset: u32,
+/// Offset from the start of the table.
+target_offset: i32,
+};
 const Loc = struct {
 line: u32,

@@ -57,6 +57,7 @@ pub const Reloc = struct {
 const Target = union(enum) {
 inst: Mir.Inst.Index,
+table,
 linker_reloc: u32,
 linker_tlsld: u32,
 linker_dtpoff: u32,
@@ -348,7 +349,7 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
 return error.LowerFail;
 }
-pub fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
+pub fn imm(lower: *const Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
 return switch (ops) {
 .rri_s,
 .ri_s,
@@ -379,8 +380,16 @@ pub fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
 };
 }
-pub fn mem(lower: Lower, payload: u32) Memory {
-return lower.mir.resolveFrameLoc(lower.mir.extraData(Mir.Memory, payload).data).decode();
+pub fn mem(lower: *Lower, payload: u32) Memory {
+var m = lower.mir.resolveFrameLoc(lower.mir.extraData(Mir.Memory, payload).data).decode();
+switch (m) {
+.sib => |*sib| switch (sib.base) {
+else => {},
+.table => sib.disp = lower.reloc(.table, sib.disp).signed,
+},
+else => {},
+}
+return m;
 }
 fn reloc(lower: *Lower, target: Reloc.Target, off: i32) Immediate {

@@ -9,6 +9,7 @@
 instructions: std.MultiArrayList(Inst).Slice,
 /// The meaning of this data is determined by `Inst.Tag` value.
 extra: []const u32,
+table: []const Inst.Index,
 frame_locs: std.MultiArrayList(FrameLoc).Slice,
 pub const Inst = struct {
@@ -1237,7 +1238,7 @@ pub const Memory = struct {
 size: bits.Memory.Size,
 index: Register,
 scale: bits.Memory.Scale,
-_: u16 = undefined,
+_: u15 = undefined,
 };
 pub fn encode(mem: bits.Memory) Memory {
@@ -1260,7 +1261,7 @@ pub const Memory = struct {
 },
 },
 .base = switch (mem.base) {
-.none => undefined,
+.none, .table => undefined,
 .reg => |reg| @intFromEnum(reg),
 .frame => |frame_index| @intFromEnum(frame_index),
 .reloc => |sym_index| sym_index,
@@ -1289,6 +1290,7 @@ pub const Memory = struct {
 .none => .none,
 .reg => .{ .reg = @enumFromInt(mem.base) },
 .frame => .{ .frame = @enumFromInt(mem.base) },
+.table => .table,
 .reloc => .{ .reloc = mem.base },
 },
 .scale_index = switch (mem.info.index) {
@@ -1317,6 +1319,7 @@ pub const Memory = struct {
 pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
 mir.instructions.deinit(gpa);
 gpa.free(mir.extra);
+gpa.free(mir.table);
 mir.frame_locs.deinit(gpa);
 mir.* = undefined;
 }
@@ -1352,7 +1355,7 @@ pub fn resolveFrameAddr(mir: Mir, frame_addr: bits.FrameAddr) bits.RegisterOffse
 pub fn resolveFrameLoc(mir: Mir, mem: Memory) Memory {
 return switch (mem.info.base) {
-.none, .reg, .reloc => mem,
+.none, .reg, .table, .reloc => mem,
 .frame => if (mir.frame_locs.len > 0) .{
 .info = .{
 .base = .reg,

@@ -482,17 +482,18 @@ pub const Memory = struct {
 base: Base = .none,
 mod: Mod = .{ .rm = .{} },
-pub const Base = union(enum(u2)) {
+pub const Base = union(enum(u3)) {
 none,
 reg: Register,
 frame: FrameIndex,
+table,
 reloc: u32,
 pub const Tag = @typeInfo(Base).@"union".tag_type.?;
 pub fn isExtended(self: Base) bool {
 return switch (self) {
-.none, .frame, .reloc => false, // rsp, rbp, and rip are not extended
+.none, .frame, .table, .reloc => false, // rsp, rbp, and rip are not extended
 .reg => |reg| reg.isExtended(),
 };
 }
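This hunk pairs with the `_: u16` to `_: u15` change in Mir.zig above: a fifth `Base` variant no longer fits in a two-bit tag, so the tag widens to `enum(u3)` and the packed Memory.Info struct gives up one padding bit to stay the same size. A sketch of the invariant (payload types swapped for plain integers):

const std = @import("std");

test "five Base variants need a three-bit tag" {
    const Base = union(enum(u3)) { none, reg: u8, frame: u32, table, reloc: u32 };
    try std.testing.expectEqual(3, @bitSizeOf(@typeInfo(Base).@"union".tag_type.?));
}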

@ -138,7 +138,7 @@ pub const Instruction = struct {
.moffs => true, .moffs => true,
.rip => false, .rip => false,
.sib => |s| switch (s.base) { .sib => |s| switch (s.base) {
.none, .frame, .reloc => false, .none, .frame, .table, .reloc => false,
.reg => |reg| reg.class() == .segment, .reg => |reg| reg.class() == .segment,
}, },
}; };
@ -161,9 +161,9 @@ pub const Instruction = struct {
pub fn disp(mem: Memory) Immediate { pub fn disp(mem: Memory) Immediate {
return switch (mem) { return switch (mem) {
.sib => |s| Immediate.s(s.disp), .sib => |s| .s(s.disp),
.rip => |r| Immediate.s(r.disp), .rip => |r| .s(r.disp),
.moffs => |m| Immediate.u(m.offset), .moffs => |m| .u(m.offset),
}; };
} }
@ -277,6 +277,7 @@ pub const Instruction = struct {
.none => any = false, .none => any = false,
.reg => |reg| try writer.print("{s}", .{@tagName(reg)}), .reg => |reg| try writer.print("{s}", .{@tagName(reg)}),
.frame => |frame_index| try writer.print("{}", .{frame_index}), .frame => |frame_index| try writer.print("{}", .{frame_index}),
.table => try writer.print("Table", .{}),
.reloc => |sym_index| try writer.print("Symbol({d})", .{sym_index}), .reloc => |sym_index| try writer.print("Symbol({d})", .{sym_index}),
} }
if (mem.scaleIndex()) |si| { if (mem.scaleIndex()) |si| {
@ -614,7 +615,7 @@ pub const Instruction = struct {
switch (mem) { switch (mem) {
.moffs => unreachable, .moffs => unreachable,
.sib => |sib| switch (sib.base) { .sib => |sib| switch (sib.base) {
.none => { .none, .table => {
try encoder.modRm_SIBDisp0(operand_enc); try encoder.modRm_SIBDisp0(operand_enc);
if (mem.scaleIndex()) |si| { if (mem.scaleIndex()) |si| {
const scale = math.log2_int(u4, si.scale); const scale = math.log2_int(u4, si.scale);
@ -1191,7 +1192,7 @@ const TestEncode = struct {
) !void { ) !void {
var stream = std.io.fixedBufferStream(&enc.buffer); var stream = std.io.fixedBufferStream(&enc.buffer);
var count_writer = std.io.countingWriter(stream.writer()); var count_writer = std.io.countingWriter(stream.writer());
const inst = try Instruction.new(.none, mnemonic, ops); const inst: Instruction = try .new(.none, mnemonic, ops);
try inst.encode(count_writer.writer(), .{}); try inst.encode(count_writer.writer(), .{});
enc.index = count_writer.bytes_written; enc.index = count_writer.bytes_written;
} }
@ -1205,9 +1206,9 @@ test "encode" {
var buf = std.ArrayList(u8).init(testing.allocator); var buf = std.ArrayList(u8).init(testing.allocator);
defer buf.deinit(); defer buf.deinit();
const inst = try Instruction.new(.none, .mov, &.{ const inst: Instruction = try .new(.none, .mov, &.{
.{ .reg = .rbx }, .{ .reg = .rbx },
.{ .imm = Instruction.Immediate.u(4) }, .{ .imm = .u(4) },
}); });
try inst.encode(buf.writer(), .{}); try inst.encode(buf.writer(), .{});
try testing.expectEqualSlices(u8, &.{ 0x48, 0xc7, 0xc3, 0x4, 0x0, 0x0, 0x0 }, buf.items); try testing.expectEqualSlices(u8, &.{ 0x48, 0xc7, 0xc3, 0x4, 0x0, 0x0, 0x0 }, buf.items);
@ -1217,47 +1218,47 @@ test "lower I encoding" {
var enc = TestEncode{}; var enc = TestEncode{};
try enc.encode(.push, &.{ try enc.encode(.push, &.{
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x6A\x10", enc.code(), "push 0x10"); try expectEqualHexStrings("\x6A\x10", enc.code(), "push 0x10");
try enc.encode(.push, &.{ try enc.encode(.push, &.{
.{ .imm = Instruction.Immediate.u(0x1000) }, .{ .imm = .u(0x1000) },
}); });
try expectEqualHexStrings("\x66\x68\x00\x10", enc.code(), "push 0x1000"); try expectEqualHexStrings("\x66\x68\x00\x10", enc.code(), "push 0x1000");
try enc.encode(.push, &.{ try enc.encode(.push, &.{
.{ .imm = Instruction.Immediate.u(0x10000000) }, .{ .imm = .u(0x10000000) },
}); });
try expectEqualHexStrings("\x68\x00\x00\x00\x10", enc.code(), "push 0x10000000"); try expectEqualHexStrings("\x68\x00\x00\x00\x10", enc.code(), "push 0x10000000");
try enc.encode(.adc, &.{ try enc.encode(.adc, &.{
.{ .reg = .rax }, .{ .reg = .rax },
.{ .imm = Instruction.Immediate.u(0x10000000) }, .{ .imm = .u(0x10000000) },
}); });
try expectEqualHexStrings("\x48\x15\x00\x00\x00\x10", enc.code(), "adc rax, 0x10000000"); try expectEqualHexStrings("\x48\x15\x00\x00\x00\x10", enc.code(), "adc rax, 0x10000000");
try enc.encode(.add, &.{ try enc.encode(.add, &.{
.{ .reg = .al }, .{ .reg = .al },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x04\x10", enc.code(), "add al, 0x10"); try expectEqualHexStrings("\x04\x10", enc.code(), "add al, 0x10");
try enc.encode(.add, &.{ try enc.encode(.add, &.{
.{ .reg = .rax }, .{ .reg = .rax },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x48\x83\xC0\x10", enc.code(), "add rax, 0x10"); try expectEqualHexStrings("\x48\x83\xC0\x10", enc.code(), "add rax, 0x10");
try enc.encode(.sbb, &.{ try enc.encode(.sbb, &.{
.{ .reg = .ax }, .{ .reg = .ax },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x66\x1D\x10\x00", enc.code(), "sbb ax, 0x10"); try expectEqualHexStrings("\x66\x1D\x10\x00", enc.code(), "sbb ax, 0x10");
try enc.encode(.xor, &.{ try enc.encode(.xor, &.{
.{ .reg = .al }, .{ .reg = .al },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x34\x10", enc.code(), "xor al, 0x10"); try expectEqualHexStrings("\x34\x10", enc.code(), "xor al, 0x10");
} }
@@ -1267,43 +1268,43 @@ test "lower MI encoding" {
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .r12 }, .{ .reg = .r12 },
.{ .imm = Instruction.Immediate.u(0x1000) }, .{ .imm = .u(0x1000) },
}); });
try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000"); try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .mem = Instruction.Memory.initSib(.byte, .{ .base = .{ .reg = .r12 } }) }, .{ .mem = Instruction.Memory.initSib(.byte, .{ .base = .{ .reg = .r12 } }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x41\xC6\x04\x24\x10", enc.code(), "mov BYTE PTR [r12], 0x10"); try expectEqualHexStrings("\x41\xC6\x04\x24\x10", enc.code(), "mov BYTE PTR [r12], 0x10");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .r12 }, .{ .reg = .r12 },
.{ .imm = Instruction.Immediate.u(0x1000) }, .{ .imm = .u(0x1000) },
}); });
try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000"); try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .r12 }, .{ .reg = .r12 },
.{ .imm = Instruction.Immediate.u(0x1000) }, .{ .imm = .u(0x1000) },
}); });
try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000"); try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .rax }, .{ .reg = .rax },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x48\xc7\xc0\x10\x00\x00\x00", enc.code(), "mov rax, 0x10"); try expectEqualHexStrings("\x48\xc7\xc0\x10\x00\x00\x00", enc.code(), "mov rax, 0x10");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .r11 } }) }, .{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .r11 } }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x41\xc7\x03\x10\x00\x00\x00", enc.code(), "mov DWORD PTR [r11], 0x10"); try expectEqualHexStrings("\x41\xc7\x03\x10\x00\x00\x00", enc.code(), "mov DWORD PTR [r11], 0x10");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .mem = Instruction.Memory.initRip(.qword, 0x10) }, .{ .mem = Instruction.Memory.initRip(.qword, 0x10) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x48\xC7\x05\x10\x00\x00\x00\x10\x00\x00\x00", "\x48\xC7\x05\x10\x00\x00\x00\x10\x00\x00\x00",
@@ -1313,19 +1314,19 @@ test "lower MI encoding" {
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .mem = Instruction.Memory.initSib(.qword, .{ .base = .{ .reg = .rbp }, .disp = -8 }) }, .{ .mem = Instruction.Memory.initSib(.qword, .{ .base = .{ .reg = .rbp }, .disp = -8 }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x48\xc7\x45\xf8\x10\x00\x00\x00", enc.code(), "mov QWORD PTR [rbp - 8], 0x10"); try expectEqualHexStrings("\x48\xc7\x45\xf8\x10\x00\x00\x00", enc.code(), "mov QWORD PTR [rbp - 8], 0x10");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .mem = Instruction.Memory.initSib(.word, .{ .base = .{ .reg = .rbp }, .disp = -2 }) }, .{ .mem = Instruction.Memory.initSib(.word, .{ .base = .{ .reg = .rbp }, .disp = -2 }) },
.{ .imm = Instruction.Immediate.s(-16) }, .{ .imm = .s(-16) },
}); });
try expectEqualHexStrings("\x66\xC7\x45\xFE\xF0\xFF", enc.code(), "mov WORD PTR [rbp - 2], -16"); try expectEqualHexStrings("\x66\xC7\x45\xFE\xF0\xFF", enc.code(), "mov WORD PTR [rbp - 2], -16");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .mem = Instruction.Memory.initSib(.byte, .{ .base = .{ .reg = .rbp }, .disp = -1 }) }, .{ .mem = Instruction.Memory.initSib(.byte, .{ .base = .{ .reg = .rbp }, .disp = -1 }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\xC6\x45\xFF\x10", enc.code(), "mov BYTE PTR [rbp - 1], 0x10"); try expectEqualHexStrings("\xC6\x45\xFF\x10", enc.code(), "mov BYTE PTR [rbp - 1], 0x10");
@@ -1335,7 +1336,7 @@ test "lower MI encoding" {
.disp = 0x10000000, .disp = 0x10000000,
.scale_index = .{ .scale = 2, .index = .rcx }, .scale_index = .{ .scale = 2, .index = .rcx },
}) }, }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x48\xC7\x04\x4D\x00\x00\x00\x10\x10\x00\x00\x00", "\x48\xC7\x04\x4D\x00\x00\x00\x10\x10\x00\x00\x00",
@@ -1345,43 +1346,43 @@ test "lower MI encoding" {
try enc.encode(.adc, &.{ try enc.encode(.adc, &.{
.{ .mem = Instruction.Memory.initSib(.byte, .{ .base = .{ .reg = .rbp }, .disp = -0x10 }) }, .{ .mem = Instruction.Memory.initSib(.byte, .{ .base = .{ .reg = .rbp }, .disp = -0x10 }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x80\x55\xF0\x10", enc.code(), "adc BYTE PTR [rbp - 0x10], 0x10"); try expectEqualHexStrings("\x80\x55\xF0\x10", enc.code(), "adc BYTE PTR [rbp - 0x10], 0x10");
try enc.encode(.adc, &.{ try enc.encode(.adc, &.{
.{ .mem = Instruction.Memory.initRip(.qword, 0) }, .{ .mem = Instruction.Memory.initRip(.qword, 0) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x48\x83\x15\x00\x00\x00\x00\x10", enc.code(), "adc QWORD PTR [rip], 0x10"); try expectEqualHexStrings("\x48\x83\x15\x00\x00\x00\x00\x10", enc.code(), "adc QWORD PTR [rip], 0x10");
try enc.encode(.adc, &.{ try enc.encode(.adc, &.{
.{ .reg = .rax }, .{ .reg = .rax },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x48\x83\xD0\x10", enc.code(), "adc rax, 0x10"); try expectEqualHexStrings("\x48\x83\xD0\x10", enc.code(), "adc rax, 0x10");
try enc.encode(.add, &.{ try enc.encode(.add, &.{
.{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .rdx }, .disp = -8 }) }, .{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .rdx }, .disp = -8 }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x83\x42\xF8\x10", enc.code(), "add DWORD PTR [rdx - 8], 0x10"); try expectEqualHexStrings("\x83\x42\xF8\x10", enc.code(), "add DWORD PTR [rdx - 8], 0x10");
try enc.encode(.add, &.{ try enc.encode(.add, &.{
.{ .reg = .rax }, .{ .reg = .rax },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x48\x83\xC0\x10", enc.code(), "add rax, 0x10"); try expectEqualHexStrings("\x48\x83\xC0\x10", enc.code(), "add rax, 0x10");
try enc.encode(.add, &.{ try enc.encode(.add, &.{
.{ .mem = Instruction.Memory.initSib(.qword, .{ .base = .{ .reg = .rbp }, .disp = -0x10 }) }, .{ .mem = Instruction.Memory.initSib(.qword, .{ .base = .{ .reg = .rbp }, .disp = -0x10 }) },
.{ .imm = Instruction.Immediate.s(-0x10) }, .{ .imm = .s(-0x10) },
}); });
try expectEqualHexStrings("\x48\x83\x45\xF0\xF0", enc.code(), "add QWORD PTR [rbp - 0x10], -0x10"); try expectEqualHexStrings("\x48\x83\x45\xF0\xF0", enc.code(), "add QWORD PTR [rbp - 0x10], -0x10");
try enc.encode(.@"and", &.{ try enc.encode(.@"and", &.{
.{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .ds }, .disp = 0x10000000 }) }, .{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .ds }, .disp = 0x10000000 }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x83\x24\x25\x00\x00\x00\x10\x10", "\x83\x24\x25\x00\x00\x00\x10\x10",
@@ -1391,7 +1392,7 @@ test "lower MI encoding" {
try enc.encode(.@"and", &.{ try enc.encode(.@"and", &.{
.{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .es }, .disp = 0x10000000 }) }, .{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .es }, .disp = 0x10000000 }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x26\x83\x24\x25\x00\x00\x00\x10\x10", "\x26\x83\x24\x25\x00\x00\x00\x10\x10",
@@ -1401,7 +1402,7 @@ test "lower MI encoding" {
try enc.encode(.@"and", &.{ try enc.encode(.@"and", &.{
.{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .r12 }, .disp = 0x10000000 }) }, .{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .r12 }, .disp = 0x10000000 }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x41\x83\xA4\x24\x00\x00\x00\x10\x10", "\x41\x83\xA4\x24\x00\x00\x00\x10\x10",
@@ -1411,7 +1412,7 @@ test "lower MI encoding" {
try enc.encode(.sub, &.{ try enc.encode(.sub, &.{
.{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .r11 }, .disp = 0x10000000 }) }, .{ .mem = Instruction.Memory.initSib(.dword, .{ .base = .{ .reg = .r11 }, .disp = 0x10000000 }) },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x41\x83\xAB\x00\x00\x00\x10\x10", "\x41\x83\xAB\x00\x00\x00\x10\x10",
@@ -1630,14 +1631,14 @@ test "lower RMI encoding" {
try enc.encode(.imul, &.{ try enc.encode(.imul, &.{
.{ .reg = .r11 }, .{ .reg = .r11 },
.{ .reg = .r12 }, .{ .reg = .r12 },
.{ .imm = Instruction.Immediate.s(-2) }, .{ .imm = .s(-2) },
}); });
try expectEqualHexStrings("\x4D\x6B\xDC\xFE", enc.code(), "imul r11, r12, -2"); try expectEqualHexStrings("\x4D\x6B\xDC\xFE", enc.code(), "imul r11, r12, -2");
try enc.encode(.imul, &.{ try enc.encode(.imul, &.{
.{ .reg = .r11 }, .{ .reg = .r11 },
.{ .mem = Instruction.Memory.initRip(.qword, -16) }, .{ .mem = Instruction.Memory.initRip(.qword, -16) },
.{ .imm = Instruction.Immediate.s(-1024) }, .{ .imm = .s(-1024) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x4C\x69\x1D\xF0\xFF\xFF\xFF\x00\xFC\xFF\xFF", "\x4C\x69\x1D\xF0\xFF\xFF\xFF\x00\xFC\xFF\xFF",
@@ -1648,7 +1649,7 @@ test "lower RMI encoding" {
try enc.encode(.imul, &.{ try enc.encode(.imul, &.{
.{ .reg = .bx }, .{ .reg = .bx },
.{ .mem = Instruction.Memory.initSib(.word, .{ .base = .{ .reg = .rbp }, .disp = -16 }) }, .{ .mem = Instruction.Memory.initSib(.word, .{ .base = .{ .reg = .rbp }, .disp = -16 }) },
.{ .imm = Instruction.Immediate.s(-1024) }, .{ .imm = .s(-1024) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x66\x69\x5D\xF0\x00\xFC", "\x66\x69\x5D\xF0\x00\xFC",
@@ -1659,7 +1660,7 @@ test "lower RMI encoding" {
try enc.encode(.imul, &.{ try enc.encode(.imul, &.{
.{ .reg = .bx }, .{ .reg = .bx },
.{ .mem = Instruction.Memory.initSib(.word, .{ .base = .{ .reg = .rbp }, .disp = -16 }) }, .{ .mem = Instruction.Memory.initSib(.word, .{ .base = .{ .reg = .rbp }, .disp = -16 }) },
.{ .imm = Instruction.Immediate.u(1024) }, .{ .imm = .u(1024) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x66\x69\x5D\xF0\x00\x04", "\x66\x69\x5D\xF0\x00\x04",
@@ -1775,7 +1776,7 @@ test "lower M encoding" {
try expectEqualHexStrings("\x65\xFF\x14\x25\x00\x00\x00\x00", enc.code(), "call gs:0x0"); try expectEqualHexStrings("\x65\xFF\x14\x25\x00\x00\x00\x00", enc.code(), "call gs:0x0");
try enc.encode(.call, &.{ try enc.encode(.call, &.{
.{ .imm = Instruction.Immediate.s(0) }, .{ .imm = .s(0) },
}); });
try expectEqualHexStrings("\xE8\x00\x00\x00\x00", enc.code(), "call 0x0"); try expectEqualHexStrings("\xE8\x00\x00\x00\x00", enc.code(), "call 0x0");
@@ -1834,7 +1835,7 @@ test "lower OI encoding" {
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .rax }, .{ .reg = .rax },
.{ .imm = Instruction.Immediate.u(0x1000000000000000) }, .{ .imm = .u(0x1000000000000000) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x48\xB8\x00\x00\x00\x00\x00\x00\x00\x10", "\x48\xB8\x00\x00\x00\x00\x00\x00\x00\x10",
@@ -1844,7 +1845,7 @@ test "lower OI encoding" {
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .r11 }, .{ .reg = .r11 },
.{ .imm = Instruction.Immediate.u(0x1000000000000000) }, .{ .imm = .u(0x1000000000000000) },
}); });
try expectEqualHexStrings( try expectEqualHexStrings(
"\x49\xBB\x00\x00\x00\x00\x00\x00\x00\x10", "\x49\xBB\x00\x00\x00\x00\x00\x00\x00\x10",
@@ -1854,19 +1855,19 @@ test "lower OI encoding" {
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .r11d }, .{ .reg = .r11d },
.{ .imm = Instruction.Immediate.u(0x10000000) }, .{ .imm = .u(0x10000000) },
}); });
try expectEqualHexStrings("\x41\xBB\x00\x00\x00\x10", enc.code(), "mov r11d, 0x10000000"); try expectEqualHexStrings("\x41\xBB\x00\x00\x00\x10", enc.code(), "mov r11d, 0x10000000");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .r11w }, .{ .reg = .r11w },
.{ .imm = Instruction.Immediate.u(0x1000) }, .{ .imm = .u(0x1000) },
}); });
try expectEqualHexStrings("\x66\x41\xBB\x00\x10", enc.code(), "mov r11w, 0x1000"); try expectEqualHexStrings("\x66\x41\xBB\x00\x10", enc.code(), "mov r11w, 0x1000");
try enc.encode(.mov, &.{ try enc.encode(.mov, &.{
.{ .reg = .r11b }, .{ .reg = .r11b },
.{ .imm = Instruction.Immediate.u(0x10) }, .{ .imm = .u(0x10) },
}); });
try expectEqualHexStrings("\x41\xB3\x10", enc.code(), "mov r11b, 0x10"); try expectEqualHexStrings("\x41\xB3\x10", enc.code(), "mov r11b, 0x10");
} }
@@ -1940,7 +1941,7 @@ test "lower NP encoding" {
} }
fn invalidInstruction(mnemonic: Instruction.Mnemonic, ops: []const Instruction.Operand) !void { fn invalidInstruction(mnemonic: Instruction.Mnemonic, ops: []const Instruction.Operand) !void {
const err = Instruction.new(.none, mnemonic, ops); const err = Instruction.new(.none, mnemonic, ops);
try testing.expectError(error.InvalidInstruction, err); try testing.expectError(error.InvalidInstruction, err);
} }
@@ -1988,12 +1989,12 @@ test "invalid instruction" {
.{ .reg = .r12d }, .{ .reg = .r12d },
}); });
try invalidInstruction(.push, &.{ try invalidInstruction(.push, &.{
.{ .imm = Instruction.Immediate.u(0x1000000000000000) }, .{ .imm = .u(0x1000000000000000) },
}); });
} }
fn cannotEncode(mnemonic: Instruction.Mnemonic, ops: []const Instruction.Operand) !void { fn cannotEncode(mnemonic: Instruction.Mnemonic, ops: []const Instruction.Operand) !void {
try testing.expectError(error.CannotEncode, Instruction.new(.none, mnemonic, ops)); try testing.expectError(error.CannotEncode, Instruction.new(.none, mnemonic, ops));
} }
test "cannot encode" { test "cannot encode" {
@@ -2177,7 +2178,7 @@ const Assembler = struct {
pub fn assemble(as: *Assembler, writer: anytype) !void { pub fn assemble(as: *Assembler, writer: anytype) !void {
while (try as.next()) |parsed_inst| { while (try as.next()) |parsed_inst| {
const inst = try Instruction.new(.none, parsed_inst.mnemonic, &parsed_inst.ops); const inst: Instruction = try .new(.none, parsed_inst.mnemonic, &parsed_inst.ops);
try inst.encode(writer, .{}); try inst.encode(writer, .{});
} }
} }

View File

@@ -3548,7 +3548,7 @@ pub fn getTarget(self: MachO) std.Target {
pub fn invalidateKernelCache(dir: fs.Dir, sub_path: []const u8) !void { pub fn invalidateKernelCache(dir: fs.Dir, sub_path: []const u8) !void {
const tracy = trace(@src()); const tracy = trace(@src());
defer tracy.end(); defer tracy.end();
if (comptime builtin.target.isDarwin() and builtin.target.cpu.arch == .aarch64) { if (builtin.target.isDarwin() and builtin.target.cpu.arch == .aarch64) {
try dir.copyFile(sub_path, dir, sub_path, .{}); try dir.copyFile(sub_path, dir, sub_path, .{});
} }
} }
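The comptime keyword dropped above is redundant: builtin values are comptime-known, so the condition folds at compile time and the untaken branch is never analyzed for the current target. A minimal sketch:

const std = @import("std");
const builtin = @import("builtin");

// No comptime keyword needed: the condition still folds at compile time.
test "comptime-known condition prunes the dead branch" {
    if (builtin.target.cpu.arch == .aarch64) {
        // Only analyzed when compiling for aarch64.
        try std.testing.expect(builtin.target.cpu.arch == .aarch64);
    } else {
        try std.testing.expect(builtin.target.cpu.arch != .aarch64);
    }
}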

View File

@@ -277,8 +277,8 @@ test "function alignment" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// function alignment is a compile error on wasm32/wasm64 // function alignment is a compile error on wasm
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; if (native_arch.isWasm()) return error.SkipZigTest;
const S = struct { const S = struct {
fn alignExpr() align(@sizeOf(usize) * 2) i32 { fn alignExpr() align(@sizeOf(usize) * 2) i32 {
@@ -307,8 +307,8 @@ test "implicitly decreasing fn alignment" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
// function alignment is a compile error on wasm32/wasm64 // function alignment is a compile error on wasm
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; if (native_arch.isWasm()) return error.SkipZigTest;
try testImplicitlyDecreaseFnAlign(alignedSmall, 1234); try testImplicitlyDecreaseFnAlign(alignedSmall, 1234);
try testImplicitlyDecreaseFnAlign(alignedBig, 5678); try testImplicitlyDecreaseFnAlign(alignedBig, 5678);
@@ -331,9 +331,9 @@ test "@alignCast functions" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
// function alignment is a compile error on wasm32/wasm64 // function alignment is a compile error on wasm
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; if (native_arch.isWasm()) return error.SkipZigTest;
if (native_arch == .thumb or native_arch == .thumbeb) return error.SkipZigTest; if (native_arch.isThumb()) return error.SkipZigTest;
try expect(fnExpectsOnly1(simple4) == 0x19); try expect(fnExpectsOnly1(simple4) == 0x19);
} }
@@ -496,9 +496,9 @@ test "align(N) on functions" {
return error.SkipZigTest; return error.SkipZigTest;
} }
// function alignment is a compile error on wasm32/wasm64 // function alignment is a compile error on wasm
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; if (native_arch.isWasm()) return error.SkipZigTest;
if (native_arch == .thumb or native_arch == .thumbeb) return error.SkipZigTest; if (native_arch.isThumb()) return error.SkipZigTest;
try expect((@intFromPtr(&overaligned_fn) & (0x1000 - 1)) == 0); try expect((@intFromPtr(&overaligned_fn) & (0x1000 - 1)) == 0);
} }

View File

@@ -178,7 +178,7 @@ test "rw constraint (x86_64)" {
} }
test "asm modifiers (AArch64)" { test "asm modifiers (AArch64)" {
if (builtin.target.cpu.arch != .aarch64) return error.SkipZigTest; if (!builtin.target.cpu.arch.isAARCH64()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

View File

@@ -660,6 +660,7 @@ test "arguments pointed to on stack into tailcall" {
switch (builtin.cpu.arch) { switch (builtin.cpu.arch) {
.wasm32, .wasm32,
.wasm64,
.mips, .mips,
.mipsel, .mipsel,
.mips64, .mips64,

View File

@@ -124,7 +124,7 @@ test "@floatFromInt(f80)" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1362,7 +1362,7 @@ test "cast f16 to wider types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View File

@@ -522,7 +522,7 @@ test "runtime 128 bit integer division" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View File

@@ -785,7 +785,7 @@ test "128-bit multiplication" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
{ {
@@ -1374,7 +1374,7 @@ test "remainder division" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) { if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
@@ -1527,7 +1527,7 @@ test "@round f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1540,7 +1540,7 @@ test "@round f128" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View File

@@ -122,7 +122,7 @@ test "@min/max for floats" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View File

@@ -58,7 +58,7 @@ test "@mulAdd f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -79,7 +79,7 @@ test "@mulAdd f128" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -189,7 +189,7 @@ test "vector f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector80(); try comptime vector80();
@@ -216,7 +216,7 @@ test "vector f128" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector128(); try comptime vector128();

View File

@@ -164,10 +164,10 @@ test "saturating multiplication <= 32 bits" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .wasm32) { if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/9660 // https://github.com/ziglang/zig/issues/9660
return error.SkipZigTest; return error.SkipZigTest;
} }
@@ -264,10 +264,10 @@ test "saturating multiplication" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .wasm32) { if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/9660 // https://github.com/ziglang/zig/issues/9660
return error.SkipZigTest; return error.SkipZigTest;
} }
@@ -311,7 +311,7 @@ test "saturating shift-left" {
try testSatShl(i8, 127, 1, 127); try testSatShl(i8, 127, 1, 127);
try testSatShl(i8, -128, 1, -128); try testSatShl(i8, -128, 1, -128);
// TODO: remove this check once #9668 is completed // TODO: remove this check once #9668 is completed
if (builtin.cpu.arch != .wasm32) { if (!builtin.cpu.arch.isWasm()) {
// skip testing ints > 64 bits on wasm due to miscompilation / wasmtime ci error // skip testing ints > 64 bits on wasm due to miscompilation / wasmtime ci error
try testSatShl(i128, maxInt(i128), 64, maxInt(i128)); try testSatShl(i128, maxInt(i128), 64, maxInt(i128));
try testSatShl(u128, maxInt(u128), 64, maxInt(u128)); try testSatShl(u128, maxInt(u128), 64, maxInt(u128));

View File

@@ -418,8 +418,8 @@ test "packed struct 24bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.cpu.arch == .wasm32) return error.SkipZigTest; // TODO if (builtin.cpu.arch.isWasm()) return error.SkipZigTest; // TODO
if (comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; // TODO if (builtin.cpu.arch.isArm()) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -818,7 +818,7 @@ test "non-packed struct with u128 entry in union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const U = union(enum) { const U = union(enum) {
@@ -941,7 +941,7 @@ test "tuple assigned to variable" {
test "comptime struct field" { test "comptime struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; // TODO if (builtin.cpu.arch.isArm()) return error.SkipZigTest; // TODO
const T = struct { const T = struct {
a: i32, a: i32,

View File

@@ -100,7 +100,7 @@ test "simple variadic function" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) { if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096 // https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest; return error.SkipZigTest;
} }
@@ -161,7 +161,7 @@ test "coerce reference to var arg" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) { if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096 // https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest; return error.SkipZigTest;
} }
@@ -194,7 +194,7 @@ test "variadic functions" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) { if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096 // https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest; return error.SkipZigTest;
} }
@@ -239,7 +239,7 @@ test "copy VaList" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) { if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096 // https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest; return error.SkipZigTest;
} }
@@ -273,7 +273,7 @@ test "unused VaList arg" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.os.tag != .macos and comptime builtin.cpu.arch.isAARCH64()) { if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096 // https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest; return error.SkipZigTest;
} }

View File

@@ -101,7 +101,7 @@ test "vector float operators" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) { if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
@@ -754,7 +754,7 @@ test "vector reduce operation" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArm()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21091 if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21091
@@ -989,7 +989,7 @@ test "saturating multiplication" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// TODO: once #9660 has been solved, remove this line // TODO: once #9660 has been solved, remove this line
if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest; if (builtin.target.cpu.arch.isWasm()) return error.SkipZigTest;
const S = struct { const S = struct {
fn doTheTest() !void { fn doTheTest() !void {
@@ -1256,7 +1256,7 @@ test "byte vector initialized in inline function" {
if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (comptime builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64 and if (comptime builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64 and
builtin.cpu.features.isEnabled(@intFromEnum(std.Target.x86.Feature.avx512f))) std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f))
{ {
// TODO https://github.com/ziglang/zig/issues/13279 // TODO https://github.com/ziglang/zig/issues/13279
return error.SkipZigTest; return error.SkipZigTest;
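The feature check above uses std.Target.x86.featureSetHas, which tests one flag in the target's comptime-known feature set. A minimal sketch, assuming the test runs on an x86 build:

const std = @import("std");
const builtin = @import("builtin");

// Minimal sketch: the result is comptime-known, so it can gate whole code
// paths; the untaken branch is never analyzed.
test "gate a code path on an x86 feature" {
    if (!builtin.cpu.arch.isX86()) return error.SkipZigTest;
    if (std.Target.x86.featureSetHas(builtin.cpu.features, .avx512f)) {
        // Only analyzed when the target enables AVX-512F.
        std.log.info("AVX-512F available", .{});
    }
}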

View File

@@ -83,7 +83,7 @@ test "wrapping multiplication" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// TODO: once #9660 has been solved, remove this line // TODO: once #9660 has been solved, remove this line
if (builtin.cpu.arch == .wasm32) return error.SkipZigTest; if (builtin.cpu.arch.isWasm()) return error.SkipZigTest;
const S = struct { const S = struct {
fn doTheTest() !void { fn doTheTest() !void {