Merge pull request #15435 from jacobly0/x86_64-frame

x86_64: add frame indices
Andrew Kelley 2023-04-27 15:45:01 -07:00 committed by GitHub
commit 011bc59e8a
27 changed files with 2776 additions and 2672 deletions


@ -233,14 +233,30 @@ pub fn log(
/// Simpler main(), exercising fewer language features, so that
/// work-in-progress backends can handle it.
pub fn mainSimple() anyerror!void {
//const stderr = std.io.getStdErr();
const enable_print = false;
var passed: u64 = 0;
var skipped: u64 = 0;
var failed: u64 = 0;
const stderr = if (enable_print) std.io.getStdErr() else {};
for (builtin.test_functions) |test_fn| {
test_fn.func() catch |err| {
if (enable_print) stderr.writeAll(test_fn.name) catch {};
if (err != error.SkipZigTest) {
//stderr.writeAll(test_fn.name) catch {};
//stderr.writeAll("\n") catch {};
return err;
if (enable_print) stderr.writeAll("... FAIL\n") catch {};
failed += 1;
if (!enable_print) return err;
continue;
}
if (enable_print) stderr.writeAll("... SKIP\n") catch {};
skipped += 1;
continue;
};
//if (enable_print) stderr.writeAll("... PASS\n") catch {};
passed += 1;
}
if (enable_print) {
stderr.writer().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
if (failed != 0) std.process.exit(1);
}
}
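The stderr binding above works because enable_print is comptime-known: when it is false, stderr has type void and every branch guarded by enable_print is never analyzed. A minimal standalone sketch of the pattern (the names enable_log and log_file are invented):

const std = @import("std");

// With enable_log comptime-known false, log_file is void and the
// guarded write below is pruned at compile time, so the disabled code
// never has to typecheck against a real file handle.
const enable_log = false;

pub fn main() void {
    const log_file = if (enable_log) std.io.getStdErr() else {};
    if (enable_log) log_file.writeAll("hello\n") catch {};
}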

File diff suppressed because it is too large.


@ -13,7 +13,9 @@ prev_di_pc: usize,
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
relocs: std.ArrayListUnmanaged(Reloc) = .{},
pub const Error = Lower.Error || error{EmitFail};
pub const Error = Lower.Error || error{
EmitFail,
};
pub fn emitMir(emit: *Emit) Error!void {
for (0..emit.lower.mir.instructions.len) |i| {
@ -22,7 +24,7 @@ pub fn emitMir(emit: *Emit) Error!void {
const start_offset = @intCast(u32, emit.code.items.len);
try emit.code_offset_mapping.putNoClobber(emit.lower.allocator, index, start_offset);
for (try emit.lower.lowerMir(inst)) |lower_inst| try lower_inst.encode(emit.code.writer());
for (try emit.lower.lowerMir(inst)) |lower_inst| try lower_inst.encode(emit.code.writer(), .{});
const end_offset = @intCast(u32, emit.code.items.len);
switch (inst.tag) {
@ -120,11 +122,10 @@ pub fn emitMir(emit: *Emit) Error!void {
.length = 6,
}),
.dbg_line => {
const dbg_line_column =
emit.lower.mir.extraData(Mir.DbgLineColumn, inst.data.payload).data;
try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column);
},
.dbg_line => try emit.dbgAdvancePCAndLine(
inst.data.line_column.line,
inst.data.line_column.column,
),
.dbg_prologue_end => {
switch (emit.debug_output) {


@ -47,17 +47,8 @@ pub fn findByMnemonic(
},
else => {},
} else false;
const rex_extended = for (ops) |op| switch (op) {
.reg => |r| if (r.isExtended()) break true,
.mem => |m| {
if (m.base()) |base| {
if (base.isExtended()) break true;
}
if (m.scaleIndex()) |si| {
if (si.index.isExtended()) break true;
}
},
else => {},
const rex_extended = for (ops) |op| {
if (op.isBaseExtended() or op.isIndexExtended()) break true;
} else false;
if ((rex_required or rex_extended) and rex_invalid) return error.CannotEncode;
@ -67,11 +58,11 @@ pub fn findByMnemonic(
next: for (mnemonic_to_encodings_map[@enumToInt(mnemonic)]) |data| {
switch (data.mode) {
.rex => if (!rex_required) continue,
.long => {},
.long, .sse2_long => {},
else => if (rex_required) continue,
}
for (input_ops, data.ops) |input_op, data_op|
if (!input_op.isSubset(data_op, data.mode)) continue :next;
if (!input_op.isSubset(data_op)) continue :next;
const enc = Encoding{ .mnemonic = mnemonic, .data = data };
if (shortest_enc) |previous_shortest_enc| {
@ -99,7 +90,7 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct {
if (prefixes.rex.w) {
switch (data.mode) {
.short, .fpu, .sse, .sse2, .sse4_1, .none => continue,
.long, .rex => {},
.long, .sse2_long, .rex => {},
}
} else if (prefixes.rex.present and !prefixes.rex.isSet()) {
switch (data.mode) {
@ -147,7 +138,7 @@ pub fn modRmExt(encoding: Encoding) u3 {
pub fn operandBitSize(encoding: Encoding) u64 {
switch (encoding.data.mode) {
.short => return 16,
.long => return 64,
.long, .sse2_long => return 64,
else => {},
}
const bit_size: u64 = switch (encoding.data.op_en) {
@ -172,7 +163,7 @@ pub fn format(
_ = options;
_ = fmt;
switch (encoding.data.mode) {
.long => try writer.writeAll("REX.W + "),
.long, .sse2_long => try writer.writeAll("REX.W + "),
else => {},
}
@ -273,6 +264,8 @@ pub const Mnemonic = enum {
@"test", tzcnt,
ud2,
xadd, xchg, xor,
// MMX
movd,
// SSE
addss,
cmpss,
@ -287,7 +280,7 @@ pub const Mnemonic = enum {
//cmpsd,
divsd,
maxsd, minsd,
movq, //movsd,
movq, //movd, movsd,
mulsd,
subsd,
ucomisd,
@ -470,6 +463,17 @@ pub const Op = enum {
};
}
pub fn class(op: Op) bits.Register.Class {
return switch (op) {
else => unreachable,
.al, .ax, .eax, .rax, .cl => .general_purpose,
.r8, .r16, .r32, .r64 => .general_purpose,
.rm8, .rm16, .rm32, .rm64 => .general_purpose,
.sreg => .segment,
.xmm, .xmm_m32, .xmm_m64 => .floating_point,
};
}
pub fn isFloatingPointRegister(op: Op) bool {
return switch (op) {
.xmm, .xmm_m32, .xmm_m64 => true,
@ -478,7 +482,7 @@ pub const Op = enum {
}
/// Given an operand `op` checks if `target` is a subset for the purposes of the encoding.
pub fn isSubset(op: Op, target: Op, mode: Mode) bool {
pub fn isSubset(op: Op, target: Op) bool {
switch (op) {
.m, .o16, .o32, .o64 => unreachable,
.moffs, .sreg => return op == target,
@ -488,13 +492,13 @@ pub const Op = enum {
},
else => {
if (op.isRegister() and target.isRegister()) {
switch (mode) {
.sse, .sse2, .sse4_1 => return op.isFloatingPointRegister() and target.isFloatingPointRegister(),
else => switch (target) {
.cl, .al, .ax, .eax, .rax => return op == target,
else => return op.bitSize() == target.bitSize(),
return switch (target) {
.cl, .al, .ax, .eax, .rax => op == target,
else => op.class() == target.class() and switch (target.class()) {
.floating_point => true,
else => op.bitSize() == target.bitSize(),
},
}
};
}
if (op.isMemory() and target.isMemory()) {
switch (target) {
@ -532,6 +536,7 @@ pub const Mode = enum {
long,
sse,
sse2,
sse2_long,
sse4_1,
};
@ -544,7 +549,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
std.mem.copy(Operand, &inst.ops, ops);
var cwriter = std.io.countingWriter(std.io.null_writer);
inst.encode(cwriter.writer()) catch unreachable; // Not allowed to fail here unless OOM.
inst.encode(cwriter.writer(), .{ .allow_frame_loc = true }) catch unreachable; // Not allowed to fail here unless OOM.
return @intCast(usize, cwriter.bytes_written);
}
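A standalone sketch (not part of the diff) of the counting-writer trick that estimateInstructionLength relies on: encoding into a counting wrapper around std.io.null_writer measures the byte length without emitting anything.

const std = @import("std");

test "counting writer measures encoded bytes" {
    var cw = std.io.countingWriter(std.io.null_writer);
    // 0x48 0x89 0xe5 is `mov rbp, rsp`; nothing is actually written out.
    try cw.writer().writeAll(&.{ 0x48, 0x89, 0xe5 });
    try std.testing.expectEqual(@as(u64, 3), cw.bytes_written);
}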


@ -60,6 +60,8 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction {
.mfence,
.mov,
.movbe,
.movd,
.movq,
.movzx,
.mul,
.neg,
@ -136,7 +138,8 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction {
.setcc => try lower.mirSetcc(inst),
.jcc => try lower.emit(.none, mnem_cc(.j, inst.data.inst_cc.cc), &.{.{ .imm = Immediate.s(0) }}),
.push_regs, .pop_regs => try lower.mirPushPopRegisterList(inst),
.push_regs => try lower.mirPushPopRegisterList(inst, .push),
.pop_regs => try lower.mirPushPopRegisterList(inst, .pop),
.dbg_line,
.dbg_prologue_end,
@ -190,7 +193,7 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
}
fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
return switch (ops) {
return lower.mir.resolveFrameLoc(switch (ops) {
.rm_sib,
.rm_sib_cc,
.m_sib,
@ -227,7 +230,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
=> lower.mir.extraData(Mir.MemoryMoffs, payload).data.decode(),
else => unreachable,
};
});
}
fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
@ -417,23 +420,15 @@ fn mirSetcc(lower: *Lower, inst: Mir.Inst) Error!void {
}
}
fn mirPushPopRegisterList(lower: *Lower, inst: Mir.Inst) Error!void {
const save_reg_list = lower.mir.extraData(Mir.SaveRegisterList, inst.data.payload).data;
const base = @intToEnum(Register, save_reg_list.base_reg);
var disp: i32 = -@intCast(i32, save_reg_list.stack_end);
const reg_list = Mir.RegisterList.fromInt(save_reg_list.register_list);
fn mirPushPopRegisterList(lower: *Lower, inst: Mir.Inst, comptime mnemonic: Mnemonic) Error!void {
const reg_list = Mir.RegisterList.fromInt(inst.data.payload);
const callee_preserved_regs = abi.getCalleePreservedRegs(lower.target.*);
for (callee_preserved_regs) |callee_preserved_reg| {
if (!reg_list.isSet(callee_preserved_regs, callee_preserved_reg)) continue;
const reg_op = Operand{ .reg = callee_preserved_reg };
const mem_op = Operand{ .mem = Memory.sib(.qword, .{ .base = base, .disp = disp }) };
try lower.emit(.none, .mov, switch (inst.tag) {
.push_regs => &.{ mem_op, reg_op },
.pop_regs => &.{ reg_op, mem_op },
else => unreachable,
});
disp += 8;
}
var it = reg_list.iterator(.{ .direction = switch (mnemonic) {
.push => .reverse,
.pop => .forward,
else => unreachable,
} });
while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }});
}
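A sketch of why the two iterator directions differ (the register indices here are invented): pushing in reverse set order and popping in forward order keeps the stack symmetric, e.g. push r12, push rbx, then pop rbx, pop r12.

const std = @import("std");

test "push order is the reverse of pop order" {
    var regs = std.bit_set.IntegerBitSet(32).initEmpty();
    regs.set(3); // say, rbx
    regs.set(12); // say, r12

    // Pushes walk the set backwards...
    var push_it = regs.iterator(.{ .direction = .reverse });
    try std.testing.expectEqual(@as(?usize, 12), push_it.next());
    try std.testing.expectEqual(@as(?usize, 3), push_it.next());

    // ...so the forward pops restore registers LIFO.
    var pop_it = regs.iterator(.{ .direction = .forward });
    try std.testing.expectEqual(@as(?usize, 3), pop_it.next());
    try std.testing.expectEqual(@as(?usize, 12), pop_it.next());
}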
fn mirLeaLinker(lower: *Lower, inst: Mir.Inst) Error!void {


@ -23,6 +23,7 @@ const Register = bits.Register;
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,
frame_locs: std.MultiArrayList(FrameLoc).Slice,
pub const Inst = struct {
tag: Tag,
@ -98,6 +99,10 @@ pub const Inst = struct {
mov,
/// Move data after swapping bytes
movbe,
/// Move doubleword
movd,
/// Move quadword
movq,
/// Move with sign extension
movsx,
/// Move with zero extension
@ -241,13 +246,13 @@ pub const Inst = struct {
/// Start of epilogue
dbg_epilogue_begin,
/// Update debug line
/// Uses `payload` payload with data of type `DbgLineColumn`.
/// Uses `line_column` payload containing the line and column.
dbg_line,
/// Push registers
/// Uses `payload` payload with data of type `SaveRegisterList`.
/// Uses `payload` payload containing `RegisterList.asInt` directly.
push_regs,
/// Pop registers
/// Uses `payload` payload with data of type `SaveRegisterList`.
/// Uses `payload` payload containing `RegisterList.asInt` directly.
pop_regs,
/// Tombstone
@ -500,6 +505,11 @@ pub const Inst = struct {
/// Index into the linker's symbol table.
sym_index: u32,
},
/// Debug line and column position
line_column: struct {
line: u32,
column: u32,
},
/// Index into `extra`. Meaning of what can be found there is context-dependent.
payload: u32,
};
@ -522,12 +532,11 @@ pub const LeaRegisterReloc = struct {
sym_index: u32,
};
/// Used in conjunction with `SaveRegisterList` payload to transfer a list of used registers
/// in a compact manner.
/// Used in conjunction with payload to transfer a list of used registers in a compact manner.
pub const RegisterList = struct {
bitset: BitSet = BitSet.initEmpty(),
const BitSet = IntegerBitSet(@ctz(@as(u32, 0)));
const BitSet = IntegerBitSet(32);
const Self = @This();
fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
@ -547,6 +556,10 @@ pub const RegisterList = struct {
return self.bitset.isSet(index);
}
pub fn iterator(self: Self, comptime options: std.bit_set.IteratorOptions) BitSet.Iterator(options) {
return self.bitset.iterator(options);
}
pub fn asInt(self: Self) u32 {
return self.bitset.mask;
}
@ -562,14 +575,6 @@ pub const RegisterList = struct {
}
};
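With the mask fixed at 32 bits, the whole register set fits in the instruction's u32 payload. A standalone sketch of the asInt/fromInt round trip, using the underlying std bitset directly:

const std = @import("std");

test "register list payload round trip" {
    var set = std.bit_set.IntegerBitSet(32).initEmpty();
    set.set(1);
    set.set(5);
    const payload: u32 = set.mask; // what asInt returns
    const restored = std.bit_set.IntegerBitSet(32){ .mask = payload }; // fromInt
    try std.testing.expect(restored.isSet(1) and restored.isSet(5));
}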
pub const SaveRegisterList = struct {
/// Base register
base_reg: u32,
/// Use `RegisterList` to populate.
register_list: u32,
stack_end: u32,
};
pub const Imm64 = struct {
msb: u32,
lsb: u32,
@ -593,41 +598,51 @@ pub const Imm64 = struct {
pub const MemorySib = struct {
/// Size of the pointer.
ptr_size: u32,
/// Base register. -1 means null, or no base register.
base: i32,
/// Scale for index register. -1 means null, or no scale.
/// This has to be in sync with `index` field.
scale: i32,
/// Index register. -1 means null, or no index register.
/// This has to be in sync with `scale` field.
index: i32,
/// Base register tag of type Memory.Base.Tag
base_tag: u32,
/// Base register of type Register or FrameIndex
base: u32,
/// Scale starting at bit 0 and index register starting at bit 4.
scale_index: u32,
/// Displacement value.
disp: i32,
pub fn encode(mem: Memory) MemorySib {
const sib = mem.sib;
assert(sib.scale_index.scale == 0 or std.math.isPowerOfTwo(sib.scale_index.scale));
return .{
.ptr_size = @enumToInt(sib.ptr_size),
.base = if (sib.base) |r| @enumToInt(r) else -1,
.scale = if (sib.scale_index) |si| si.scale else -1,
.index = if (sib.scale_index) |si| @enumToInt(si.index) else -1,
.base_tag = @enumToInt(@as(Memory.Base.Tag, sib.base)),
.base = switch (sib.base) {
.none => undefined,
.reg => |r| @enumToInt(r),
.frame => |fi| @enumToInt(fi),
},
.scale_index = @as(u32, sib.scale_index.scale) << 0 |
@as(u32, if (sib.scale_index.scale > 0)
@enumToInt(sib.scale_index.index)
else
undefined) << 4,
.disp = sib.disp,
};
}
pub fn decode(msib: MemorySib) Memory {
const base: ?Register = if (msib.base == -1) null else @intToEnum(Register, msib.base);
const scale_index: ?Memory.ScaleIndex = if (msib.index == -1) null else .{
.scale = @intCast(u4, msib.scale),
.index = @intToEnum(Register, msib.index),
};
const mem: Memory = .{ .sib = .{
const scale = @truncate(u4, msib.scale_index);
assert(scale == 0 or std.math.isPowerOfTwo(scale));
return .{ .sib = .{
.ptr_size = @intToEnum(Memory.PtrSize, msib.ptr_size),
.base = base,
.scale_index = scale_index,
.base = switch (@intToEnum(Memory.Base.Tag, msib.base_tag)) {
.none => .none,
.reg => .{ .reg = @intToEnum(Register, msib.base) },
.frame => .{ .frame = @intToEnum(bits.FrameIndex, msib.base) },
},
.scale_index = .{
.scale = scale,
.index = if (scale > 0) @intToEnum(Register, msib.scale_index >> 4) else undefined,
},
.disp = msib.disp,
} };
return mem;
}
};
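A worked example of the scale_index packing described above: the power-of-two scale occupies bits 0..3 and the index register's enum value starts at bit 4 (the register number 13 here is arbitrary).

const std = @import("std");

test "scale_index packing round trip" {
    const scale: u4 = 8;
    const index_enc: u32 = 13;
    const packed_si: u32 = @as(u32, scale) << 0 | index_enc << 4;
    // Decoding mirrors MemorySib.decode: truncate for the scale,
    // shift for the index.
    try std.testing.expectEqual(@as(u4, 8), @truncate(u4, packed_si));
    try std.testing.expectEqual(index_enc, packed_si >> 4);
}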
@ -676,14 +691,10 @@ pub const MemoryMoffs = struct {
}
};
pub const DbgLineColumn = struct {
line: u32,
column: u32,
};
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
mir.frame_locs.deinit(gpa);
mir.* = undefined;
}
@ -704,3 +715,22 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end:
.end = i,
};
}
pub const FrameLoc = struct {
base: Register,
disp: i32,
};
pub fn resolveFrameLoc(mir: Mir, mem: Memory) Memory {
return switch (mem) {
.sib => |sib| switch (sib.base) {
.none, .reg => mem,
.frame => |index| if (mir.frame_locs.len > 0) Memory.sib(sib.ptr_size, .{
.base = .{ .reg = mir.frame_locs.items(.base)[@enumToInt(index)] },
.disp = mir.frame_locs.items(.disp)[@enumToInt(index)] + sib.disp,
.scale_index = mem.scaleIndex(),
}) else mem,
},
.rip, .moffs => mem,
};
}
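A minimal model (numbers invented) of the displacement folding resolveFrameLoc performs: the slot displacement recorded in frame_locs is added to whatever displacement the operand already carried.

const std = @import("std");

test "frame displacement folding" {
    const slot_disp: i32 = -24; // hypothetical frame_locs.items(.disp)[index]
    const operand_disp: i32 = 8; // hypothetical sib.disp, e.g. a field offset
    try std.testing.expectEqual(@as(i32, -16), slot_disp + operand_disp);
}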


@ -405,14 +405,69 @@ test "Register classes" {
try expect(Register.fs.class() == .segment);
}
pub const FrameIndex = enum(u32) {
// This index refers to the start of the arguments passed to this function
args_frame,
// This index refers to the return address pushed by a `call` and popped by a `ret`.
ret_addr,
// This index refers to the base pointer pushed in the prologue and popped in the epilogue.
base_ptr,
// This index refers to the entire stack frame.
stack_frame,
// This index refers to the start of the call frame for arguments passed to called functions
call_frame,
// Other indices are used for local variable stack slots
_,
pub const named_count = @typeInfo(FrameIndex).Enum.fields.len;
pub fn isNamed(fi: FrameIndex) bool {
return @enumToInt(fi) < named_count;
}
pub fn format(
fi: FrameIndex,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
try writer.writeAll("FrameIndex");
if (fi.isNamed()) {
try writer.writeByte('.');
try writer.writeAll(@tagName(fi));
} else {
try writer.writeByte('(');
try std.fmt.formatType(@enumToInt(fi), fmt, options, writer, 0);
try writer.writeByte(')');
}
}
};
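A standalone sketch restating the named/anonymous split (a local copy of the enum, not the compiler's): the first named_count values are reserved, and every later value is an ordinary local stack slot handed out during codegen.

const std = @import("std");

const MiniFrameIndex = enum(u32) {
    args_frame,
    ret_addr,
    base_ptr,
    stack_frame,
    call_frame,
    _, // anything past the named fields is a local slot

    pub const named_count = @typeInfo(MiniFrameIndex).Enum.fields.len;
};

test "locals start after the named indices" {
    const first_local = @intToEnum(MiniFrameIndex, MiniFrameIndex.named_count);
    try std.testing.expect(@enumToInt(first_local) == MiniFrameIndex.named_count);
    try std.testing.expect(MiniFrameIndex.named_count == 5);
}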
pub const Memory = union(enum) {
sib: Sib,
rip: Rip,
moffs: Moffs,
pub const ScaleIndex = packed struct {
pub const Base = union(enum) {
none,
reg: Register,
frame: FrameIndex,
pub const Tag = @typeInfo(Base).Union.tag_type.?;
pub fn isExtended(self: Base) bool {
return switch (self) {
.none, .frame => false, // neither rsp nor rbp are extended
.reg => |reg| reg.isExtended(),
};
}
};
pub const ScaleIndex = struct {
scale: u4,
index: Register,
const none = ScaleIndex{ .scale = 0, .index = undefined };
};
pub const PtrSize = enum {
@ -460,8 +515,8 @@ pub const Memory = union(enum) {
pub const Sib = struct {
ptr_size: PtrSize,
base: ?Register,
scale_index: ?ScaleIndex,
base: Base,
scale_index: ScaleIndex,
disp: i32,
};
@ -482,7 +537,7 @@ pub const Memory = union(enum) {
pub fn sib(ptr_size: PtrSize, args: struct {
disp: i32 = 0,
base: ?Register = null,
base: Base = .none,
scale_index: ?ScaleIndex = null,
}) Memory {
if (args.scale_index) |si| assert(std.math.isPowerOfTwo(si.scale));
@ -490,7 +545,7 @@ pub const Memory = union(enum) {
.base = args.base,
.disp = args.disp,
.ptr_size = ptr_size,
.scale_index = args.scale_index,
.scale_index = if (args.scale_index) |si| si else ScaleIndex.none,
} };
}
@ -502,22 +557,25 @@ pub const Memory = union(enum) {
return switch (mem) {
.moffs => true,
.rip => false,
.sib => |s| if (s.base) |r| r.class() == .segment else false,
.sib => |s| switch (s.base) {
.none, .frame => false,
.reg => |reg| reg.class() == .segment,
},
};
}
pub fn base(mem: Memory) ?Register {
pub fn base(mem: Memory) Base {
return switch (mem) {
.moffs => |m| m.seg,
.moffs => |m| .{ .reg = m.seg },
.sib => |s| s.base,
.rip => null,
.rip => .none,
};
}
pub fn scaleIndex(mem: Memory) ?ScaleIndex {
return switch (mem) {
.moffs, .rip => null,
.sib => |s| s.scale_index,
.sib => |s| if (s.scale_index.scale > 0) s.scale_index else null,
};
}


@ -54,6 +54,21 @@ pub const Instruction = struct {
};
}
pub fn isBaseExtended(op: Operand) bool {
return switch (op) {
.none, .imm => false,
.reg => |reg| reg.isExtended(),
.mem => |mem| mem.base().isExtended(),
};
}
pub fn isIndexExtended(op: Operand) bool {
return switch (op) {
.none, .reg, .imm => false,
.mem => |mem| if (mem.scaleIndex()) |si| si.index.isExtended() else false,
};
}
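For context on where these extension bits land (a sketch, not the encoder's code): the REX prefix byte has layout 0b0100WRXB, so W=1 for a 64-bit operand combined with an extended base register such as r8 (B=1) yields 0x49.

const std = @import("std");

test "REX prefix layout" {
    const w: u8 = 1; // 64-bit operand size
    const r: u8 = 0; // ModRM.reg not extended
    const x: u8 = 0; // SIB index not extended
    const b: u8 = 1; // base register extended, e.g. r8
    const rex: u8 = 0x40 | w << 3 | r << 2 | x << 1 | b;
    try std.testing.expectEqual(@as(u8, 0x49), rex);
}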
fn format(
op: Operand,
comptime unused_format_string: []const u8,
@ -98,17 +113,24 @@ pub const Instruction = struct {
try writer.print("{s} ptr ", .{@tagName(sib.ptr_size)});
if (mem.isSegmentRegister()) {
return writer.print("{s}:0x{x}", .{ @tagName(sib.base.?), sib.disp });
return writer.print("{s}:0x{x}", .{ @tagName(sib.base.reg), sib.disp });
}
try writer.writeByte('[');
var any = false;
if (sib.base) |base| {
try writer.print("{s}", .{@tagName(base)});
any = true;
switch (sib.base) {
.none => {},
.reg => |reg| {
try writer.print("{s}", .{@tagName(reg)});
any = true;
},
.frame => |frame| {
try writer.print("{}", .{frame});
any = true;
},
}
if (sib.scale_index) |si| {
if (mem.scaleIndex()) |si| {
if (any) try writer.writeAll(" + ");
try writer.print("{s} * {d}", .{ @tagName(si.index), si.scale });
any = true;
@ -182,8 +204,8 @@ pub const Instruction = struct {
}
}
pub fn encode(inst: Instruction, writer: anytype) !void {
const encoder = Encoder(@TypeOf(writer)){ .writer = writer };
pub fn encode(inst: Instruction, writer: anytype, comptime opts: Options) !void {
const encoder = Encoder(@TypeOf(writer), opts){ .writer = writer };
const enc = inst.encoding;
const data = enc.data;
@ -269,22 +291,24 @@ pub const Instruction = struct {
const segment_override: ?Register = switch (op_en) {
.i, .zi, .o, .oi, .d, .np => null,
.fd => inst.ops[1].mem.base().?,
.td => inst.ops[0].mem.base().?,
.rm, .rmi => if (inst.ops[1].isSegmentRegister()) blk: {
break :blk switch (inst.ops[1]) {
.reg => |r| r,
.mem => |m| m.base().?,
.fd => inst.ops[1].mem.base().reg,
.td => inst.ops[0].mem.base().reg,
.rm, .rmi => if (inst.ops[1].isSegmentRegister())
switch (inst.ops[1]) {
.reg => |reg| reg,
.mem => |mem| mem.base().reg,
else => unreachable,
};
} else null,
.m, .mi, .m1, .mc, .mr, .mri, .mrc => if (inst.ops[0].isSegmentRegister()) blk: {
break :blk switch (inst.ops[0]) {
.reg => |r| r,
.mem => |m| m.base().?,
}
else
null,
.m, .mi, .m1, .mc, .mr, .mri, .mrc => if (inst.ops[0].isSegmentRegister())
switch (inst.ops[0]) {
.reg => |reg| reg,
.mem => |mem| mem.base().reg,
else => unreachable,
};
} else null,
}
else
null,
};
if (segment_override) |seg| {
legacy.setSegmentOverride(seg);
@ -298,7 +322,10 @@ pub const Instruction = struct {
var rex = Rex{};
rex.present = inst.encoding.data.mode == .rex;
rex.w = inst.encoding.data.mode == .long;
switch (inst.encoding.data.mode) {
.long, .sse2_long => rex.w = true,
else => {},
}
switch (op_en) {
.np, .i, .zi, .fd, .td, .d => {},
@ -307,27 +334,17 @@ pub const Instruction = struct {
const r_op = switch (op_en) {
.rm, .rmi => inst.ops[0],
.mr, .mri, .mrc => inst.ops[1],
else => null,
else => .none,
};
if (r_op) |op| {
rex.r = op.reg.isExtended();
}
rex.r = r_op.isBaseExtended();
const b_x_op = switch (op_en) {
.rm, .rmi => inst.ops[1],
.m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.ops[0],
else => unreachable,
};
switch (b_x_op) {
.reg => |r| {
rex.b = r.isExtended();
},
.mem => |mem| {
rex.b = if (mem.base()) |base| base.isExtended() else false;
rex.x = if (mem.scaleIndex()) |si| si.index.isExtended() else false;
},
else => unreachable,
}
rex.b = b_x_op.isBaseExtended();
rex.x = b_x_op.isIndexExtended();
},
}
@ -348,72 +365,75 @@ pub const Instruction = struct {
switch (mem) {
.moffs => unreachable,
.sib => |sib| {
if (sib.base) |base| {
if (base.class() == .segment) {
// TODO audit this wrt SIB
try encoder.modRm_SIBDisp0(operand_enc);
if (sib.scale_index) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexDisp32(scale, si.index.lowEnc());
} else {
try encoder.sib_disp32();
}
try encoder.disp32(sib.disp);
} else {
assert(base.class() == .general_purpose);
const dst = base.lowEnc();
const src = operand_enc;
if (dst == 4 or sib.scale_index != null) {
if (sib.disp == 0 and dst != 5) {
try encoder.modRm_SIBDisp0(src);
if (sib.scale_index) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexBase(scale, si.index.lowEnc(), dst);
} else {
try encoder.sib_base(dst);
}
} else if (math.cast(i8, sib.disp)) |_| {
try encoder.modRm_SIBDisp8(src);
if (sib.scale_index) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexBaseDisp8(scale, si.index.lowEnc(), dst);
} else {
try encoder.sib_baseDisp8(dst);
}
try encoder.disp8(@truncate(i8, sib.disp));
} else {
try encoder.modRm_SIBDisp32(src);
if (sib.scale_index) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexBaseDisp32(scale, si.index.lowEnc(), dst);
} else {
try encoder.sib_baseDisp32(dst);
}
try encoder.disp32(sib.disp);
}
} else {
if (sib.disp == 0 and dst != 5) {
try encoder.modRm_indirectDisp0(src, dst);
} else if (math.cast(i8, sib.disp)) |_| {
try encoder.modRm_indirectDisp8(src, dst);
try encoder.disp8(@truncate(i8, sib.disp));
} else {
try encoder.modRm_indirectDisp32(src, dst);
try encoder.disp32(sib.disp);
}
}
}
} else {
.sib => |sib| switch (sib.base) {
.none => {
try encoder.modRm_SIBDisp0(operand_enc);
if (sib.scale_index) |si| {
if (mem.scaleIndex()) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexDisp32(scale, si.index.lowEnc());
} else {
try encoder.sib_disp32();
}
try encoder.disp32(sib.disp);
}
},
.reg => |base| if (base.class() == .segment) {
// TODO audit this wrt SIB
try encoder.modRm_SIBDisp0(operand_enc);
if (mem.scaleIndex()) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexDisp32(scale, si.index.lowEnc());
} else {
try encoder.sib_disp32();
}
try encoder.disp32(sib.disp);
} else {
assert(base.class() == .general_purpose);
const dst = base.lowEnc();
const src = operand_enc;
if (dst == 4 or mem.scaleIndex() != null) {
if (sib.disp == 0 and dst != 5) {
try encoder.modRm_SIBDisp0(src);
if (mem.scaleIndex()) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexBase(scale, si.index.lowEnc(), dst);
} else {
try encoder.sib_base(dst);
}
} else if (math.cast(i8, sib.disp)) |_| {
try encoder.modRm_SIBDisp8(src);
if (mem.scaleIndex()) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexBaseDisp8(scale, si.index.lowEnc(), dst);
} else {
try encoder.sib_baseDisp8(dst);
}
try encoder.disp8(@truncate(i8, sib.disp));
} else {
try encoder.modRm_SIBDisp32(src);
if (mem.scaleIndex()) |si| {
const scale = math.log2_int(u4, si.scale);
try encoder.sib_scaleIndexBaseDisp32(scale, si.index.lowEnc(), dst);
} else {
try encoder.sib_baseDisp32(dst);
}
try encoder.disp32(sib.disp);
}
} else {
if (sib.disp == 0 and dst != 5) {
try encoder.modRm_indirectDisp0(src, dst);
} else if (math.cast(i8, sib.disp)) |_| {
try encoder.modRm_indirectDisp8(src, dst);
try encoder.disp8(@truncate(i8, sib.disp));
} else {
try encoder.modRm_indirectDisp32(src, dst);
try encoder.disp32(sib.disp);
}
}
},
.frame => if (@TypeOf(encoder).options.allow_frame_loc) {
try encoder.modRm_indirectDisp32(operand_enc, undefined);
try encoder.disp32(undefined);
} else return error.CannotEncode,
},
.rip => |rip| {
try encoder.modRm_RIPDisp32(operand_enc);
@ -482,11 +502,14 @@ pub const LegacyPrefixes = packed struct {
}
};
fn Encoder(comptime T: type) type {
pub const Options = struct { allow_frame_loc: bool = false };
fn Encoder(comptime T: type, comptime opts: Options) type {
return struct {
writer: T,
const Self = @This();
pub const options = opts;
// --------
// Prefixes


@ -860,6 +860,12 @@ pub const table = [_]Entry{
.{ .minsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .sse2 },
.{ .movd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .sse2 },
.{ .movd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .sse2 },
.{ .movq, .rm, &.{ .xmm, .rm64 }, &.{ 0x66, 0x0f, 0x6e }, 0, .sse2_long },
.{ .movq, .mr, &.{ .rm64, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .sse2_long },
.{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .sse2 },
.{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .sse2 },
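As a concrete instance of the sse2_long rows (a worked example, not part of the table): movq xmm0, rax encodes as 66 48 0F 6E C0, movd's 66 0F 6E opcode promoted by REX.W (0x48), with ModRM 0xC0 selecting xmm0 and rax.

const std = @import("std");

test "ModRM byte for movq xmm0, rax" {
    // mod=0b11 (register direct), reg=xmm0 (enc 0), rm=rax (enc 0)
    const modrm: u8 = 0b11 << 6 | 0 << 3 | 0;
    try std.testing.expectEqual(@as(u8, 0xC0), modrm);
}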


@ -492,7 +492,7 @@ fn growSection(self: *Coff, sect_id: u32, needed_size: u32) !void {
const sect_vm_capacity = self.allocatedVirtualSize(header.virtual_address);
if (needed_size > sect_vm_capacity) {
self.markRelocsDirtyByAddress(header.virtual_address + needed_size);
self.markRelocsDirtyByAddress(header.virtual_address + header.virtual_size);
try self.growSectionVirtualMemory(sect_id, needed_size);
}
@ -759,7 +759,9 @@ fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []u8) !void {
if (self.relocs.getPtr(atom_index)) |rels| {
try relocs.ensureTotalCapacityPrecise(rels.items.len);
for (rels.items) |*reloc| {
if (reloc.isResolvable(self)) relocs.appendAssumeCapacity(reloc);
if (reloc.isResolvable(self) and reloc.dirty) {
relocs.appendAssumeCapacity(reloc);
}
}
}
@ -904,18 +906,28 @@ fn markRelocsDirtyByTarget(self: *Coff, target: SymbolWithLoc) void {
}
fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
const got_moved = blk: {
const sect_id = self.got_section_index orelse break :blk false;
break :blk self.sections.items(.header)[sect_id].virtual_address > addr;
};
// TODO: dirty relocations targeting import table if that got moved in memory
for (self.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
const target_vaddr = reloc.getTargetAddress(self) orelse continue;
if (target_vaddr < addr) continue;
reloc.dirty = true;
if (reloc.isGotIndirection()) {
reloc.dirty = reloc.dirty or got_moved;
} else {
const target_vaddr = reloc.getTargetAddress(self) orelse continue;
if (target_vaddr > addr) reloc.dirty = true;
}
}
}
// TODO: dirty only really affected GOT cells
for (self.got_table.entries.items) |entry| {
const target_addr = self.getSymbol(entry).value;
if (target_addr >= addr) {
if (target_addr > addr) {
self.got_table_contents_dirty = true;
break;
}
@ -1624,7 +1636,7 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
for (self.relocs.keys(), self.relocs.values()) |atom_index, relocs| {
const needs_update = for (relocs.items) |reloc| {
if (reloc.isResolvable(self)) break true;
if (reloc.dirty) break true;
} else false;
if (!needs_update) continue;


@ -45,6 +45,19 @@ pcrel: bool,
length: u2,
dirty: bool = true,
/// Returns true if and only if the reloc can be resolved.
pub fn isResolvable(self: Relocation, coff_file: *Coff) bool {
_ = self.getTargetAddress(coff_file) orelse return false;
return true;
}
pub fn isGotIndirection(self: Relocation) bool {
return switch (self.type) {
.got, .got_page, .got_pageoff => true,
else => false,
};
}
/// Returns address of the target if any.
pub fn getTargetAddress(self: Relocation, coff_file: *const Coff) ?u32 {
switch (self.type) {
@ -53,11 +66,6 @@ pub fn getTargetAddress(self: Relocation, coff_file: *const Coff) ?u32 {
const header = coff_file.sections.items(.header)[coff_file.got_section_index.?];
return header.virtual_address + got_index * coff_file.ptr_width.size();
},
.direct, .page, .pageoff => {
const target_atom_index = coff_file.getAtomIndexForSymbol(self.target) orelse return null;
const target_atom = coff_file.getAtom(target_atom_index);
return target_atom.getSymbol(coff_file).value;
},
.import, .import_page, .import_pageoff => {
const sym = coff_file.getSymbol(self.target);
const index = coff_file.import_tables.getIndex(sym.value) orelse return null;
@ -68,16 +76,14 @@ pub fn getTargetAddress(self: Relocation, coff_file: *const Coff) ?u32 {
.name_off = sym.value,
});
},
else => {
const target_atom_index = coff_file.getAtomIndexForSymbol(self.target) orelse return null;
const target_atom = coff_file.getAtom(target_atom_index);
return target_atom.getSymbol(coff_file).value;
},
}
}
/// Returns true if and only if the reloc is dirty AND the target address is available.
pub fn isResolvable(self: Relocation, coff_file: *Coff) bool {
const addr = self.getTargetAddress(coff_file) orelse return false;
if (addr == 0) return false;
return self.dirty;
}
pub fn resolve(self: Relocation, atom_index: Atom.Index, code: []u8, image_base: u64, coff_file: *Coff) void {
const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file);


@ -1146,7 +1146,9 @@ pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []u8) !void {
if (self.relocs.getPtr(atom_index)) |rels| {
try relocs.ensureTotalCapacityPrecise(rels.items.len);
for (rels.items) |*reloc| {
if (reloc.isResolvable(self)) relocs.appendAssumeCapacity(reloc);
if (reloc.isResolvable(self) and reloc.dirty) {
relocs.appendAssumeCapacity(reloc);
}
}
}
@ -1332,18 +1334,33 @@ fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
fn markRelocsDirtyByAddress(self: *MachO, addr: u64) void {
log.debug("marking relocs dirty by address: {x}", .{addr});
const got_moved = blk: {
const sect_id = self.got_section_index orelse break :blk false;
break :blk self.sections.items(.header)[sect_id].addr > addr;
};
const stubs_moved = blk: {
const sect_id = self.stubs_section_index orelse break :blk false;
break :blk self.sections.items(.header)[sect_id].addr > addr;
};
for (self.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
const target_addr = reloc.getTargetBaseAddress(self) orelse continue;
if (target_addr < addr) continue;
reloc.dirty = true;
if (reloc.isGotIndirection()) {
reloc.dirty = reloc.dirty or got_moved;
} else if (reloc.isStubTrampoline(self)) {
reloc.dirty = reloc.dirty or stubs_moved;
} else {
const target_addr = reloc.getTargetBaseAddress(self) orelse continue;
if (target_addr > addr) reloc.dirty = true;
}
}
}
// TODO: dirty only really affected GOT cells
for (self.got_table.entries.items) |entry| {
const target_addr = self.getSymbol(entry).n_value;
if (target_addr >= addr) {
if (target_addr > addr) {
self.got_table_contents_dirty = true;
break;
}
@ -1353,7 +1370,7 @@ fn markRelocsDirtyByAddress(self: *MachO, addr: u64) void {
const stubs_addr = self.getSegment(self.stubs_section_index.?).vmaddr;
const stub_helper_addr = self.getSegment(self.stub_helper_section_index.?).vmaddr;
const laptr_addr = self.getSegment(self.la_symbol_ptr_section_index.?).vmaddr;
if (stubs_addr >= addr or stub_helper_addr >= addr or laptr_addr >= addr)
if (stubs_addr > addr or stub_helper_addr > addr or laptr_addr > addr)
self.stub_table_contents_dirty = true;
}
}
@ -2794,7 +2811,7 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void {
const sect_vm_capacity = self.allocatedVirtualSize(segment.vmaddr);
if (needed_size > sect_vm_capacity) {
self.markRelocsDirtyByAddress(segment.vmaddr + needed_size);
self.markRelocsDirtyByAddress(segment.vmaddr + segment.vmsize);
try self.growSectionVirtualMemory(sect_id, needed_size);
}
@ -4067,11 +4084,12 @@ pub fn findFirst(comptime T: type, haystack: []align(1) const T, start: usize, p
pub fn logSections(self: *MachO) void {
log.debug("sections:", .{});
for (self.sections.items(.header), 0..) |header, i| {
log.debug(" sect({d}): {s},{s} @{x}, sizeof({x})", .{
log.debug(" sect({d}): {s},{s} @{x} ({x}), sizeof({x})", .{
i + 1,
header.segName(),
header.sectName(),
header.offset,
header.addr,
header.size,
});
}


@ -37,14 +37,33 @@ pub const Type = enum {
tlv_initializer,
};
/// Returns true if and only if the reloc is dirty AND the target address is available.
/// Returns true if and only if the reloc can be resolved.
pub fn isResolvable(self: Relocation, macho_file: *MachO) bool {
const addr = self.getTargetBaseAddress(macho_file) orelse return false;
if (addr == 0) return false;
return self.dirty;
_ = self.getTargetBaseAddress(macho_file) orelse return false;
return true;
}
pub fn isGotIndirection(self: Relocation) bool {
return switch (self.type) {
.got, .got_page, .got_pageoff => true,
else => false,
};
}
pub fn isStubTrampoline(self: Relocation, macho_file: *MachO) bool {
return switch (self.type) {
.branch => macho_file.getSymbol(self.target).undf(),
else => false,
};
}
pub fn getTargetBaseAddress(self: Relocation, macho_file: *MachO) ?u64 {
if (self.isStubTrampoline(macho_file)) {
const index = macho_file.stub_table.lookup.get(self.target) orelse return null;
const header = macho_file.sections.items(.header)[macho_file.stubs_section_index.?];
return header.addr +
index * @import("stubs.zig").calcStubEntrySize(macho_file.base.options.target.cpu.arch);
}
switch (self.type) {
.got, .got_page, .got_pageoff => {
const got_index = macho_file.got_table.lookup.get(self.target) orelse return null;
@ -56,17 +75,11 @@ pub fn getTargetBaseAddress(self: Relocation, macho_file: *MachO) ?u64 {
const atom = macho_file.getAtom(atom_index);
return atom.getSymbol(macho_file).n_value;
},
.branch => {
if (macho_file.stub_table.lookup.get(self.target)) |index| {
const header = macho_file.sections.items(.header)[macho_file.stubs_section_index.?];
return header.addr +
index * @import("stubs.zig").calcStubEntrySize(macho_file.base.options.target.cpu.arch);
}
const atom_index = macho_file.getAtomIndexForSymbol(self.target) orelse return null;
const atom = macho_file.getAtom(atom_index);
return atom.getSymbol(macho_file).n_value;
else => {
const target_atom_index = macho_file.getAtomIndexForSymbol(self.target) orelse return null;
const target_atom = macho_file.getAtom(target_atom_index);
return target_atom.getSymbol(macho_file).n_value;
},
else => return macho_file.getSymbol(self.target).n_value,
}
}
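A worked instance (numbers invented) of the stub-address formula in getTargetBaseAddress: the entry address equals the stubs section base plus index times the per-entry size.

const std = @import("std");

test "stub entry address" {
    const section_base: u64 = 0x1000; // hypothetical stubs section addr
    const entry_size: u64 = 6; // hypothetical per-arch stub entry size
    const index: u64 = 3;
    try std.testing.expectEqual(@as(u64, 0x1012), section_base + index * entry_size);
}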


@ -491,7 +491,10 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) {
// Careful when enabling this test; it fails randomly.
return error.SkipZigTest;
}
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -785,7 +785,6 @@ fn manyptrConcat(comptime s: [*:0]const u8) [*:0]const u8 {
test "comptime manyptr concatenation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const s = "epic";


@ -12,7 +12,6 @@ const Entry = packed struct {
};
test {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@ -32,7 +32,10 @@ test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) {
// Careful when enabling this test; it fails randomly.
return error.SkipZigTest;
}
var ram = try RAM.new();
var cpu = try CPU.new(&ram);


@ -4,7 +4,6 @@ const expect = std.testing.expect;
test {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -532,7 +532,6 @@ test "@tagName of @typeInfo" {
}
test "static eval list init" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -335,7 +335,6 @@ fn numberLiteralArg(a: anytype) !void {
}
test "function call with anon list literal" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -356,7 +355,6 @@ test "function call with anon list literal" {
}
test "function call with anon list literal - 2D" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -734,7 +734,6 @@ test "small int addition" {
test "basic @mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
{
var a: u8 = 86;
@ -1126,7 +1125,6 @@ test "allow signed integer division/remainder when values are comptime-known and
test "quad hex float literal parsing accurate" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -232,7 +232,6 @@ test "nested packed structs" {
}
test "regular in irregular packed struct" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -621,7 +620,6 @@ test "store undefined to packed result location" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var x: u4 = 0;
var s = packed struct { x: u4, y: u4 }{ .x = x, .y = if (x > 0) x else undefined };


@ -343,7 +343,6 @@ test "pointer sentinel with optional element" {
test "pointer sentinel with +inf" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -4,7 +4,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
test "@popCount integers" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -26,7 +26,6 @@ fn setFloat(foo: *FooWithFloats, x: f64) void {
}
test "init union with runtime value - floats" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -318,7 +318,6 @@ test "vector @splat" {
}
test "load vector elements via comptime index" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -340,7 +339,6 @@ test "load vector elements via comptime index" {
}
test "store vector elements via comptime index" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -368,7 +366,6 @@ test "store vector elements via comptime index" {
}
test "load vector elements via runtime index" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -390,7 +387,6 @@ test "load vector elements via runtime index" {
}
test "store vector elements via runtime index" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1100,6 +1096,7 @@ test "loading the second vector from a slice of vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@setRuntimeSafety(false);
var small_bases = [2]@Vector(2, u8){
@ -1200,6 +1197,7 @@ test "zero multiplicand" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const zeros = @Vector(2, u32){ 0.0, 0.0 };
var ones = @Vector(2, u32){ 1.0, 1.0 };
@ -1259,6 +1257,7 @@ test "load packed vector element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var x: @Vector(2, u15) = .{ 1, 4 };
try expect((&x[0]).* == 1);
@ -1297,6 +1296,7 @@ test "store to vector in slice" {
test "addition of vectors represented as strings" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const V = @Vector(3, u8);
const foo: V = "foo".*;


@ -390,6 +390,13 @@ fn addFromDirInner(
// Cross-product to get all possible test combinations
for (backends) |backend| {
for (targets) |target| {
if (backend == .stage2 and
target.getCpuArch() != .wasm32 and target.getCpuArch() != .x86_64)
{
// Other backends don't support the new liveness format
continue;
}
const next = ctx.cases.items.len;
try ctx.cases.append(.{
.name = std.fs.path.stem(filename),