x86_64: fix unordered float equality

This commit is contained in:
Jacob Young 2023-05-06 06:00:22 -04:00
parent ae588a09f2
commit 5d4288c5f6
8 changed files with 602 additions and 366 deletions

View File

@ -205,16 +205,7 @@ pub const MCValue = union(enum) {
/// Returns whether `mcv` refers to a value stored in memory (as opposed to
/// a register, immediate, or eflags condition).
/// NOTE(review): the diff residue listed both the old and new prong sets,
/// which is a duplicate-prong compile error in Zig; only the post-change
/// prong list is kept.
fn isMemory(mcv: MCValue) bool {
    return switch (mcv) {
        .memory, .indirect, .load_frame => true,
        else => false,
    };
}
@ -937,7 +928,7 @@ fn formatWipMir(
.target = data.self.target,
.src_loc = data.self.src_loc,
};
for (lower.lowerMir(data.self.mir_instructions.get(data.inst)) catch |err| switch (err) {
for ((lower.lowerMir(data.inst) catch |err| switch (err) {
error.LowerFail => {
defer {
lower.err_msg.?.deinit(data.self.gpa);
@ -955,7 +946,7 @@ fn formatWipMir(
return;
},
else => |e| return e,
}) |lower_inst| try writer.print(" | {}", .{lower_inst});
}).insts) |lowered_inst| try writer.print(" | {}", .{lowered_inst});
}
fn fmtWipMir(self: *Self, inst: Mir.Inst.Index) std.fmt.Formatter(formatWipMir) {
return .{ .data = .{ .self = self, .inst = inst } };
/// Emits a `setcc` MIR instruction targeting `reg`.
/// The unordered-float conditions `.z_and_np` and `.nz_or_p` cannot be
/// expressed as a single x86 setcc; they lower to two setcc instructions
/// combined with a logic op, which requires an extra scratch byte register
/// allocated here.
fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void {
    _ = try self.addInst(.{
        .tag = .setcc,
        .ops = .r_cc,
        .data = .{ .r_cc = .{
            .r = reg,
            // Scratch is only needed for the compound conditions.
            .scratch = if (cc == .z_and_np or cc == .nz_or_p)
                (try self.register_manager.allocReg(null, gp)).to8()
            else
                .none,
            .cc = cc,
        } },
    });
}
/// Emits a `setcc` MIR instruction targeting memory operand `m`.
/// Like the register form, the compound conditions `.z_and_np` / `.nz_or_p`
/// need a scratch byte register for the two-setcc + logic-op lowering.
/// NOTE(review): the `.tag`/`.ops` header lines were hidden inside the hunk
/// seam and are reconstructed here — confirm against upstream.
fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void {
    _ = try self.addInst(.{
        .tag = .setcc,
        .ops = switch (m) {
            .sib => .m_sib_cc,
            .rip => .m_rip_cc,
            else => unreachable,
        },
        .data = .{ .x_cc = .{
            .scratch = if (cc == .z_and_np or cc == .nz_or_p)
                (try self.register_manager.allocReg(null, gp)).to8()
            else
                .none,
            .cc = cc,
            .payload = switch (m) {
                .sib => try self.addExtra(Mir.MemorySib.encode(m)),
                .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
                else => unreachable,
            },
        } },
    });
}
/// Emits a `cmovcc` between two registers.
/// A `cc` of `.z_and_np` clobbers `reg2`! (It lowers to two cmovs, the first
/// of which moves through `reg2` — see the lowering for `.z_and_np`.)
fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bits.Condition) !void {
    _ = try self.addInst(.{
        .tag = .cmovcc,
        .ops = .rr_cc,
        .data = .{ .rr_cc = .{
            .r1 = reg1,
            .r2 = reg2,
            .cc = cc,
        } },
    });
}
/// Emits a `cmovcc` from a memory operand into `reg`.
/// `.z_and_np` is asserted away: its two-cmov lowering needs to swap the
/// operands, which is impossible with a memory source.
fn asmCmovccRegisterMemory(self: *Self, reg: Register, m: Memory, cc: bits.Condition) !void {
    assert(cc != .z_and_np); // not supported
    _ = try self.addInst(.{
        .tag = .cmovcc,
        .ops = switch (m) {
            // NOTE(review): the `.sib` prong was swallowed by the hunk seam
            // and is reconstructed by analogy with the other asm helpers.
            .sib => .rm_sib_cc,
            .rip => .rm_rip_cc,
            else => unreachable,
        },
        .data = .{ .rx_cc = .{
            .r = reg,
            .cc = cc,
            .payload = switch (m) {
                .sib => try self.addExtra(Mir.MemorySib.encode(m)),
                .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
                else => unreachable,
            },
        } },
    });
}
@ -1131,10 +1146,13 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Imme
.tag = tag,
.ops = ops,
.data = switch (ops) {
.ri_s, .ri_u => .{ .ri = .{ .r = reg, .i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
} } },
.ri_s, .ri_u => .{ .ri = .{
.r = reg,
.i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
},
} },
.ri64 => .{ .rx = .{
.r = reg,
.payload = try self.addExtra(Mir.Imm64.encode(imm.unsigned)),
@ -1171,10 +1189,14 @@ fn asmRegisterRegisterImmediate(
.signed => .rri_s,
.unsigned => .rri_u,
},
.data = .{ .rri = .{ .r1 = reg1, .r2 = reg2, .i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
} } },
.data = .{ .rri = .{
.r1 = reg1,
.r2 = reg2,
.i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
},
} },
});
}
@ -1202,11 +1224,14 @@ fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) !
.rip => .rm_rip,
else => unreachable,
},
.data = .{ .rx = .{ .r = reg, .payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
} } },
.data = .{ .rx = .{
.r = reg,
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
},
} },
});
}
@ -1224,11 +1249,43 @@ fn asmRegisterMemoryImmediate(
.rip => .rmi_rip,
else => unreachable,
},
.data = .{ .rix = .{ .r = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
.data = .{ .rix = .{
.r = reg,
.i = @intCast(u8, imm.unsigned),
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
},
} },
});
}
/// Emits an instruction with two register operands, a memory operand, and a
/// byte immediate (e.g. `vpinsrw`). Only `.sib` and `.rip` addressing is
/// supported.
fn asmRegisterRegisterMemoryImmediate(
    self: *Self,
    tag: Mir.Inst.Tag,
    reg1: Register,
    reg2: Register,
    m: Memory,
    imm: Immediate,
) !void {
    _ = try self.addInst(.{
        .tag = tag,
        .ops = switch (m) {
            .sib => .rrmi_sib,
            .rip => .rrmi_rip,
            else => unreachable,
        },
        .data = .{ .rrix = .{
            .r1 = reg1,
            .r2 = reg2,
            // Only byte-sized immediates are representable in `rrix`.
            .i = @intCast(u8, imm.unsigned),
            .payload = switch (m) {
                .sib => try self.addExtra(Mir.MemorySib.encode(m)),
                .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
                else => unreachable,
            },
        } },
    });
}
@ -1240,11 +1297,14 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) !
.rip => .mr_rip,
else => unreachable,
},
.data = .{ .rx = .{ .r = reg, .payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
} } },
.data = .{ .rx = .{
.r = reg,
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
},
} },
});
}
@ -1262,14 +1322,17 @@ fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.Tag, m: Memory, imm: Immediate)
},
else => unreachable,
},
.data = .{ .ix = .{ .i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
}, .payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
} } },
.data = .{ .ix = .{
.i = switch (imm) {
.signed => |s| @bitCast(u32, s),
.unsigned => |u| @intCast(u32, u),
},
.payload = switch (m) {
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
else => unreachable,
},
} },
});
}
@ -6612,11 +6675,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
_ = try self.addInst(.{
.tag = .mov_linker,
.ops = .import_reloc,
.data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{
.reg = @enumToInt(Register.rax),
.atom_index = atom_index,
.sym_index = sym_index,
}) },
.data = .{ .rx = .{
.r = .rax,
.payload = try self.addExtra(Mir.Reloc{
.atom_index = atom_index,
.sym_index = sym_index,
}),
} },
});
try self.asmRegister(.call, .rax);
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
@ -6695,8 +6760,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty = self.air.typeOf(bin_op.lhs);
const ty_abi_size = ty.abiSize(self.target.*);
const can_reuse = ty_abi_size <= 8;
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
@ -6715,69 +6778,93 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
};
defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
const dst_mem_ok = !ty.isRuntimeFloat();
var flipped = false;
const dst_mcv: MCValue = if (can_reuse and !lhs_mcv.isImmediate() and
(dst_mem_ok or lhs_mcv.isRegister()) and self.liveness.operandDies(inst, 0))
lhs_mcv
else if (can_reuse and !rhs_mcv.isImmediate() and
(dst_mem_ok or rhs_mcv.isRegister()) and self.liveness.operandDies(inst, 1))
dst: {
flipped = true;
break :dst rhs_mcv;
} else if (dst_mem_ok) dst: {
const dst_mcv = try self.allocTempRegOrMem(ty, true);
try self.genCopy(ty, dst_mcv, lhs_mcv);
break :dst dst_mcv;
} else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) };
const dst_lock = switch (dst_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
};
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
switch (ty.zigTypeTag()) {
else => try self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv),
.Float => switch (ty.floatBits(self.target.*)) {
16 => if (self.hasFeature(.f16c)) {
const dst_reg = dst_mcv.getReg().?.to128();
const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128();
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
if (src_mcv.isRegister())
try self.asmRegisterRegisterRegister(
.vpunpcklwd,
dst_reg,
dst_reg,
src_mcv.getReg().?.to128(),
)
else
try self.asmRegisterMemoryImmediate(
.vpinsrw,
dst_reg,
src_mcv.mem(.word),
Immediate.u(1),
);
try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg);
try self.asmRegisterRegister(.vmovshdup, tmp_reg, dst_reg);
try self.genBinOpMir(.ucomiss, ty, dst_mcv, .{ .register = tmp_reg });
} else return self.fail("TODO implement airCmp for {}", .{
ty.fmt(self.bin_file.options.module.?),
}),
32 => try self.genBinOpMir(.ucomiss, ty, dst_mcv, src_mcv),
64 => try self.genBinOpMir(.ucomisd, ty, dst_mcv, src_mcv),
else => return self.fail("TODO implement airCmp for {}", .{
ty.fmt(self.bin_file.options.module.?),
}),
},
}
const signedness = if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
const result = MCValue{
.eflags = Condition.fromCompareOperator(signedness, if (flipped) op.reverse() else op),
.eflags = switch (ty.zigTypeTag()) {
else => result: {
var flipped = false;
const dst_mcv: MCValue = if (lhs_mcv.isRegister() or lhs_mcv.isMemory())
lhs_mcv
else if (rhs_mcv.isRegister() or rhs_mcv.isMemory()) dst: {
flipped = true;
break :dst rhs_mcv;
} else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) };
const dst_lock = switch (dst_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
};
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
try self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv);
break :result Condition.fromCompareOperator(
if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned,
if (flipped) op.reverse() else op,
);
},
.Float => result: {
const flipped = switch (op) {
.lt, .lte => true,
.eq, .gte, .gt, .neq => false,
};
const dst_mcv = if (flipped) rhs_mcv else lhs_mcv;
const dst_reg = if (dst_mcv.isRegister())
dst_mcv.getReg().?
else
try self.copyToTmpRegister(ty, dst_mcv);
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
switch (ty.floatBits(self.target.*)) {
16 => if (self.hasFeature(.f16c)) {
const tmp1_reg = (try self.register_manager.allocReg(null, sse)).to128();
const tmp1_mcv = MCValue{ .register = tmp1_reg };
const tmp1_lock = self.register_manager.lockRegAssumeUnused(tmp1_reg);
defer self.register_manager.unlockReg(tmp1_lock);
const tmp2_reg = (try self.register_manager.allocReg(null, sse)).to128();
const tmp2_mcv = MCValue{ .register = tmp2_reg };
const tmp2_lock = self.register_manager.lockRegAssumeUnused(tmp2_reg);
defer self.register_manager.unlockReg(tmp2_lock);
if (src_mcv.isRegister())
try self.asmRegisterRegisterRegister(
.vpunpcklwd,
tmp1_reg,
dst_reg.to128(),
src_mcv.getReg().?.to128(),
)
else
try self.asmRegisterRegisterMemoryImmediate(
.vpinsrw,
tmp1_reg,
dst_reg.to128(),
src_mcv.mem(.word),
Immediate.u(1),
);
try self.asmRegisterRegister(.vcvtph2ps, tmp1_reg, tmp1_reg);
try self.asmRegisterRegister(.vmovshdup, tmp2_reg, tmp1_reg);
try self.genBinOpMir(.ucomiss, ty, tmp1_mcv, tmp2_mcv);
} else return self.fail("TODO implement airCmp for {}", .{
ty.fmt(self.bin_file.options.module.?),
}),
32 => try self.genBinOpMir(.ucomiss, ty, .{ .register = dst_reg }, src_mcv),
64 => try self.genBinOpMir(.ucomisd, ty, .{ .register = dst_reg }, src_mcv),
else => return self.fail("TODO implement airCmp for {}", .{
ty.fmt(self.bin_file.options.module.?),
}),
}
break :result switch (if (flipped) op.reverse() else op) {
.lt, .lte => unreachable, // required to have been canonicalized to gt(e)
.gt => .a,
.gte => .ae,
.eq => .z_and_np,
.neq => .nz_or_p,
};
},
},
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -7929,11 +8016,13 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
_ = try self.addInst(.{
.tag = .mov_linker,
.ops = .direct_reloc,
.data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{
.reg = @enumToInt(dst_reg.to64()),
.atom_index = atom_index,
.sym_index = sym_index,
}) },
.data = .{ .rx = .{
.r = dst_reg.to64(),
.payload = try self.addExtra(Mir.Reloc{
.atom_index = atom_index,
.sym_index = sym_index,
}),
} },
});
return;
},
@ -7975,11 +8064,13 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
.lea_got => .got_reloc,
else => unreachable,
},
.data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{
.reg = @enumToInt(dst_reg.to64()),
.atom_index = atom_index,
.sym_index = sym_index,
}) },
.data = .{ .rx = .{
.r = dst_reg.to64(),
.payload = try self.addExtra(Mir.Reloc{
.atom_index = atom_index,
.sym_index = sym_index,
}),
} },
});
},
.lea_tlv => |sym_index| {
@ -7988,11 +8079,13 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
_ = try self.addInst(.{
.tag = .lea_linker,
.ops = .tlv_reloc,
.data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{
.reg = @enumToInt(Register.rdi),
.atom_index = atom_index,
.sym_index = sym_index,
}) },
.data = .{ .rx = .{
.r = .rdi,
.payload = try self.addExtra(Mir.Reloc{
.atom_index = atom_index,
.sym_index = sym_index,
}),
} },
});
// TODO: spill registers before calling
try self.asmMemory(.call, Memory.sib(.qword, .{ .base = .{ .reg = .rdi } }));
@ -8463,14 +8556,20 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
try self.spillEflagsIfOccupied();
if (val_abi_size <= 8) {
_ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{
.r = registerAlias(new_reg.?, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
_ = try self.addInst(.{
.tag = .cmpxchg,
.ops = .lock_mr_sib,
.data = .{ .rx = .{
.r = registerAlias(new_reg.?, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
});
} else {
_ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} });
_ = try self.addInst(.{
.tag = .cmpxchgb,
.ops = .lock_m_sib,
.data = .{ .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)) },
});
}
const result: MCValue = result: {
@ -8571,14 +8670,18 @@ fn atomicOp(
if (rmw_op == std.builtin.AtomicRmwOp.Sub and tag == .xadd) {
try self.genUnOpMir(.neg, val_ty, dst_mcv);
}
_ = try self.addInst(.{ .tag = tag, .ops = switch (tag) {
.mov, .xchg => .mr_sib,
.xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib,
else => unreachable,
}, .data = .{ .rx = .{
.r = registerAlias(dst_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
_ = try self.addInst(.{
.tag = tag,
.ops = switch (tag) {
.mov, .xchg => .mr_sib,
.xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib,
else => unreachable,
},
.data = .{ .rx = .{
.r = registerAlias(dst_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
});
return if (unused) .unreach else dst_mcv;
},
@ -8645,10 +8748,14 @@ fn atomicOp(
}
},
};
_ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{
.r = registerAlias(tmp_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
_ = try self.addInst(.{
.tag = .cmpxchg,
.ops = .lock_mr_sib,
.data = .{ .rx = .{
.r = registerAlias(tmp_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} },
});
_ = try self.asmJccReloc(loop, .ne);
return if (unused) .unreach else .{ .register = .rax };
} else {

View File

@ -18,142 +18,149 @@ pub const Error = Lower.Error || error{
};
pub fn emitMir(emit: *Emit) Error!void {
for (0..emit.lower.mir.instructions.len) |i| {
const index = @intCast(Mir.Inst.Index, i);
const inst = emit.lower.mir.instructions.get(index);
const start_offset = @intCast(u32, emit.code.items.len);
try emit.code_offset_mapping.putNoClobber(emit.lower.allocator, index, start_offset);
for (try emit.lower.lowerMir(inst)) |lower_inst| try lower_inst.encode(emit.code.writer(), .{});
const end_offset = @intCast(u32, emit.code.items.len);
switch (inst.tag) {
else => {},
.jmp_reloc => try emit.relocs.append(emit.lower.allocator, .{
.source = start_offset,
.target = inst.data.inst,
.offset = end_offset - 4,
.length = 5,
}),
.call_extern => if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom_index = macho_file.getAtomIndexForSymbol(
.{ .sym_index = inst.data.relocation.atom_index, .file = null },
).?;
const target = macho_file.getGlobalByIndex(inst.data.relocation.sym_index);
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = .branch,
for (0..emit.lower.mir.instructions.len) |mir_i| {
const mir_index = @intCast(Mir.Inst.Index, mir_i);
try emit.code_offset_mapping.putNoClobber(
emit.lower.allocator,
mir_index,
@intCast(u32, emit.code.items.len),
);
const lowered = try emit.lower.lowerMir(mir_index);
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset = @intCast(u32, emit.code.items.len);
try lowered_inst.encode(emit.code.writer(), .{});
const end_offset = @intCast(u32, emit.code.items.len);
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
lowered_relocs = lowered_relocs[1..];
}) switch (lowered_relocs[0].target) {
.inst => |target| try emit.relocs.append(emit.lower.allocator, .{
.source = start_offset,
.target = target,
.offset = end_offset - 4,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom_index = coff_file.getAtomIndexForSymbol(
.{ .sym_index = inst.data.relocation.atom_index, .file = null },
).?;
const target = coff_file.getGlobalByIndex(inst.data.relocation.sym_index);
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = end_offset - 4,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else return emit.fail("TODO implement {} for {}", .{ inst.tag, emit.bin_file.tag }),
.length = @intCast(u5, end_offset - start_offset),
}),
.@"extern" => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom_index = macho_file.getAtomIndexForSymbol(
.{ .sym_index = symbol.atom_index, .file = null },
).?;
const target = macho_file.getGlobalByIndex(symbol.sym_index);
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = .branch,
.target = target,
.offset = end_offset - 4,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom_index = coff_file.getAtomIndexForSymbol(
.{ .sym_index = symbol.atom_index, .file = null },
).?;
const target = coff_file.getGlobalByIndex(symbol.sym_index);
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = end_offset - 4,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else return emit.fail("TODO implement extern reloc for {s}", .{
@tagName(emit.bin_file.tag),
}),
.linker_got,
.linker_direct,
.linker_import,
.linker_tlv,
=> |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = macho_file.getAtomIndexForSymbol(.{
.sym_index = symbol.atom_index,
.file = null,
}).?;
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = switch (lowered_relocs[0].target) {
.linker_got => .got,
.linker_direct => .signed,
.linker_tlv => .tlv,
else => unreachable,
},
.target = .{ .sym_index = symbol.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom_index = coff_file.getAtomIndexForSymbol(.{
.sym_index = symbol.atom_index,
.file = null,
}).?;
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (lowered_relocs[0].target) {
.linker_got => .got,
.linker_direct => .direct,
.linker_import => .import,
else => unreachable,
},
.target = switch (lowered_relocs[0].target) {
.linker_got,
.linker_direct,
=> .{ .sym_index = symbol.sym_index, .file = null },
.linker_import => coff_file.getGlobalByIndex(symbol.sym_index),
else => unreachable,
},
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
});
} else return emit.fail("TODO implement linker reloc for {s}", .{
@tagName(emit.bin_file.tag),
}),
};
}
std.debug.assert(lowered_relocs.len == 0);
.mov_linker, .lea_linker => if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const metadata =
emit.lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data;
const atom_index = macho_file.getAtomIndexForSymbol(.{
.sym_index = metadata.atom_index,
.file = null,
}).?;
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = switch (inst.ops) {
.got_reloc => .got,
.direct_reloc => .signed,
.tlv_reloc => .tlv,
else => unreachable,
},
.target = .{ .sym_index = metadata.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const metadata =
emit.lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data;
const atom_index = coff_file.getAtomIndexForSymbol(.{
.sym_index = metadata.atom_index,
.file = null,
}).?;
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (inst.ops) {
.got_reloc => .got,
.direct_reloc => .direct,
.import_reloc => .import,
else => unreachable,
},
.target = switch (inst.ops) {
.got_reloc,
.direct_reloc,
=> .{ .sym_index = metadata.sym_index, .file = null },
.import_reloc => coff_file.getGlobalByIndex(metadata.sym_index),
else => unreachable,
},
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
});
} else return emit.fail("TODO implement {} for {}", .{ inst.tag, emit.bin_file.tag }),
.jcc => try emit.relocs.append(emit.lower.allocator, .{
.source = start_offset,
.target = inst.data.inst_cc.inst,
.offset = end_offset - 4,
.length = 6,
}),
.dbg_line => try emit.dbgAdvancePCAndLine(
inst.data.line_column.line,
inst.data.line_column.column,
),
.dbg_prologue_end => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.dbg_epilogue_begin => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
if (lowered.insts.len == 0) {
const mir_inst = emit.lower.mir.instructions.get(mir_index);
switch (mir_inst.tag) {
else => unreachable,
.dead => {},
.dbg_line => try emit.dbgAdvancePCAndLine(
mir_inst.data.line_column.line,
mir_inst.data.line_column.column,
),
.dbg_prologue_end => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.dbg_epilogue_begin => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
}
}
}
try emit.fixupRelocs();

View File

@ -5,13 +5,22 @@ mir: Mir,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Module.SrcLoc,
// Scratch buffers reused across calls to `lowerMir`; lengths are only valid
// while a lowering is in progress. Capacities are sized for the worst-case
// expansion of a single MIR instruction.
result_insts_len: u8 = undefined,
result_relocs_len: u8 = undefined,
result_insts: [
    std.mem.max(usize, &.{
        2, // cmovcc: cmovcc \ cmovcc
        3, // setcc: setcc \ setcc \ logicop
        2, // jcc: jcc \ jcc
        abi.Win64.callee_preserved_regs.len, // push_regs/pop_regs
        abi.SysV.callee_preserved_regs.len, // push_regs/pop_regs
    })
]Instruction = undefined,
result_relocs: [
    std.mem.max(usize, &.{
        2, // jcc: jcc \ jcc
    })
]Reloc = undefined,
pub const Error = error{
OutOfMemory,
@ -20,13 +29,35 @@ pub const Error = error{
CannotEncode,
};
/// A relocation produced while lowering one MIR instruction. Consumed by
/// Emit, which resolves the target after the instruction bytes are encoded.
pub const Reloc = struct {
    /// Index into the `insts` slice returned by `lowerMir` of the lowered
    /// instruction this relocation applies to.
    lowered_inst_index: u8,
    target: Target,

    const Target = union(enum) {
        /// Branch to another MIR instruction (resolved via code offsets).
        inst: Mir.Inst.Index,
        /// Call to an external symbol.
        @"extern": Mir.Reloc,
        linker_got: Mir.Reloc,
        linker_direct: Mir.Reloc,
        linker_import: Mir.Reloc,
        linker_tlv: Mir.Reloc,
    };
};
/// The returned slice is overwritten by the next call to lowerMir.
pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
insts: []const Instruction,
relocs: []const Reloc,
} {
lower.result_insts = undefined;
lower.result_relocs = undefined;
errdefer lower.result_insts = undefined;
errdefer lower.result_relocs = undefined;
lower.result_insts_len = 0;
lower.result_relocs_len = 0;
defer lower.result_insts_len = undefined;
defer lower.result_relocs_len = undefined;
const inst = lower.mir.instructions.get(index);
switch (inst.tag) {
.adc,
.add,
@ -185,22 +216,26 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction {
.cmpxchgb => try lower.mirCmpxchgBytes(inst),
.jmp_reloc => try lower.emit(.none, .jmp, &.{.{ .imm = Immediate.s(0) }}),
.jmp_reloc => try lower.emitInstWithReloc(.none, .jmp, &.{
.{ .imm = Immediate.s(0) },
}, .{ .inst = inst.data.inst }),
.call_extern => try lower.emit(.none, .call, &.{.{ .imm = Immediate.s(0) }}),
.call_extern => try lower.emitInstWithReloc(.none, .call, &.{
.{ .imm = Immediate.s(0) },
}, .{ .@"extern" = inst.data.relocation }),
.lea_linker => try lower.mirLeaLinker(inst),
.mov_linker => try lower.mirMovLinker(inst),
.lea_linker => try lower.mirLinker(.lea, inst),
.mov_linker => try lower.mirLinker(.mov, inst),
.mov_moffs => try lower.mirMovMoffs(inst),
.movsx => try lower.mirMovsx(inst),
.cmovcc => try lower.mirCmovcc(inst),
.setcc => try lower.mirSetcc(inst),
.jcc => try lower.emit(.none, mnem_cc(.j, inst.data.inst_cc.cc), &.{.{ .imm = Immediate.s(0) }}),
.jcc => try lower.mirJcc(index, inst),
.push_regs => try lower.mirPushPopRegisterList(inst, .push),
.pop_regs => try lower.mirPushPopRegisterList(inst, .pop),
.push_regs => try lower.mirRegisterList(.push, inst),
.pop_regs => try lower.mirRegisterList(.pop, inst),
.dbg_line,
.dbg_prologue_end,
@ -209,7 +244,10 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction {
=> {},
}
return lower.result[0..lower.result_len];
return .{
.insts = lower.result_insts[0..lower.result_insts_len],
.relocs = lower.result_relocs[0..lower.result_relocs_len],
};
}
pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
@ -221,7 +259,10 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
/// Builds a condition-code mnemonic (e.g. `.j` + `.ne` -> `.jne`) at comptime.
/// Compound conditions like `.z_and_np` have no single-mnemonic form; the
/// `@hasField` guard keeps the `@field` from being a compile error for those
/// prongs and makes reaching them a runtime `unreachable` instead.
fn mnem_cc(comptime base: @Type(.EnumLiteral), cc: bits.Condition) Mnemonic {
    return switch (cc) {
        inline else => |c| if (@hasField(Mnemonic, @tagName(base) ++ @tagName(c)))
            @field(Mnemonic, @tagName(base) ++ @tagName(c))
        else
            unreachable,
    };
}
@ -247,6 +288,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.rmi_rip,
.mri_sib,
.mri_rip,
.rrmi_sib,
.rrmi_rip,
=> Immediate.u(i),
.ri64 => Immediate.u(lower.mir.extraData(Mir.Imm64, i).data.decode()),
@ -267,6 +310,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
.mr_sib,
.mrr_sib,
.mri_sib,
.rrmi_sib,
.lock_m_sib,
.lock_mi_sib_u,
.lock_mi_sib_s,
@ -283,6 +327,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
.mr_rip,
.mrr_rip,
.mri_rip,
.rrmi_rip,
.lock_m_rip,
.lock_mi_rip_u,
.lock_mi_rip_s,
@ -298,13 +343,28 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
});
}
/// Appends one encoded-able instruction to the in-progress lowering result.
fn emitInst(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
    lower.result_insts[lower.result_insts_len] = try Instruction.new(prefix, mnemonic, ops);
    lower.result_insts_len += 1;
}

/// Appends an instruction and records a relocation attached to it. The
/// relocation is recorded first so `lowered_inst_index` names the
/// instruction about to be emitted.
fn emitInstWithReloc(
    lower: *Lower,
    prefix: Prefix,
    mnemonic: Mnemonic,
    ops: []const Operand,
    target: Reloc.Target,
) Error!void {
    lower.result_relocs[lower.result_relocs_len] = .{
        .lowered_inst_index = lower.result_insts_len,
        .target = target,
    };
    lower.result_relocs_len += 1;
    try lower.emitInst(prefix, mnemonic, ops);
}
fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void {
try lower.emit(switch (inst.ops) {
try lower.emitInst(switch (inst.ops) {
else => .none,
.lock_m_sib,
.lock_m_rip,
@ -389,13 +449,19 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void {
.{ .reg = inst.data.rix.r },
.{ .imm = lower.imm(inst.ops, inst.data.rix.i) },
},
.rrmi_sib, .rrmi_rip => &.{
.{ .reg = inst.data.rrix.r1 },
.{ .reg = inst.data.rrix.r2 },
.{ .mem = lower.mem(inst.ops, inst.data.rrix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.rrix.i) },
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
});
}
fn mirString(lower: *Lower, inst: Mir.Inst) Error!void {
switch (inst.ops) {
.string => try lower.emit(switch (inst.data.string.repeat) {
.string => try lower.emitInst(switch (inst.data.string.repeat) {
inline else => |repeat| @field(Prefix, @tagName(repeat)),
}, switch (inst.tag) {
inline .cmps, .lods, .movs, .scas, .stos => |tag| switch (inst.data.string.width) {
@ -414,7 +480,7 @@ fn mirCmpxchgBytes(lower: *Lower, inst: Mir.Inst) Error!void {
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
};
try lower.emit(switch (inst.ops) {
try lower.emitInst(switch (inst.ops) {
.m_sib, .m_rip => .none,
.lock_m_sib, .lock_m_rip => .lock,
else => unreachable,
@ -426,7 +492,7 @@ fn mirCmpxchgBytes(lower: *Lower, inst: Mir.Inst) Error!void {
}
fn mirMovMoffs(lower: *Lower, inst: Mir.Inst) Error!void {
try lower.emit(switch (inst.ops) {
try lower.emitInst(switch (inst.ops) {
.rax_moffs, .moffs_rax => .none,
.lock_moffs_rax => .lock,
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
@ -455,7 +521,7 @@ fn mirMovsx(lower: *Lower, inst: Mir.Inst) Error!void {
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
};
try lower.emit(.none, switch (ops[0].bitSize()) {
try lower.emitInst(.none, switch (ops[0].bitSize()) {
32, 64 => switch (ops[1].bitSize()) {
32 => .movsxd,
else => .movsx,
@ -465,32 +531,82 @@ fn mirMovsx(lower: *Lower, inst: Mir.Inst) Error!void {
}
fn mirCmovcc(lower: *Lower, inst: Mir.Inst) Error!void {
switch (inst.ops) {
.rr_cc => try lower.emit(.none, mnem_cc(.cmov, inst.data.rr_cc.cc), &.{
const data: struct { cc: bits.Condition, ops: [2]Operand } = switch (inst.ops) {
.rr_cc => .{ .cc = inst.data.rr_cc.cc, .ops = .{
.{ .reg = inst.data.rr_cc.r1 },
.{ .reg = inst.data.rr_cc.r2 },
}),
.rm_sib_cc, .rm_rip_cc => try lower.emit(.none, mnem_cc(.cmov, inst.data.rx_cc.cc), &.{
} },
.rm_sib_cc, .rm_rip_cc => .{ .cc = inst.data.rx_cc.cc, .ops = .{
.{ .reg = inst.data.rx_cc.r },
.{ .mem = lower.mem(inst.ops, inst.data.rx_cc.payload) },
}),
} },
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
};
switch (data.cc) {
else => |cc| try lower.emitInst(.none, mnem_cc(.cmov, cc), &data.ops),
.z_and_np => {
try lower.emitInst(.none, mnem_cc(.cmov, .nz), &.{ data.ops[1], data.ops[0] });
try lower.emitInst(.none, mnem_cc(.cmov, .np), &data.ops);
},
.nz_or_p => {
try lower.emitInst(.none, mnem_cc(.cmov, .nz), &data.ops);
try lower.emitInst(.none, mnem_cc(.cmov, .p), &data.ops);
},
}
}
fn mirSetcc(lower: *Lower, inst: Mir.Inst) Error!void {
switch (inst.ops) {
.r_cc => try lower.emit(.none, mnem_cc(.set, inst.data.r_cc.cc), &.{
const data: struct { cc: bits.Condition, ops: [2]Operand } = switch (inst.ops) {
.r_cc => .{ .cc = inst.data.r_cc.cc, .ops = .{
.{ .reg = inst.data.r_cc.r },
}),
.m_sib_cc, .m_rip_cc => try lower.emit(.none, mnem_cc(.set, inst.data.x_cc.cc), &.{
.{ .reg = inst.data.r_cc.scratch },
} },
.m_sib_cc, .m_rip_cc => .{ .cc = inst.data.x_cc.cc, .ops = .{
.{ .mem = lower.mem(inst.ops, inst.data.x_cc.payload) },
}),
.{ .reg = inst.data.x_cc.scratch },
} },
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
};
switch (data.cc) {
else => |cc| try lower.emitInst(.none, mnem_cc(.set, cc), data.ops[0..1]),
.z_and_np => {
try lower.emitInst(.none, mnem_cc(.set, .z), data.ops[0..1]);
try lower.emitInst(.none, mnem_cc(.set, .np), data.ops[1..2]);
try lower.emitInst(.none, .@"and", data.ops[0..2]);
},
.nz_or_p => {
try lower.emitInst(.none, mnem_cc(.set, .nz), data.ops[0..1]);
try lower.emitInst(.none, mnem_cc(.set, .p), data.ops[1..2]);
try lower.emitInst(.none, .@"or", data.ops[0..2]);
},
}
}
fn mirPushPopRegisterList(lower: *Lower, inst: Mir.Inst, comptime mnemonic: Mnemonic) Error!void {
fn mirJcc(lower: *Lower, index: Mir.Inst.Index, inst: Mir.Inst) Error!void {
switch (inst.data.inst_cc.cc) {
else => |cc| try lower.emitInstWithReloc(.none, mnem_cc(.j, cc), &.{
.{ .imm = Immediate.s(0) },
}, .{ .inst = inst.data.inst_cc.inst }),
.z_and_np => {
try lower.emitInstWithReloc(.none, mnem_cc(.j, .nz), &.{
.{ .imm = Immediate.s(0) },
}, .{ .inst = index + 1 });
try lower.emitInstWithReloc(.none, mnem_cc(.j, .np), &.{
.{ .imm = Immediate.s(0) },
}, .{ .inst = inst.data.inst_cc.inst });
},
.nz_or_p => {
try lower.emitInstWithReloc(.none, mnem_cc(.j, .nz), &.{
.{ .imm = Immediate.s(0) },
}, .{ .inst = inst.data.inst_cc.inst });
try lower.emitInstWithReloc(.none, mnem_cc(.j, .p), &.{
.{ .imm = Immediate.s(0) },
}, .{ .inst = inst.data.inst_cc.inst });
},
}
}
fn mirRegisterList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void {
const reg_list = Mir.RegisterList.fromInt(inst.data.payload);
const callee_preserved_regs = abi.getCalleePreservedRegs(lower.target.*);
var it = reg_list.iterator(.{ .direction = switch (mnemonic) {
@ -498,24 +614,20 @@ fn mirPushPopRegisterList(lower: *Lower, inst: Mir.Inst, comptime mnemonic: Mnem
.pop => .forward,
else => unreachable,
} });
while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }});
while (it.next()) |i| try lower.emitInst(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }});
}
fn mirLeaLinker(lower: *Lower, inst: Mir.Inst) Error!void {
const metadata = lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data;
const reg = @intToEnum(Register, metadata.reg);
try lower.emit(.none, .lea, &.{
.{ .reg = reg },
.{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) },
});
}
fn mirMovLinker(lower: *Lower, inst: Mir.Inst) Error!void {
const metadata = lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data;
const reg = @intToEnum(Register, metadata.reg);
try lower.emit(.none, .mov, &.{
.{ .reg = reg },
.{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) },
fn mirLinker(lower: *Lower, mnemonic: Mnemonic, inst: Mir.Inst) Error!void {
const reloc = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data;
try lower.emitInstWithReloc(.none, mnemonic, &.{
.{ .reg = inst.data.rx.r },
.{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(inst.data.rx.r.bitSize()), 0) },
}, switch (inst.ops) {
.got_reloc => .{ .linker_got = reloc },
.direct_reloc => .{ .linker_direct = reloc },
.import_reloc => .{ .linker_import = reloc },
.tlv_reloc => .{ .linker_tlv = reloc },
else => unreachable,
});
}

View File

@ -434,6 +434,12 @@ pub const Inst = struct {
/// Register, memory (SIB), immediate (byte) operands.
/// Uses `rix` payload with extra data of type `MemorySib`.
rmi_sib,
/// Register, register, memory (RIP), immediate (byte) operands.
/// Uses `rrix` payload with extra data of type `MemoryRip`.
rrmi_rip,
/// Register, register, memory (SIB), immediate (byte) operands.
/// Uses `rrix` payload with extra data of type `MemorySib`.
rrmi_sib,
/// Register, memory (RIP), immediate (byte) operands.
/// Uses `rix` payload with extra data of type `MemoryRip`.
rmi_rip,
@ -524,16 +530,16 @@ pub const Inst = struct {
/// Uses `reloc` payload.
reloc,
/// Linker relocation - GOT indirection.
/// Uses `payload` payload with extra data of type `LeaRegisterReloc`.
/// Uses `rx` payload with extra data of type `Reloc`.
got_reloc,
/// Linker relocation - direct reference.
/// Uses `payload` payload with extra data of type `LeaRegisterReloc`.
/// Uses `rx` payload with extra data of type `Reloc`.
direct_reloc,
/// Linker relocation - imports table indirection (binding).
/// Uses `payload` payload with extra data of type `LeaRegisterReloc`.
/// Uses `rx` payload with extra data of type `Reloc`.
import_reloc,
/// Linker relocation - threadlocal variable via GOT indirection.
/// Uses `payload` payload with extra data of type `LeaRegisterReloc`.
/// Uses `rx` payload with extra data of type `Reloc`.
tlv_reloc,
};
@ -567,12 +573,14 @@ pub const Inst = struct {
},
/// Condition code (CC), followed by custom payload found in extra.
x_cc: struct {
scratch: Register,
cc: bits.Condition,
payload: u32,
},
/// Register with condition code (CC).
r_cc: struct {
r: Register,
scratch: Register,
cc: bits.Condition,
},
/// Register, register with condition code (CC).
@ -614,6 +622,13 @@ pub const Inst = struct {
i: u8,
payload: u32,
},
/// Register, register, byte immediate, followed by Custom payload found in extra.
rrix: struct {
r1: Register,
r2: Register,
i: u8,
payload: u32,
},
/// String instruction prefix and width.
string: struct {
repeat: bits.StringRepeat,
@ -622,12 +637,7 @@ pub const Inst = struct {
/// Relocation for the linker where:
/// * `atom_index` is the index of the source
/// * `sym_index` is the index of the target
relocation: struct {
/// Index of the containing atom.
atom_index: u32,
/// Index into the linker's symbol table.
sym_index: u32,
},
relocation: Reloc,
/// Debug line and column position
line_column: struct {
line: u32,
@ -646,9 +656,7 @@ pub const Inst = struct {
}
};
pub const LeaRegisterReloc = struct {
/// Destination register.
reg: u32,
pub const Reloc = struct {
/// Index of the containing atom.
atom_index: u32,
/// Index into the linker's symbol table.

View File

@ -72,6 +72,12 @@ pub const Condition = enum(u5) {
/// zero
z,
// Pseudo conditions
/// zero and not parity
z_and_np,
/// not zero or parity
nz_or_p,
/// Converts a std.math.CompareOperator into a condition flag,
/// i.e. returns the condition that is true iff the result of the
/// comparison is true. Assumes signed comparison
@ -143,6 +149,9 @@ pub const Condition = enum(u5) {
.po => .pe,
.s => .ns,
.z => .nz,
.z_and_np => .nz_or_p,
.nz_or_p => .z_and_np,
};
}
};

View File

@ -245,9 +245,9 @@ pub const Instruction = struct {
},
.mem => |mem| {
const op = switch (data.op_en) {
.m, .mi, .m1, .mc => .none,
.m, .mi, .m1, .mc, .vmi => .none,
.mr, .mri, .mrc => inst.ops[1],
.rm, .rmi => inst.ops[0],
.rm, .rmi, .rvm, .rvmi => inst.ops[0],
else => unreachable,
};
try encodeMemory(enc, mem, op, encoder);

View File

@ -29,7 +29,6 @@ test "inf >= 1" {
test "isNan(nan * 1)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const nan_times_one = comptime std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
@ -37,7 +36,6 @@ test "isNan(nan * 1)" {
test "runtime isNan(nan * 1)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const nan_times_one = std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
@ -45,7 +43,6 @@ test "runtime isNan(nan * 1)" {
test "isNan(nan * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const nan_times_zero = comptime std.math.nan(f64) * 0;
try std.testing.expect(std.math.isNan(nan_times_zero));
@ -55,7 +52,6 @@ test "isNan(nan * 0)" {
test "isNan(inf * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const inf_times_zero = comptime std.math.inf(f64) * 0;
try std.testing.expect(std.math.isNan(inf_times_zero));
@ -65,7 +61,6 @@ test "isNan(inf * 0)" {
test "runtime isNan(nan * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const nan_times_zero = std.math.nan(f64) * 0;
try std.testing.expect(std.math.isNan(nan_times_zero));
@ -75,7 +70,6 @@ test "runtime isNan(nan * 0)" {
test "runtime isNan(inf * 0)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const inf_times_zero = std.math.inf(f64) * 0;
try std.testing.expect(std.math.isNan(inf_times_zero));

View File

@ -2,7 +2,6 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "@fieldParentPtr non-first field" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testParentFieldPtr(&foo.c);