x86_64: add RM and MR helpers to codegen

Jakub Konka 2023-03-09 20:32:07 +01:00
parent 9658ab6766
commit 32708dd6e2
4 changed files with 397 additions and 266 deletions
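For orientation, a quick usage sketch (hypothetical call sites, not taken verbatim from the diff): the commit replaces hand-rolled `addInst` calls with `asmMemory`, `asmRegisterMemory` (RM), and `asmMemoryRegister` (MR) helpers, assuming a `Self` with these helpers in scope:

// RM form: register <- memory, e.g. mov rax, qword ptr [rbp - 8]
try self.asmRegisterMemory(.mov, .rax, Memory.sib(.qword, .{ .base = .rbp, .disp = -8 }));
// MR form: memory <- register, e.g. mov qword ptr [rbp - 8], rax
try self.asmMemoryRegister(.mov, Memory.sib(.qword, .{ .base = .rbp, .disp = -8 }), .rax);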


@@ -491,6 +491,72 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Imme
});
}
fn asmMemory(self: *Self, tag: Mir.Inst.Tag, m: Memory) !void {
const ops: Mir.Inst.Ops = switch (m) {
.sib => .m_sib,
.rip => .m_rip,
else => unreachable,
};
const data: Mir.Inst.Data = switch (ops) {
.m_sib => .{ .payload = try self.addExtra(Mir.MemorySib.encode(m)) },
.m_rip => .{ .payload = try self.addExtra(Mir.MemoryRip.encode(m)) },
else => unreachable,
};
_ = try self.addInst(.{
.tag = tag,
.ops = ops,
.data = data,
});
}
fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) !void {
const ops: Mir.Inst.Ops = switch (m) {
.sib => .rm_sib,
.rip => .rm_rip,
else => unreachable,
};
const data: Mir.Inst.Data = switch (ops) {
.rm_sib => .{ .rx = .{
.r1 = reg,
.payload = try self.addExtra(Mir.MemorySib.encode(m)),
} },
.rm_rip => .{ .rx = .{
.r1 = reg,
.payload = try self.addExtra(Mir.MemoryRip.encode(m)),
} },
else => unreachable,
};
_ = try self.addInst(.{
.tag = tag,
.ops = ops,
.data = data,
});
}
fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) !void {
const ops: Mir.Inst.Ops = switch (m) {
.sib => .mr_sib,
.rip => .mr_rip,
else => unreachable,
};
const data: Mir.Inst.Data = switch (ops) {
.mr_sib => .{ .rx = .{
.r1 = reg,
.payload = try self.addExtra(Mir.MemorySib.encode(m)),
} },
.mr_rip => .{ .rx = .{
.r1 = reg,
.payload = try self.addExtra(Mir.MemoryRip.encode(m)),
} },
else => unreachable,
};
_ = try self.addInst(.{
.tag = tag,
.ops = ops,
.data = data,
});
}
fn gen(self: *Self) InnerError!void {
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
@@ -1741,23 +1807,10 @@ fn genIntMulDivOpMir(
switch (factor) {
.register => |reg| try self.asmRegister(tag, reg),
.stack_offset => |off| {
_ = off;
// _ = try self.addInst(.{
// .tag = tag,
// .ops = Mir.Inst.Ops.encode(.{
// .reg2 = .rbp,
// .flags = switch (abi_size) {
// 1 => 0b00,
// 2 => 0b01,
// 4 => 0b10,
// 8 => 0b11,
// else => unreachable,
// },
// }),
// .data = .{ .disp = -off },
// });
},
.stack_offset => |off| try self.asmMemory(tag, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rbp,
.disp = -off,
})),
else => unreachable,
}
}
@@ -2222,19 +2275,10 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
const addr_reg = try self.register_manager.allocReg(null, gp);
switch (slice_mcv) {
.stack_offset => |off| {
_ = off;
// mov reg, [rbp - 8]
// _ = try self.addInst(.{
// .tag = .mov,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = addr_reg.to64(),
// .reg2 = .rbp,
// .flags = 0b01,
// }),
// .data = .{ .disp = -@intCast(i32, off) },
// });
},
.stack_offset => |off| try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
.base = .rbp,
.disp = -off,
})),
else => return self.fail("TODO implement slice_elem_ptr when slice is {}", .{slice_mcv}),
}
// TODO we could allocate register here, but need to expect addr register and potentially
@@ -2309,27 +2353,16 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
array_ty.abiAlignment(self.target.*),
));
try self.genSetStack(array_ty, off, array, .{});
// lea reg, [rbp]
// _ = try self.addInst(.{
// .tag = .lea,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = addr_reg.to64(),
// .reg2 = .rbp,
// }),
// .data = .{ .disp = -off },
// });
try self.asmRegisterMemory(.lea, addr_reg.to64(), Memory.sib(.qword, .{
.base = .rbp,
.disp = -off,
}));
},
.stack_offset => |off| {
_ = off;
// lea reg, [rbp]
// _ = try self.addInst(.{
// .tag = .lea,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = addr_reg.to64(),
// .reg2 = .rbp,
// }),
// .data = .{ .disp = -off },
// });
try self.asmRegisterMemory(.lea, addr_reg.to64(), Memory.sib(.qword, .{
.base = .rbp,
.disp = -off,
}));
},
.memory, .linker_load => {
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array);
@@ -2366,7 +2399,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
const elem_ty = ptr_ty.elemType2();
const elem_abi_size = elem_ty.abiSize(self.target.*);
const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
const index_ty = self.air.typeOf(bin_op.rhs);
const index = try self.resolveInst(bin_op.rhs);
const index_lock: ?RegisterLock = switch (index) {
@@ -2386,16 +2419,14 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
if (elem_abi_size > 8) {
return self.fail("TODO copy value with size {} from pointer", .{elem_abi_size});
} else {
// mov dst_mcv, [dst_mcv]
// _ = try self.addInst(.{
// .tag = .mov,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)),
// .reg2 = dst_mcv.register,
// .flags = 0b01,
// }),
// .data = .{ .disp = 0 },
// });
try self.asmRegisterMemory(
.mov,
registerAlias(dst_mcv.register, elem_abi_size),
Memory.sib(Memory.PtrSize.fromSize(elem_abi_size), .{
.base = dst_mcv.register,
.disp = 0,
}),
);
break :result .{ .register = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)) };
}
};
@@ -2622,7 +2653,7 @@ fn reuseOperand(
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const elem_ty = ptr_ty.elemType();
const abi_size = elem_ty.abiSize(self.target.*);
const abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@@ -2649,17 +2680,11 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.undef => unreachable,
.eflags => unreachable,
.register => |dst_reg| {
_ = dst_reg;
// mov dst_reg, [reg]
// _ = try self.addInst(.{
// .tag = .mov,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = registerAlias(dst_reg, @intCast(u32, abi_size)),
// .reg2 = reg,
// .flags = 0b01,
// }),
// .data = .{ .disp = 0 },
// });
try self.asmRegisterMemory(
.mov,
registerAlias(dst_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg, .disp = 0 }),
);
},
.stack_offset => |off| {
if (abi_size <= 8) {
@@ -2874,17 +2899,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
try self.loadMemPtrIntoRegister(addr_reg, ptr_ty, ptr);
// to get the actual address of the value we want to modify we have to go through the GOT
// mov reg, [reg]
// _ = try self.addInst(.{
// .tag = .mov,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = addr_reg.to64(),
// .reg2 = addr_reg.to64(),
// .flags = 0b01,
// }),
// .data = .{ .disp = 0 },
// });
// To get the actual address of the value we want to modify we have to go through the GOT
try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
.base = addr_reg.to64(),
.disp = 0,
}));
const new_ptr = MCValue{ .register = addr_reg.to64() };
@@ -2936,16 +2955,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
defer self.register_manager.unlockReg(tmp_reg_lock);
try self.loadMemPtrIntoRegister(tmp_reg, value_ty, value);
try self.asmRegisterMemory(.mov, tmp_reg, Memory.sib(.qword, .{
.base = tmp_reg,
.disp = 0,
}));
// _ = try self.addInst(.{
// .tag = .mov,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = tmp_reg,
// .reg2 = tmp_reg,
// .flags = 0b01,
// }),
// .data = .{ .disp = 0 },
// });
return self.store(new_ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
}
@@ -3603,15 +3617,11 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
// _ = try self.addInst(.{
// .tag = mir_tag,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = registerAlias(dst_reg, abi_size),
// .reg2 = .rbp,
// .flags = 0b01,
// }),
// .data = .{ .disp = -off },
// });
try self.asmRegisterMemory(
mir_tag,
registerAlias(dst_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
);
},
}
},
@@ -3629,16 +3639,10 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.dead, .unreach => unreachable,
.register_overflow => unreachable,
.register => |src_reg| {
_ = src_reg;
// _ = try self.addInst(.{
// .tag = mir_tag,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = .rbp,
// .reg2 = registerAlias(src_reg, abi_size),
// .flags = 0b10,
// }),
// .data = .{ .disp = -off },
// });
try self.asmMemoryRegister(mir_tag, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rbp,
.disp = -off,
}), registerAlias(src_reg, abi_size));
},
.immediate => |imm| {
_ = imm;
@@ -3738,16 +3742,11 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
}
},
.stack_offset => |off| {
_ = off;
// _ = try self.addInst(.{
// .tag = .imul_complex,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = registerAlias(dst_reg, abi_size),
// .reg2 = .rbp,
// .flags = 0b01,
// }),
// .data = .{ .disp = -off },
// });
try self.asmRegisterMemory(
.imul,
registerAlias(dst_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
);
},
.memory => {
return self.fail("TODO implement x86 multiply source memory", .{});
@@ -3770,17 +3769,11 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.register => |src_reg| {
// copy dst to a register
const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
_ = src_reg;
// multiply into dst_reg
// register, register
// _ = try self.addInst(.{
// .tag = .imul_complex,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = registerAlias(dst_reg, abi_size),
// .reg2 = registerAlias(src_reg, abi_size),
// }),
// .data = undefined,
// });
try self.asmRegisterRegister(
.imul,
registerAlias(dst_reg, abi_size),
registerAlias(src_reg, abi_size),
);
// copy dst_reg back out
return self.genSetStack(dst_ty, off, .{ .register = dst_reg }, .{});
},
@@ -4006,11 +3999,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (info.stack_byte_count > 0) {
// Adjust the stack
// _ = try self.addInst(.{
// .tag = .sub,
// .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
// .data = .{ .imm = info.stack_byte_count },
// });
try self.asmRegisterImmediate(.sub, .rsp, Immediate.u(info.stack_byte_count));
}
// Due to incremental compilation, how function calls are generated depends
@@ -4161,7 +4150,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
// TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
// which is available if the jump is 127 bytes or less forward.
const jmp_reloc = try self.addInst(.{
.tag = .jmp,
.tag = .jmp_reloc,
.ops = .inst,
.data = .{ .inst = undefined },
});
@@ -4197,7 +4186,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
// which is available if the jump is 127 bytes or less forward.
const jmp_reloc = try self.addInst(.{
.tag = .jmp,
.tag = .jmp_reloc,
.ops = .inst,
.data = .{ .inst = undefined },
});
@@ -4738,7 +4727,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const jmp_target = @intCast(u32, self.mir_instructions.len);
try self.genBody(body);
_ = try self.addInst(.{
.tag = .jmp,
.tag = .jmp_reloc,
.ops = .inst,
.data = .{ .inst = jmp_target },
});
@@ -5035,7 +5024,7 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
.jcc => {
self.mir_instructions.items(.data)[reloc].inst_cc.inst = next_inst;
},
.jmp => {
.jmp_reloc => {
self.mir_instructions.items(.data)[reloc].inst = next_inst;
},
else => unreachable,
@@ -5078,7 +5067,7 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
// Leave the jump offset undefined
const jmp_reloc = try self.addInst(.{
.tag = .jmp,
.tag = .jmp_reloc,
.ops = .inst,
.data = .{ .inst = undefined },
});
@@ -5247,7 +5236,7 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
}
fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerError!void {
const abi_size = ty.abiSize(self.target.*);
const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (mcv) {
.dead => unreachable,
.unreach, .none => return,
@@ -5325,36 +5314,25 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
.{ty.fmtDebug()},
),
};
_ = tag;
_ = reg;
// _ = try self.addInst(.{
// .tag = tag,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = switch (ty.tag()) {
// .f32 => .esp,
// .f64 => .rsp,
// else => unreachable,
// },
// .reg2 = reg.to128(),
// .flags = 0b01,
// }),
// .data = .{ .disp = -stack_offset },
// });
return;
// TODO verify this
const ptr_size: Memory.PtrSize = switch (ty.tag()) {
.f32 => .dword,
.f64 => .qword,
else => unreachable,
};
return self.asmMemoryRegister(tag, Memory.sib(ptr_size, .{
.base = .rsp,
.disp = -stack_offset,
}), reg.to128());
}
return self.fail("TODO genSetStackArg for register with no intrinsics", .{});
},
else => {
// _ = try self.addInst(.{
// .tag = .mov,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = .rsp,
// .reg2 = registerAlias(reg, @intCast(u32, abi_size)),
// .flags = 0b10,
// }),
// .data = .{ .disp = -stack_offset },
// });
try self.asmMemoryRegister(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rsp,
.disp = -stack_offset,
}), registerAlias(reg, abi_size));
},
}
},
@@ -5507,25 +5485,23 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
switch (ty.zigTypeTag()) {
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
// const tag: Mir.Inst.Tag = switch (ty.tag()) {
// .f32 => Mir.Inst.Tag.mov_f32,
// .f64 => Mir.Inst.Tag.mov_f64,
// else => return self.fail("TODO genSetStack for register for type {}", .{ty.fmtDebug()}),
// };
// _ = try self.addInst(.{
// .tag = tag,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = switch (ty.tag()) {
// .f32 => base_reg.to32(),
// .f64 => base_reg.to64(),
// else => unreachable,
// },
// .reg2 = reg.to128(),
// .flags = 0b01,
// }),
// .data = .{ .disp = -stack_offset },
// });
return;
const tag: Mir.Inst.Tag = switch (ty.tag()) {
.f32 => .movss,
.f64 => .movsd,
else => return self.fail(
"TODO genSetStack for register for type {}",
.{ty.fmtDebug()},
),
};
const ptr_size: Memory.PtrSize = switch (ty.tag()) {
.f32 => .dword,
.f64 => .qword,
else => unreachable,
};
return self.asmMemoryRegister(tag, Memory.sib(ptr_size, .{
.base = base_reg.to64(),
.disp = -stack_offset,
}), reg.to128());
}
return self.fail("TODO genSetStack for register for type float with no intrinsics", .{});
@@ -5590,16 +5566,10 @@ fn genInlineMemcpyRegisterRegister(
var remainder = abi_size;
while (remainder > 0) {
const nearest_power_of_two = @as(u6, 1) << math.log2_int(u3, @intCast(u3, remainder));
// _ = try self.addInst(.{
// .tag = .mov,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = dst_reg,
// .reg2 = registerAlias(tmp_reg, nearest_power_of_two),
// .flags = 0b10,
// }),
// .data = .{ .disp = -next_offset },
// });
try self.asmMemoryRegister(.mov, Memory.sib(Memory.PtrSize.fromSize(nearest_power_of_two), .{
.base = dst_reg,
.disp = -next_offset,
}), registerAlias(tmp_reg, nearest_power_of_two));
if (nearest_power_of_two > 1) {
try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{
@@ -5611,15 +5581,10 @@ fn genInlineMemcpyRegisterRegister(
next_offset -= nearest_power_of_two;
}
} else {
// _ = try self.addInst(.{
// .tag = .mov,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = dst_reg,
// .reg2 = registerAlias(src_reg, @intCast(u32, abi_size)),
// .flags = 0b10,
// }),
// .data = .{ .disp = -offset },
// });
try self.asmMemoryRegister(.mov, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = dst_reg,
.disp = -offset,
}), registerAlias(src_reg, abi_size));
}
}
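The chunked branch above copies `abi_size` bytes using the largest power-of-two move that still fits, then shifts the temporary register right to expose the next chunk. A worked sketch of just the chunk sequence (standalone fragment, assuming `std.math.log2_int` as used above):

// For abi_size = 7 the loop issues three moves: 4, 2, then 1 bytes.
var remainder: u4 = 7;
while (remainder > 0) {
    // Largest power of two <= remainder (4, then 2, then 1).
    const chunk = @as(u4, 1) << std.math.log2_int(u3, @intCast(u3, remainder));
    remainder -= chunk;
}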
@@ -5660,15 +5625,10 @@ fn genInlineMemcpy(
try self.loadMemPtrIntoRegister(dst_addr_reg, Type.usize, dst_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
_ = off;
// _ = try self.addInst(.{
// .tag = .lea,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = dst_addr_reg.to64(),
// .reg2 = opts.dest_stack_base orelse .rbp,
// }),
// .data = .{ .disp = -off },
// });
try self.asmRegisterMemory(.lea, dst_addr_reg.to64(), Memory.sib(.qword, .{
.base = opts.dest_stack_base orelse .rbp,
.disp = -off,
}));
},
.register => |reg| {
try self.asmRegisterRegister(
@@ -5754,7 +5714,7 @@ fn genInlineMemcpy(
try self.asmRegisterImmediate(.sub, count_reg, Immediate.u(1));
_ = try self.addInst(.{
.tag = .jmp,
.tag = .jmp_reloc,
.ops = .inst,
.data = .{ .inst = loop_start },
});
@@ -5857,7 +5817,7 @@ fn genInlineMemset(
try self.asmRegisterImmediate(.sub, index_reg, Immediate.u(1));
_ = try self.addInst(.{
.tag = .jmp,
.tag = .jmp_reloc,
.ops = .inst,
.data = .{ .inst = loop_start },
});
@@ -6045,19 +6005,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// .data = .{ .disp = @intCast(i32, x) },
// });
} else {
// If this is RAX, we can use a direct load.
// Otherwise, we need to load the address, then indirectly load the value.
if (reg.to64() == .rax) {
// movabs rax, ds:moffs64
// const payload = try self.addExtra(Mir.Imm64.encode(x));
// _ = try self.addInst(.{
// .tag = .movabs,
// .ops = Mir.Inst.Ops.encode(.{
// .reg1 = .rax,
// .flags = 0b01, // imm64 will become moffs64
// }),
// .data = .{ .payload = payload },
// });
// If this is RAX, we can use a direct load.
// Otherwise, we need to load the address, then indirectly load the value.
var moffs: Mir.MemoryMoffs = .{
.seg = @enumToInt(Register.ds),
.msb = undefined,
.lsb = undefined,
};
moffs.encodeOffset(x);
_ = try self.addInst(.{
.tag = .mov_moffs,
.ops = .rax_moffs,
.data = .{ .payload = try self.addExtra(moffs) },
});
} else {
// Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue.
try self.genSetReg(ty, reg, MCValue{ .immediate = x });


@@ -73,6 +73,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.adc,
.add,
.@"and",
.call,
.cbw,
.cwde,
.cdqe,
@@ -86,6 +87,8 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.idiv,
.imul,
.int3,
.jmp,
.lea,
.mov,
.movzx,
.mul,
@@ -115,9 +118,9 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.ucomisd,
=> try emit.mirEncodeGeneric(tag, inst),
.call,
.jmp,
=> try emit.mirCallJmp(inst),
.jmp_reloc => try emit.mirJmpReloc(inst),
.mov_moffs => try emit.mirMovMoffs(inst),
.movsx => try emit.mirMovsx(inst),
.cmovcc => try emit.mirCmovcc(inst),
@@ -130,8 +133,6 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.push_regs => try emit.mirPushPopRegisterList(.push, inst),
.pop_regs => try emit.mirPushPopRegisterList(.pop, inst),
else => return emit.fail("Implement MIR->Emit lowering for x86_64 for pseudo-inst: {}", .{tag}),
}
}
@@ -212,6 +213,34 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
.{ .imm = Immediate.u(Mir.Imm64.decode(imm64)) },
};
},
.m_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.payload).data;
operands[0] = .{ .mem = Mir.MemorySib.decode(msib) };
},
.m_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
operands[0] = .{ .mem = Mir.MemoryRip.decode(mrip) };
},
.rm_sib, .mr_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data;
const op1 = .{ .reg = data.rx.r1 };
const op2 = .{ .mem = Mir.MemorySib.decode(msib) };
switch (ops) {
.rm_sib => operands[0..2].* = .{ op1, op2 },
.mr_sib => operands[0..2].* = .{ op2, op1 },
else => unreachable,
}
},
.rm_rip, .mr_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data;
const op1 = .{ .reg = data.rx.r1 };
const op2 = .{ .mem = Mir.MemoryRip.decode(mrip) };
switch (ops) {
.rm_rip => operands[0..2].* = .{ op1, op2 },
.mr_rip => operands[0..2].* = .{ op2, op1 },
else => unreachable,
}
},
else => unreachable, // TODO
}
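Piecing the two sides together, a sketch of the round trip implied here (assuming `addExtra`/`extraData` are the usual Mir extra-data accessors, as the calls above suggest):

// CodeGen side (see asmRegisterMemory in this commit):
const mem = Memory.sib(.dword, .{ .base = .rbp, .disp = -4 });
const payload = try self.addExtra(Mir.MemorySib.encode(mem));
// Emit side (this function): recover the identical Memory value.
const msib = emit.mir.extraData(Mir.MemorySib, payload).data;
const decoded = Mir.MemorySib.decode(msib);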
@@ -223,6 +252,29 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE
});
}
fn mirMovMoffs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
const payload = emit.mir.instructions.items(.data)[inst].payload;
const moffs = emit.mir.extraData(Mir.MemoryMoffs, payload).data;
const seg = @intToEnum(Register, moffs.seg);
const offset = moffs.decodeOffset();
switch (ops) {
.rax_moffs => {
try emit.encode(.mov, .{
.op1 = .{ .reg = .rax },
.op2 = .{ .mem = Memory.moffs(seg, offset) },
});
},
.moffs_rax => {
try emit.encode(.mov, .{
.op1 = .{ .mem = Memory.moffs(seg, offset) },
.op2 = .{ .reg = .rax },
});
},
else => unreachable,
}
}
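For the producer side, mirroring the `genSetReg` change in this commit, a `rax_moffs` load would be built roughly like this (a sketch; the ds segment choice and payload layout match the codegen hunk above):

var moffs: Mir.MemoryMoffs = .{
    .seg = @enumToInt(Register.ds),
    .msb = undefined,
    .lsb = undefined,
};
moffs.encodeOffset(0x1000); // absolute offset wrt the segment register
_ = try self.addInst(.{
    .tag = .mov_moffs,
    .ops = .rax_moffs, // lowers to: mov rax, ds:[0x1000]
    .data = .{ .payload = try self.addExtra(moffs) },
});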
fn mirMovsx(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
const data = emit.mir.instructions.items(.data)[inst];
@@ -302,19 +354,13 @@ fn mirJcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
}
fn mirCallJmp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
const mnemonic: Instruction.Mnemonic = switch (tag) {
.call => .call,
.jmp => .jmp,
else => unreachable,
};
fn mirJmpReloc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
switch (ops) {
.inst => {
const target = emit.mir.instructions.items(.data)[inst].inst;
const source = emit.code.items.len;
try emit.encode(mnemonic, .{
try emit.encode(.jmp, .{
.op1 = .{ .imm = Immediate.s(0) },
});
try emit.relocs.append(emit.bin_file.allocator, .{
@@ -324,19 +370,7 @@ fn mirCallJmp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 5,
});
},
.r => {
const reg = emit.mir.instructions.items(.data)[inst].r;
try emit.encode(mnemonic, .{
.op1 = .{ .reg = reg },
});
},
.imm_s => {
const imm = emit.mir.instructions.items(.data)[inst].imm_s;
try emit.encode(mnemonic, .{
.op1 = .{ .imm = Immediate.s(imm) },
});
},
else => unreachable, // TODO
else => unreachable,
}
}


@@ -17,6 +17,7 @@ const encoder = @import("encoder.zig");
const Air = @import("../../Air.zig");
const CodeGen = @import("CodeGen.zig");
const IntegerBitSet = std.bit_set.IntegerBitSet;
const Memory = bits.Memory;
const Register = bits.Register;
instructions: std.MultiArrayList(Inst).Slice,
@@ -135,6 +136,12 @@ pub const Inst = struct {
/// Conditional move
cmovcc,
/// Move rax to/from an absolute memory offset (moffs) addressed relative to a segment register
mov_moffs,
/// Jump with relocation to another local MIR instruction
jmp_reloc,
/// End of prologue
dbg_prologue_end,
/// Start of epilogue
@@ -186,24 +193,48 @@ pub const Inst = struct {
/// Relative displacement operand.
/// Uses `rel` payload.
rel,
/// Register, memory operands.
/// Register, memory (SIB) operands.
/// Uses `rx` payload.
rm,
rm_sib,
/// Register, memory (RIP) operands.
/// Uses `rx` payload.
rm_rip,
/// Register, memory, immediate (unsigned) operands
/// Uses `rx` payload.
rmi_u,
/// Register, memory, immediate (sign-extended) operands
/// Uses `rx` payload.
rmi_s,
/// Memory, immediate (unsigned) operands.
/// Uses `payload` payload.
mi_u,
/// Memory, immediate (sign-extend) operands.
/// Uses `payload` payload.
mi_s,
/// Memory, register operands.
/// Uses `payload` payload.
mr,
/// Single memory (SIB) operand.
/// Uses `payload` with extra data of type `MemorySib`.
m_sib,
/// Single memory (RIP) operand.
/// Uses `payload` with extra data of type `MemoryRip`.
m_rip,
/// Memory (SIB), immediate (unsigned) operands.
/// Uses `xi_u` payload with extra data of type `MemorySib`.
mi_u_sib,
/// Memory (RIP), immediate (unsigned) operands.
/// Uses `xi_u` payload with extra data of type `MemoryRip`.
mi_u_rip,
/// Memory (SIB), immediate (sign-extended) operands.
/// Uses `xi_s` payload with extra data of type `MemorySib`.
mi_s_sib,
/// Memory (RIP), immediate (sign-extended) operands.
/// Uses `xi_s` payload with extra data of type `MemoryRip`.
mi_s_rip,
/// Memory (SIB), register operands.
/// Uses `rx` payload with extra data of type `MemorySib`.
mr_sib,
/// Memory (RIP), register operands.
/// Uses `rx` payload with extra data of type `MemoryRip`.
mr_rip,
/// Rax, memory (moffs) operands.
/// Uses `payload` with extra data of type `MemoryMoffs`.
rax_moffs,
/// Memory (moffs), rax operands.
/// Uses `payload` with extra data of type `MemoryMoffs`.
moffs_rax,
/// Lea into register with linker relocation.
/// Uses `payload` payload with data of type `LeaRegisterReloc`.
lea_r_reloc,
@@ -274,6 +305,16 @@ pub const Inst = struct {
r1: Register,
payload: u32,
},
/// Custom payload followed by an unsigned immediate.
xi_u: struct {
payload: u32,
imm: u32,
},
/// Custom payload followed by a signed immediate.
xi_s: struct {
payload: u32,
imm: i32,
},
/// Relocation for the linker where:
/// * `atom_index` is the index of the source
/// * `sym_index` is the index of the target
@@ -378,6 +419,90 @@ pub const Imm64 = struct {
}
};
// TODO this can be further compacted using packed struct
pub const MemorySib = struct {
/// Size of the pointer.
ptr_size: u32,
/// Base register. -1 encodes null, i.e. no base register.
base: i32,
/// Scale for the index register. -1 encodes null, i.e. no scale.
/// Must be kept in sync with the `index` field.
scale: i32,
/// Index register. -1 encodes null, i.e. no index register.
/// Must be kept in sync with the `scale` field.
index: i32,
/// Displacement value.
disp: i32,
pub fn encode(mem: Memory) MemorySib {
const sib = mem.sib;
return .{
.ptr_size = @enumToInt(sib.ptr_size),
.base = if (sib.base) |r| @enumToInt(r) else -1,
.scale = if (sib.scale_index) |si| si.scale else -1,
.index = if (sib.scale_index) |si| @enumToInt(si.index) else -1,
.disp = sib.disp,
};
}
pub fn decode(msib: MemorySib) Memory {
const base: ?Register = if (msib.base == -1) null else @intToEnum(Register, msib.base);
const scale_index: ?Memory.ScaleIndex = if (msib.index == -1) null else .{
.scale = @intCast(u4, msib.scale),
.index = @intToEnum(Register, msib.index),
};
const mem: Memory = .{ .sib = .{
.ptr_size = @intToEnum(Memory.PtrSize, msib.ptr_size),
.base = base,
.scale_index = scale_index,
.disp = msib.disp,
} };
return mem;
}
};
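Concretely, a plain `[rbp - 16]` qword access has no scale-index pair, so both sentinel fields come out as -1. A round-trip sketch using the definitions above (assumes `std.debug.assert` is in scope as `assert`):

const mem = Memory.sib(.qword, .{ .base = .rbp, .disp = -16 });
const msib = MemorySib.encode(mem);
// msib.base == @enumToInt(Register.rbp), msib.scale == -1,
// msib.index == -1, msib.disp == -16
const back = MemorySib.decode(msib);
assert(back.sib.base.? == .rbp and back.sib.disp == -16);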
pub const MemoryRip = struct {
/// Size of the pointer.
ptr_size: u32,
/// Displacement value.
disp: i32,
pub fn encode(mem: Memory) MemoryRip {
return .{
.ptr_size = @enumToInt(mem.rip.ptr_size),
.disp = mem.rip.disp,
};
}
pub fn decode(mrip: MemoryRip) Memory {
return .{ .rip = .{
.ptr_size = @intToEnum(Memory.PtrSize, mrip.ptr_size),
.disp = mrip.disp,
} };
}
};
pub const MemoryMoffs = struct {
/// Segment register.
seg: u32,
/// Absolute offset with respect to the segment register, split into MSB and LSB halves
/// much like the `Imm64` payload.
msb: u32,
lsb: u32,
pub fn encodeOffset(moffs: *MemoryMoffs, v: u64) void {
moffs.msb = @truncate(u32, v >> 32);
moffs.lsb = @truncate(u32, v);
}
pub fn decodeOffset(moffs: *const MemoryMoffs) u64 {
var res: u64 = 0;
res |= (@intCast(u64, moffs.msb) << 32);
res |= @intCast(u64, moffs.lsb);
return res;
}
};
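The MSB/LSB split is just a 64-bit offset stored as two u32 halves; a worked round trip of the pair above:

var moffs: MemoryMoffs = .{ .seg = @enumToInt(Register.ds), .msb = undefined, .lsb = undefined };
moffs.encodeOffset(0x1122_3344_5566_7788);
// moffs.msb == 0x1122_3344, moffs.lsb == 0x5566_7788
std.debug.assert(moffs.decodeOffset() == 0x1122_3344_5566_7788);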
pub const DbgLineColumn = struct {
line: u32,
column: u32,


@@ -417,6 +417,17 @@ pub const Memory = union(enum) {
qword,
tbyte,
pub fn fromSize(size: u32) PtrSize {
return switch (size) {
1 => .byte,
2 => .word,
4 => .dword,
8 => .qword,
10 => .tbyte,
else => unreachable,
};
}
pub fn fromBitSize(bit_size: u64) PtrSize {
return switch (bit_size) {
8 => .byte,