Merge pull request #15087 from jacobly0/x86-debug-mir

x86_64: misc fixes
Jakub Konka 2023-03-27 16:20:27 +02:00 committed by GitHub
commit 28d6dd75ac
11 changed files with 2546 additions and 2366 deletions


@ -49,6 +49,20 @@ pub fn MultiArrayList(comptime S: type) type {
return casted_ptr[0..self.len];
}
pub fn set(self: Slice, index: usize, elem: S) void {
inline for (fields) |field_info| {
self.items(@field(Field, field_info.name))[index] = @field(elem, field_info.name);
}
}
pub fn get(self: Slice, index: usize) S {
var elem: S = undefined;
inline for (fields) |field_info| {
@field(elem, field_info.name) = self.items(@field(Field, field_info.name))[index];
}
return elem;
}
pub fn toMultiArrayList(self: Slice) Self {
if (self.ptrs.len == 0) {
return .{};
@ -156,20 +170,12 @@ pub fn MultiArrayList(comptime S: type) type {
/// Overwrite one array element with new data.
pub fn set(self: *Self, index: usize, elem: S) void {
const slices = self.slice();
inline for (fields, 0..) |field_info, i| {
slices.items(@intToEnum(Field, i))[index] = @field(elem, field_info.name);
}
return self.slice().set(index, elem);
}
/// Obtain all the data for one array element.
pub fn get(self: Self, index: usize) S {
const slices = self.slice();
var result: S = undefined;
inline for (fields, 0..) |field_info, i| {
@field(result, field_info.name) = slices.items(@intToEnum(Field, i))[index];
}
return result;
return self.slice().get(index);
}
/// Extend the list by 1 element. Allocates more memory as necessary.
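The refactor above turns `Self.set`/`Self.get` into thin wrappers over the new `Slice.set`/`Slice.get`, so code that already holds a `Slice` no longer has to convert back. A minimal round-trip sketch (the element type is invented for the demo):

const std = @import("std");

test "MultiArrayList set/get round-trip" {
    const Elem = struct { a: u32, b: u8 }; // hypothetical element type
    var list = std.MultiArrayList(Elem){};
    defer list.deinit(std.testing.allocator);
    try list.append(std.testing.allocator, .{ .a = 1, .b = 2 });
    // Both calls now route through the shared Slice implementations.
    list.set(0, .{ .a = 3, .b = 4 });
    try std.testing.expectEqual(@as(u32, 3), list.get(0).a);
}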


@ -20,6 +20,7 @@ const ErrorMsg = Module.ErrorMsg;
const Result = codegen.Result;
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
const Target = std.Target;
@ -44,6 +45,8 @@ const sse = abi.RegisterClass.sse;
const InnerError = CodeGenError || error{OutOfRegisters};
const debug_wip_mir = false;
gpa: Allocator,
air: Air,
liveness: Liveness,
@ -267,6 +270,12 @@ pub fn generate(
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
if (debug_wip_mir) {
const stderr = std.io.getStdErr().writer();
fn_owner_decl.renderFullyQualifiedName(mod, stderr) catch {};
stderr.writeAll(":\n") catch {};
}
var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
try branch_stack.ensureUnusedCapacity(2);
// The outermost branch is used for constants only.
@ -341,19 +350,22 @@ pub fn generate(
defer mir.deinit(bin_file.allocator);
var emit = Emit{
.mir = mir,
.lower = .{
.allocator = bin_file.allocator,
.mir = mir,
.target = &bin_file.options.target,
.src_loc = src_loc,
},
.bin_file = bin_file,
.debug_output = debug_output,
.target = &bin_file.options.target,
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
.prev_di_line = module_fn.lbrace_line,
.prev_di_column = module_fn.lbrace_column,
};
defer emit.deinit();
emit.lowerMir() catch |err| switch (err) {
error.EmitFail => return Result{ .fail = emit.err_msg.? },
emit.emitMir() catch |err| switch (err) {
error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? },
error.InvalidInstruction, error.CannotEncode => |e| {
const msg = switch (e) {
error.InvalidInstruction => "CodeGen failed to find a viable instruction.",
@ -378,11 +390,49 @@ pub fn generate(
}
}
fn dumpWipMir(self: *Self, inst: Mir.Inst) !void {
if (!debug_wip_mir) return;
const stderr = std.io.getStdErr().writer();
var lower = Lower{
.allocator = self.gpa,
.mir = .{
.instructions = self.mir_instructions.slice(),
.extra = self.mir_extra.items,
},
.target = self.target,
.src_loc = self.src_loc,
};
for (lower.lowerMir(inst) catch |err| switch (err) {
error.LowerFail => {
defer {
lower.err_msg.?.deinit(self.gpa);
lower.err_msg = null;
}
try stderr.print("{s}\n", .{lower.err_msg.?.msg});
return;
},
error.InvalidInstruction, error.CannotEncode => |e| {
try stderr.writeAll(switch (e) {
error.InvalidInstruction => "CodeGen failed to find a viable instruction.\n",
error.CannotEncode => "CodeGen failed to encode the instruction.\n",
});
return;
},
else => |e| return e,
}) |lower_inst| {
try stderr.writeAll(" | ");
try lower_inst.fmtPrint(stderr);
try stderr.writeByte('\n');
}
}
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
const gpa = self.gpa;
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
self.mir_instructions.appendAssumeCapacity(inst);
self.dumpWipMir(inst) catch {};
return result_index;
}
@ -842,8 +892,14 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const old_air_bookkeeping = self.air_bookkeeping;
try self.ensureProcessDeathCapacity(Liveness.bpi);
if (builtin.mode == .Debug) {
try self.mir_to_air_map.put(@intCast(u32, self.mir_instructions.len), inst);
try self.mir_to_air_map.put(@intCast(Mir.Inst.Index, self.mir_instructions.len), inst);
}
if (debug_wip_mir) @import("../../print_air.zig").dumpInst(
inst,
self.bin_file.options.module.?,
self.air,
self.liveness,
);
switch (air_tags[inst]) {
// zig fmt: off
@ -1479,43 +1535,31 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
const src_ty = self.air.typeOf(ty_op.operand);
const dst_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
const src_ty_size = src_ty.abiSize(self.target.*);
const dst_ty_size = dst_ty.abiSize(self.target.*);
if (src_ty_size > 8 or dst_ty_size > 8) {
return self.fail("TODO implement trunc for abi sizes larger than 8", .{});
}
const operand_lock: ?RegisterLock = switch (operand) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
else => null,
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
const reg: Register = blk: {
if (operand.isRegister()) {
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :blk operand.register.to64();
}
const result = if (self.liveness.isUnused(inst)) .dead else result: {
const dst_ty = self.air.typeOfIndex(inst);
const dst_abi_size = dst_ty.abiSize(self.target.*);
if (dst_abi_size > 8) {
return self.fail("TODO implement trunc for abi sizes larger than 8", .{});
}
const mcv = try self.copyToRegisterWithInstTracking(inst, src_ty, operand);
break :blk mcv.register.to64();
const src_mcv = try self.resolveInst(ty_op.operand);
const src_lock = switch (src_mcv) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
else => null,
};
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
// when truncating a `u16` to `u5`, for example, those top 3 bits in the result
// have to be removed. this only happens if the dst is not a power-of-two size.
if (self.regExtraBits(dst_ty) > 0) try self.truncateRegister(dst_ty, dst_mcv.register.to64());
break :result dst_mcv;
};
// when truncating a `u16` to `u5`, for example, those top 3 bits in the result
// have to be removed. this only happens if the dst is not a power-of-two size.
if (self.regExtraBits(dst_ty) > 0) {
try self.truncateRegister(dst_ty, reg);
}
return self.finishAir(inst, .{ .register = reg }, .{ ty_op.operand, .none, .none });
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
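The comment in the rewritten code is the key detail: after a narrowing copy, the high bits of the destination register can still hold stale data, and `regExtraBits` is nonzero exactly when the destination type does not fill a power-of-two register size, so `truncateRegister` masks the extras off. The mask for the `u16` -> `u5` case from the comment, in plain Zig (independent of the backend):

const std = @import("std");

test "u16 -> u5 truncation must clear the top bits" {
    const src: u16 = 0b1110_1010; // low five bits are 0b01010
    const mask: u16 = (1 << 5) - 1; // what truncateRegister effectively ANDs with
    try std.testing.expectEqual(@as(u16, 0b01010), src & mask);
    try std.testing.expectEqual(@as(u5, 0b01010), @truncate(u5, src));
}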
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
@ -1628,7 +1672,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
const reg_bits = self.regBitSize(ty);
const cc: Condition = if (ty.isSignedInt()) cc: {
try self.genSetReg(ty, limit_reg, dst_mcv);
try self.genBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genBinOpMir(.xor, ty, limit_mcv, .{
.immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
});
@ -1681,7 +1725,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
const reg_bits = self.regBitSize(ty);
const cc: Condition = if (ty.isSignedInt()) cc: {
try self.genSetReg(ty, limit_reg, dst_mcv);
try self.genBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genBinOpMir(.xor, ty, limit_mcv, .{
.immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
});
@ -1735,7 +1779,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
const cc: Condition = if (ty.isSignedInt()) cc: {
try self.genSetReg(ty, limit_reg, lhs_mcv);
try self.genBinOpMir(.xor, ty, limit_mcv, rhs_mcv);
try self.genBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
try self.genBinOpMir(.xor, ty, limit_mcv, .{
.immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1,
});
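These three hunks are the same one-line fix: the `sar` that builds the saturation limit is a shift, so it has to go through `genShiftBinOpMir` (which handles the shift-count plumbing) rather than the generic `genBinOpMir`. The limit trick itself: arithmetic-shifting the sign bit across the register and XORing with `MAX` yields `MAX` for a non-negative value and `MIN` for a negative one. In plain Zig, for `i8`:

const std = @import("std");

test "sar/xor computes the saturation limit" {
    for ([_]i8{ 100, -100 }) |lhs| {
        const sign = lhs >> 7; // sar reg, reg_bits - 1
        const limit = sign ^ std.math.maxInt(i8); // xor with (1 << 7) - 1
        const expected: i8 = if (lhs >= 0) std.math.maxInt(i8) else std.math.minInt(i8);
        try std.testing.expectEqual(expected, limit);
    }
}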
@ -2509,16 +2553,13 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
const dst_mcv: MCValue = blk: {
switch (operand) {
.stack_offset => |off| {
break :blk MCValue{ .stack_offset = off };
},
else => return self.fail("TODO implement slice_ptr for {}", .{operand}),
}
};
const result = if (self.liveness.isUnused(inst)) .dead else result: {
const src_mcv = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
const dst_mcv = try self.allocRegOrMem(inst, true);
const dst_ty = self.air.typeOfIndex(inst);
try self.setRegOrMem(dst_ty, dst_mcv, src_mcv);
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -3040,7 +3081,8 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (mat_src_lock) |lock| self.register_manager.unlockReg(lock);
const dst_mcv: MCValue = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
const dst_mcv: MCValue =
if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
.{ .register = try self.register_manager.allocReg(inst, gp) };
@ -3432,23 +3474,21 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const elem_ty = self.air.typeOfIndex(inst);
const elem_size = elem_ty.abiSize(self.target.*);
const result: MCValue = result: {
if (!elem_ty.hasRuntimeBitsIgnoreComptime())
break :result MCValue.none;
if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
const ptr = try self.resolveInst(ty_op.operand);
const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
if (self.liveness.isUnused(inst) and !is_volatile)
break :result MCValue.dead;
if (self.liveness.isUnused(inst) and !is_volatile) break :result .dead;
const dst_mcv: MCValue = blk: {
if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
// The MCValue that holds the pointer can be re-used as the value.
break :blk ptr;
} else {
break :blk try self.allocRegOrMem(inst, true);
}
};
log.debug("airLoad(%{d}): {} <- {}", .{ inst, dst_mcv, ptr });
const dst_mcv: MCValue = if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr))
// The MCValue that holds the pointer can be re-used as the value.
ptr
else
try self.allocRegOrMem(inst, true);
try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
break :result dst_mcv;
};
@ -3943,9 +3983,6 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue
.register_overflow => unreachable,
.register => |dst_reg| try self.asmRegister(mir_tag, registerAlias(dst_reg, abi_size)),
.ptr_stack_offset, .stack_offset => |off| {
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
if (abi_size > 8) {
return self.fail("TODO implement {} for stack dst with large ABI", .{mir_tag});
}
@ -4387,9 +4424,6 @@ fn genBinOp(
if (lhs_ty.zigTypeTag() == .Vector) {
return self.fail("TODO implement genBinOp for {}", .{lhs_ty.fmt(self.bin_file.options.module.?)});
}
if (lhs_ty.abiSize(self.target.*) > 8) {
return self.fail("TODO implement genBinOp for {}", .{lhs_ty.fmt(self.bin_file.options.module.?)});
}
switch (lhs) {
.immediate => |imm| switch (imm) {
@ -4402,7 +4436,7 @@ fn genBinOp(
else => {},
}
const is_commutative: bool = switch (tag) {
const is_commutative = switch (tag) {
.add,
.addwrap,
.bool_or,
@ -4416,6 +4450,20 @@ fn genBinOp(
else => false,
};
const needs_reg_dst = switch (tag) {
.add,
.addwrap,
.sub,
.subwrap,
.mul,
.div_float,
.div_exact,
.div_trunc,
.div_floor,
=> lhs_ty.isRuntimeFloat(),
else => false,
};
const lhs_lock: ?RegisterLock = switch (lhs) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@ -4432,10 +4480,10 @@ fn genBinOp(
var flipped: bool = false;
const dst_mcv: MCValue = blk: {
if (maybe_inst) |inst| {
if (lhs.isRegister() and self.reuseOperand(inst, lhs_air, 0, lhs)) {
if ((!needs_reg_dst or lhs.isRegister()) and self.reuseOperand(inst, lhs_air, 0, lhs)) {
break :blk lhs;
}
if (is_commutative and rhs.isRegister() and self.reuseOperand(inst, rhs_air, 1, rhs)) {
if (is_commutative and (!needs_reg_dst or rhs.isRegister()) and self.reuseOperand(inst, rhs_air, 1, rhs)) {
flipped = true;
break :blk rhs;
}
@ -4485,33 +4533,37 @@ fn genBinOp(
.div_float,
.div_exact,
=> try self.genBinOpMir(switch (lhs_ty.tag()) {
.f32 => .divss,
.f64 => .divsd,
else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?) }),
}, lhs_ty, dst_mcv, src_mcv),
.div_trunc,
.div_floor,
=> {
try self.genBinOpMir(switch (lhs_ty.tag()) {
.f32 => .divss,
.f64 => .divsd,
else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?) }),
else => return self.fail("TODO implement genBinOp for {s} {}", .{
@tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?),
}),
}, lhs_ty, dst_mcv, src_mcv);
if (Target.x86.featureSetHas(self.target.cpu.features, .sse4_1)) {
const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*));
const dst_alias = registerAlias(dst_mcv.register, abi_size);
try self.asmRegisterRegisterImmediate(switch (lhs_ty.tag()) {
.f32 => .roundss,
.f64 => .roundsd,
else => unreachable,
}, dst_alias, dst_alias, Immediate.u(switch (tag) {
.div_trunc => 0b1_0_11,
.div_floor => 0b1_0_01,
else => unreachable,
}));
} else return self.fail("TODO implement round without sse4_1", .{});
switch (tag) {
.div_float,
.div_exact,
=> {},
.div_trunc,
.div_floor,
=> if (Target.x86.featureSetHas(self.target.cpu.features, .sse4_1)) {
const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*));
const dst_alias = registerAlias(dst_mcv.register, abi_size);
try self.asmRegisterRegisterImmediate(switch (lhs_ty.tag()) {
.f32 => .roundss,
.f64 => .roundsd,
else => unreachable,
}, dst_alias, dst_alias, Immediate.u(switch (tag) {
.div_trunc => 0b1_0_11,
.div_floor => 0b1_0_01,
else => unreachable,
}));
} else return self.fail("TODO implement round without sse4_1", .{}),
else => unreachable,
}
},
.ptr_add,
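About the `roundss`/`roundsd` immediates above: the underscores split the imm8 into its SSE4.1 control fields. Decoded (per the Intel SDM; the constant names here are just for illustration):

// imm8 layout for roundss/roundsd:
//   bit 3     1 = suppress the precision (inexact) exception
//   bit 2     0 = take the rounding mode from imm8[1:0] (1 would use MXCSR.RC)
//   bits 1:0  00 nearest, 01 toward -inf, 10 toward +inf, 11 toward zero
const round_toward_zero: u8 = 0b1_0_11; // div_trunc
const round_toward_neg_inf: u8 = 0b1_0_01; // div_floor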
@ -4568,7 +4620,13 @@ fn genBinOp(
};
const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*));
switch (dst_mcv) {
const tmp_reg = switch (dst_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
};
const tmp_lock = self.register_manager.lockReg(tmp_reg);
defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock);
switch (mat_src_mcv) {
.none,
.undef,
.dead,
@ -4576,57 +4634,43 @@ fn genBinOp(
.immediate,
.eflags,
.register_overflow,
.stack_offset,
.ptr_stack_offset,
.memory,
.linker_load,
=> unreachable,
.register => |dst_reg| switch (mat_src_mcv) {
.none,
.undef,
.dead,
.unreach,
.immediate,
.eflags,
.register_overflow,
.ptr_stack_offset,
=> unreachable,
.register => |src_reg| try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, abi_size),
registerAlias(src_reg, abi_size),
.register => |src_reg| try self.asmCmovccRegisterRegister(
registerAlias(tmp_reg, abi_size),
registerAlias(src_reg, abi_size),
cc,
),
.stack_offset => |off| try self.asmCmovccRegisterMemory(
registerAlias(tmp_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rbp,
.disp = -off,
}),
cc,
),
.memory, .linker_load => {
const addr_reg = (try self.register_manager.allocReg(null, gp)).to64();
const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
defer self.register_manager.unlockReg(addr_reg_lock);
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, mat_src_mcv);
// To get the actual address of the value we want to modify
// we have to go through the GOT
try self.asmRegisterMemory(
.mov,
addr_reg,
Memory.sib(.qword, .{ .base = addr_reg }),
);
try self.asmCmovccRegisterMemory(
registerAlias(tmp_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = addr_reg }),
cc,
),
.stack_offset => |off| try self.asmCmovccRegisterMemory(
registerAlias(dst_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rbp,
.disp = -off,
}),
cc,
),
.memory, .linker_load => {
const addr_reg = (try self.register_manager.allocReg(null, gp)).to64();
const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
defer self.register_manager.unlockReg(addr_reg_lock);
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, dst_mcv);
// To get the actual address of the value we want to modify
// we have to go through the GOT
try self.asmRegisterMemory(
.mov,
addr_reg,
Memory.sib(.qword, .{ .base = addr_reg }),
);
try self.asmCmovccRegisterMemory(
registerAlias(dst_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = addr_reg }),
cc,
);
},
);
},
}
try self.setRegOrMem(lhs_ty, dst_mcv, .{ .register = tmp_reg });
},
.Float => try self.genBinOpMir(switch (lhs_ty.tag()) {
.f32 => switch (tag) {
@ -4649,8 +4693,8 @@ fn genBinOp(
return dst_mcv;
}
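One structural point in the `.Int` min/max rewrite above: `cmovcc` cannot target memory, so the destination is always materialized in a scratch register first and only written back with `setRegOrMem` at the end (a no-op when `dst_mcv` was already a register). For a stack-resident destination the emitted sequence looks roughly like this (registers and offsets are invented; the `cmp` comes from earlier in `genBinOp`):

mov    eax, dword ptr [rbp - 8]    ; copyToTmpRegister from dst_mcv
cmp    eax, ecx                    ; earlier genBinOpMir(.cmp, ...)
cmovl  eax, ecx                    ; cc derived from min/max and signedness
mov    dword ptr [rbp - 8], eax    ; setRegOrMem write-back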
fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
const abi_size = @intCast(u32, ty.abiSize(self.target.*));
switch (dst_mcv) {
.none => unreachable,
.undef => unreachable,
@ -4667,12 +4711,12 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
const dst_reg_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
const reg = try self.copyToTmpRegister(dst_ty, src_mcv);
return self.genBinOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg });
const reg = try self.copyToTmpRegister(ty, src_mcv);
return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .register = reg });
},
.register => |src_reg| switch (dst_ty.zigTypeTag()) {
.register => |src_reg| switch (ty.zigTypeTag()) {
.Float => {
if (intrinsicsAllowed(self.target.*, dst_ty)) {
if (intrinsicsAllowed(self.target.*, ty)) {
return self.asmRegisterRegister(mir_tag, dst_reg.to128(), src_reg.to128());
}
@ -4685,7 +4729,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
),
},
.immediate => |imm| {
switch (self.regBitSize(dst_ty)) {
switch (self.regBitSize(ty)) {
8, 16, 32 => {
try self.asmRegisterImmediate(
mir_tag,
@ -4704,7 +4748,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
try self.asmRegisterRegister(
mir_tag,
registerAlias(dst_reg, abi_size),
registerAlias(try self.copyToTmpRegister(dst_ty, src_mcv), abi_size),
registerAlias(try self.copyToTmpRegister(ty, src_mcv), abi_size),
);
}
},
@ -4719,13 +4763,10 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
const dst_reg_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
const reg = try self.copyToTmpRegister(dst_ty, src_mcv);
return self.genBinOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg });
const reg = try self.copyToTmpRegister(ty, src_mcv);
return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .register = reg });
},
.stack_offset => |off| {
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
try self.asmRegisterMemory(
mir_tag,
registerAlias(dst_reg, abi_size),
@ -4734,76 +4775,155 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
},
}
},
.ptr_stack_offset, .stack_offset => |off| {
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
if (abi_size > 8) {
return self.fail("TODO implement {} for stack dst with large ABI", .{mir_tag});
}
.ptr_stack_offset, .stack_offset => |dst_off| {
const src: ?struct {
limb_reg: Register,
limb_lock: RegisterLock,
addr_reg: Register,
addr_lock: RegisterLock,
} = switch (src_mcv) {
else => null,
.memory, .linker_load => addr: {
const src_limb_reg = try self.register_manager.allocReg(null, gp);
const src_limb_lock = self.register_manager.lockRegAssumeUnused(src_limb_reg);
errdefer self.register_manager.unlockReg(src_limb_lock);
switch (src_mcv) {
.none => unreachable,
.undef => unreachable,
.dead, .unreach => unreachable,
.register_overflow => unreachable,
.register => |src_reg| {
try self.asmMemoryRegister(mir_tag, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rbp,
.disp = -off,
}), registerAlias(src_reg, abi_size));
const src_addr_reg = try self.register_manager.allocReg(null, gp);
const src_addr_lock = self.register_manager.lockRegAssumeUnused(src_addr_reg);
errdefer self.register_manager.unlockReg(src_addr_lock);
try self.loadMemPtrIntoRegister(src_addr_reg, Type.usize, src_mcv);
// To get the actual address of the value we want to modify
// we have to go through the GOT
try self.asmRegisterMemory(
.mov,
src_addr_reg,
Memory.sib(.qword, .{ .base = src_addr_reg }),
);
break :addr .{
.addr_reg = src_addr_reg,
.addr_lock = src_addr_lock,
.limb_reg = src_limb_reg,
.limb_lock = src_limb_lock,
};
},
.immediate => |imm| {
switch (self.regBitSize(dst_ty)) {
8, 16, 32 => {
try self.asmMemoryImmediate(
mir_tag,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rbp,
.disp = -off,
}),
if (math.cast(i32, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u32, imm)),
);
},
64 => {
if (math.cast(i32, @bitCast(i64, imm))) |small| {
};
defer if (src) |locks| {
self.register_manager.unlockReg(locks.limb_lock);
self.register_manager.unlockReg(locks.addr_lock);
};
const ty_signedness =
if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) {
.signed => Type.usize,
.unsigned => Type.isize,
};
const limb_abi_size = @min(abi_size, 8);
var off: i32 = 0;
while (off < abi_size) : (off += 8) {
const mir_limb_tag = switch (off) {
0 => mir_tag,
else => switch (mir_tag) {
.add => .adc,
.sub => .sbb,
.@"or", .@"and", .xor => mir_tag,
else => return self.fail("TODO genBinOpMir implement large ABI for {s}", .{
@tagName(mir_tag),
}),
},
};
const dst_limb_mem = Memory.sib(
Memory.PtrSize.fromSize(limb_abi_size),
.{ .base = .rbp, .disp = off - dst_off },
);
switch (src_mcv) {
.none => unreachable,
.undef => unreachable,
.dead, .unreach => unreachable,
.register_overflow => unreachable,
.register => |src_reg| {
assert(off == 0);
try self.asmMemoryRegister(
mir_limb_tag,
dst_limb_mem,
registerAlias(src_reg, limb_abi_size),
);
},
.immediate => |src_imm| {
const imm = if (off == 0) src_imm else switch (ty_signedness) {
.signed => @bitCast(u64, @bitCast(i64, src_imm) >> 63),
.unsigned => 0,
};
switch (self.regBitSize(limb_ty)) {
8, 16, 32 => {
try self.asmMemoryImmediate(
mir_tag,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rbp,
.disp = -off,
}),
Immediate.s(small),
mir_limb_tag,
dst_limb_mem,
if (math.cast(i32, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u32, imm)),
);
} else {
try self.asmMemoryRegister(
mir_tag,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
.base = .rbp,
.disp = -off,
}),
registerAlias(try self.copyToTmpRegister(dst_ty, src_mcv), abi_size),
);
}
},
else => return self.fail("TODO genBinOpMir implement large immediate ABI", .{}),
}
},
.memory,
.stack_offset,
.ptr_stack_offset,
=> {
return self.fail("TODO implement x86 genBinOpMir source memory", .{});
},
.linker_load => {
return self.fail("TODO implement x86 genBinOpMir source symbol at index in linker", .{});
},
.eflags => {
return self.fail("TODO implement x86 genBinOpMir source eflags", .{});
},
},
64 => {
if (math.cast(i32, @bitCast(i64, imm))) |small| {
try self.asmMemoryImmediate(
mir_limb_tag,
dst_limb_mem,
Immediate.s(small),
);
} else {
try self.asmMemoryRegister(
mir_limb_tag,
dst_limb_mem,
registerAlias(
try self.copyToTmpRegister(limb_ty, .{ .immediate = imm }),
limb_abi_size,
),
);
}
},
else => unreachable,
}
},
.memory, .linker_load => {
try self.asmRegisterMemory(
.mov,
registerAlias(src.?.limb_reg, limb_abi_size),
Memory.sib(
Memory.PtrSize.fromSize(limb_abi_size),
.{ .base = src.?.addr_reg, .disp = off },
),
);
try self.asmMemoryRegister(
mir_limb_tag,
dst_limb_mem,
registerAlias(src.?.limb_reg, limb_abi_size),
);
},
.stack_offset, .ptr_stack_offset, .eflags => {
const src_limb_reg = try self.copyToTmpRegister(limb_ty, switch (src_mcv) {
.stack_offset => |src_off| .{ .stack_offset = src_off - off },
.ptr_stack_offset,
.eflags,
=> off: {
assert(off == 0);
break :off src_mcv;
},
else => unreachable,
});
const src_limb_lock = self.register_manager.lockReg(src_limb_reg);
defer if (src_limb_lock) |lock| self.register_manager.unlockReg(lock);
try self.asmMemoryRegister(
mir_limb_tag,
dst_limb_mem,
registerAlias(src_limb_reg, limb_abi_size),
);
},
}
}
},
.memory => {
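The limb loop in this hunk is the standard wide-integer pattern: the first 8-byte limb uses the requested opcode, later limbs swap `add` -> `adc` and `sub` -> `sbb` so the carry/borrow chains through, and the bitwise ops stay unchanged. The same shape in plain Zig for a 128-bit add over u64 limbs:

const std = @import("std");

test "128-bit add as an add/adc limb chain" {
    const lhs = [2]u64{ 0xffff_ffff_ffff_ffff, 0 }; // little-endian limbs
    const rhs = [2]u64{ 1, 0 };
    var carry: u1 = 0;
    var dst: [2]u64 = undefined;
    for (&dst, lhs, rhs) |*d, l, r| {
        // limb 0 is the plain add; the carry-in is what adc contributes on later limbs
        const sum = @addWithOverflow(l, r);
        const sum_c = @addWithOverflow(sum[0], @as(u64, carry));
        d.* = sum_c[0];
        carry = sum[1] | sum_c[1];
    }
    try std.testing.expectEqual(@as(u64, 0), dst[0]);
    try std.testing.expectEqual(@as(u64, 1), dst[1]);
}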
@ -4827,6 +4947,10 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.ptr_stack_offset => unreachable,
.register_overflow => unreachable,
.register => |dst_reg| {
const dst_alias = registerAlias(dst_reg, abi_size);
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
switch (src_mcv) {
.none => unreachable,
.undef => try self.genSetReg(dst_ty, dst_reg, .undef),
@ -4835,21 +4959,18 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.register_overflow => unreachable,
.register => |src_reg| try self.asmRegisterRegister(
.imul,
registerAlias(dst_reg, abi_size),
dst_alias,
registerAlias(src_reg, abi_size),
),
.immediate => |imm| {
if (math.minInt(i32) <= imm and imm <= math.maxInt(i32)) {
// TODO take into account the type's ABI size when selecting the register alias
// register, immediate
if (std.math.cast(i32, imm)) |small| {
try self.asmRegisterRegisterImmediate(
.imul,
dst_reg.to32(),
dst_reg.to32(),
Immediate.u(@intCast(u32, imm)),
dst_alias,
dst_alias,
Immediate.s(small),
);
} else {
// TODO verify we don't spill and assign to the same register as dst_mcv
const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv);
return self.genIntMulComplexOpMir(dst_ty, dst_mcv, MCValue{ .register = src_reg });
}
@ -4857,7 +4978,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.stack_offset => |off| {
try self.asmRegisterMemory(
.imul,
registerAlias(dst_reg, abi_size),
dst_alias,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
);
},
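The immediate arm now leans on `math.cast(i32, imm)`, which matches the instruction set: `imul r, r/m, imm32` sign-extends its 32-bit immediate to the operand width, so only values representable as an i32 can be encoded inline (hence `Immediate.s`); anything wider takes the `copyToTmpRegister` path. A sketch of the boundary:

const std = @import("std");

test "imul immediates must fit in an i32" {
    // Encodable inline as imul dst, dst, imm32:
    try std.testing.expect(std.math.cast(i32, @as(u64, 1) << 30) != null);
    // Too wide: falls back to a temporary register:
    try std.testing.expect(std.math.cast(i32, @as(u64, 1) << 40) == null);
}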
@ -5344,6 +5465,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
}
const ty = self.air.typeOf(bin_op.lhs);
const abi_size = ty.abiSize(self.target.*);
if (abi_size > 8) return self.fail("TODO implement cmp for large values", .{});
const signedness: std.builtin.Signedness = blk: {
// For non-int types, we treat the values as unsigned
if (ty.zigTypeTag() != .Int) break :blk .unsigned;
@ -6655,10 +6779,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
}
},
.register => |reg| {
if (stack_offset > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
const base_reg = opts.dest_stack_base orelse .rbp;
switch (ty.zigTypeTag()) {
@ -6893,13 +7013,11 @@ fn genInlineMemset(
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
const abi_size = @intCast(u32, ty.abiSize(self.target.*));
if (abi_size > 8) return self.fail("genSetReg called with a value larger than one register", .{});
switch (mcv) {
.dead => unreachable,
.register_overflow => unreachable,
.ptr_stack_offset => |off| {
if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
try self.asmRegisterMemory(
.lea,
registerAlias(reg, abi_size),
@ -7060,16 +7178,10 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (reg.to64() == .rax) {
// If this is RAX, we can use a direct load.
// Otherwise, we need to load the address, then indirectly load the value.
var moffs: Mir.MemoryMoffs = .{
.seg = @enumToInt(Register.ds),
.msb = undefined,
.lsb = undefined,
};
moffs.encodeOffset(x);
_ = try self.addInst(.{
.tag = .mov_moffs,
.ops = .rax_moffs,
.data = .{ .payload = try self.addExtra(moffs) },
.data = .{ .payload = try self.addExtra(Mir.MemoryMoffs.encode(.ds, x)) },
});
} else {
// Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue.
@ -7084,10 +7196,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
},
},
.stack_offset => |off| {
if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
switch (ty.zigTypeTag()) {
.Int => switch (ty.intInfo(self.target.*).signedness) {
.signed => {
@ -7151,7 +7259,15 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const result = try self.resolveInst(un_op);
const result = if (self.liveness.isUnused(inst)) .dead else result: {
const src_mcv = try self.resolveInst(un_op);
if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;
const dst_mcv = try self.allocRegOrMem(inst, true);
const dst_ty = self.air.typeOfIndex(inst);
try self.setRegOrMem(dst_ty, dst_mcv, src_mcv);
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}


@ -1,40 +1,8 @@
//! This file contains the functionality for lowering x86_64 MIR into
//! machine code
//! This file contains the functionality for emitting x86_64 MIR as machine code
const Emit = @This();
const std = @import("std");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const abi = @import("abi.zig");
const encoder = @import("encoder.zig");
const link = @import("../../link.zig");
const log = std.log.scoped(.codegen);
const math = std.math;
const mem = std.mem;
const testing = std.testing;
const Air = @import("../../Air.zig");
const Allocator = mem.Allocator;
const CodeGen = @import("CodeGen.zig");
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const Encoder = bits.Encoder;
const ErrorMsg = Module.ErrorMsg;
const Immediate = bits.Immediate;
const Instruction = encoder.Instruction;
const MCValue = @import("CodeGen.zig").MCValue;
const Memory = bits.Memory;
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
const Register = bits.Register;
const Type = @import("../../type.zig").Type;
mir: Mir,
lower: Lower,
bin_file: *link.File,
debug_output: DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Module.SrcLoc,
code: *std.ArrayList(u8),
prev_di_line: u32,
@ -45,12 +13,163 @@ prev_di_pc: usize,
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
relocs: std.ArrayListUnmanaged(Reloc) = .{},
const InnerError = error{
OutOfMemory,
EmitFail,
InvalidInstruction,
CannotEncode,
};
pub const Error = Lower.Error || error{EmitFail};
pub fn emitMir(emit: *Emit) Error!void {
for (0..emit.lower.mir.instructions.len) |i| {
const index = @intCast(Mir.Inst.Index, i);
const inst = emit.lower.mir.instructions.get(index);
const start_offset = @intCast(u32, emit.code.items.len);
try emit.code_offset_mapping.putNoClobber(emit.lower.allocator, index, start_offset);
for (try emit.lower.lowerMir(inst)) |lower_inst| try lower_inst.encode(emit.code.writer());
const end_offset = @intCast(u32, emit.code.items.len);
switch (inst.tag) {
else => {},
.jmp_reloc => try emit.relocs.append(emit.lower.allocator, .{
.source = start_offset,
.target = inst.data.inst,
.offset = end_offset - 4,
.length = 5,
}),
.call_extern => if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom_index = macho_file.getAtomIndexForSymbol(
.{ .sym_index = inst.data.relocation.atom_index, .file = null },
).?;
const target = macho_file.getGlobalByIndex(inst.data.relocation.sym_index);
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = end_offset - 4,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom_index = coff_file.getAtomIndexForSymbol(
.{ .sym_index = inst.data.relocation.atom_index, .file = null },
).?;
const target = coff_file.getGlobalByIndex(inst.data.relocation.sym_index);
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = end_offset - 4,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else return emit.fail("TODO implement {} for {}", .{ inst.tag, emit.bin_file.tag }),
.lea_linker => if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const metadata =
emit.lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data;
const reloc_type = switch (inst.ops) {
.got_reloc => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
.direct_reloc => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
const atom_index = macho_file.getAtomIndexForSymbol(.{
.sym_index = metadata.atom_index,
.file = null,
}).?;
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = reloc_type,
.target = .{ .sym_index = metadata.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const metadata =
emit.lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data;
const atom_index = coff_file.getAtomIndexForSymbol(.{
.sym_index = metadata.atom_index,
.file = null,
}).?;
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (inst.ops) {
.got_reloc => .got,
.direct_reloc => .direct,
.import_reloc => .import,
else => unreachable,
},
.target = switch (inst.ops) {
.got_reloc,
.direct_reloc,
=> .{ .sym_index = metadata.sym_index, .file = null },
.import_reloc => coff_file.getGlobalByIndex(metadata.sym_index),
else => unreachable,
},
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
});
} else return emit.fail("TODO implement {} for {}", .{ inst.tag, emit.bin_file.tag }),
.jcc => try emit.relocs.append(emit.lower.allocator, .{
.source = start_offset,
.target = inst.data.inst_cc.inst,
.offset = end_offset - 4,
.length = 6,
}),
.dbg_line => {
const dbg_line_column =
emit.lower.mir.extraData(Mir.DbgLineColumn, inst.data.payload).data;
try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column);
},
.dbg_prologue_end => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.dbg_epilogue_begin => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
}
}
try emit.fixupRelocs();
}
pub fn deinit(emit: *Emit) void {
emit.relocs.deinit(emit.lower.allocator);
emit.code_offset_mapping.deinit(emit.lower.allocator);
emit.* = undefined;
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) Error {
return switch (emit.lower.fail(format, args)) {
error.LowerFail => error.EmitFail,
else => |e| e,
};
}
const Reloc = struct {
/// Offset of the instruction.
@ -63,148 +182,7 @@ const Reloc = struct {
length: u5,
};
pub fn lowerMir(emit: *Emit) InnerError!void {
const mir_tags = emit.mir.instructions.items(.tag);
for (mir_tags, 0..) |tag, index| {
const inst = @intCast(u32, index);
try emit.code_offset_mapping.putNoClobber(emit.bin_file.allocator, inst, emit.code.items.len);
switch (tag) {
.adc,
.add,
.@"and",
.bsf,
.bsr,
.bswap,
.bt,
.btc,
.btr,
.bts,
.call,
.cbw,
.cwde,
.cdqe,
.cwd,
.cdq,
.cqo,
.cmp,
.cmpxchg,
.div,
.fisttp,
.fld,
.idiv,
.imul,
.int3,
.jmp,
.lea,
.lfence,
.lzcnt,
.mfence,
.mov,
.movbe,
.movzx,
.mul,
.neg,
.nop,
.not,
.@"or",
.pop,
.popcnt,
.push,
.rcl,
.rcr,
.ret,
.rol,
.ror,
.sal,
.sar,
.sbb,
.sfence,
.shl,
.shld,
.shr,
.shrd,
.sub,
.syscall,
.@"test",
.tzcnt,
.ud2,
.xadd,
.xchg,
.xor,
.addss,
.cmpss,
.divss,
.maxss,
.minss,
.movss,
.mulss,
.roundss,
.subss,
.ucomiss,
.addsd,
.cmpsd,
.divsd,
.maxsd,
.minsd,
.movsd,
.mulsd,
.roundsd,
.subsd,
.ucomisd,
=> try emit.mirEncodeGeneric(tag, inst),
.cmps,
.lods,
.movs,
.scas,
.stos,
=> try emit.mirString(tag, inst),
.cmpxchgb => try emit.mirCmpxchgBytes(inst),
.jmp_reloc => try emit.mirJmpReloc(inst),
.call_extern => try emit.mirCallExtern(inst),
.lea_linker => try emit.mirLeaLinker(inst),
.mov_moffs => try emit.mirMovMoffs(inst),
.movsx => try emit.mirMovsx(inst),
.cmovcc => try emit.mirCmovcc(inst),
.setcc => try emit.mirSetcc(inst),
.jcc => try emit.mirJcc(inst),
.dbg_line => try emit.mirDbgLine(inst),
.dbg_prologue_end => try emit.mirDbgPrologueEnd(inst),
.dbg_epilogue_begin => try emit.mirDbgEpilogueBegin(inst),
.push_regs => try emit.mirPushPopRegisterList(.push, inst),
.pop_regs => try emit.mirPushPopRegisterList(.pop, inst),
.dead => {},
}
}
try emit.fixupRelocs();
}
pub fn deinit(emit: *Emit) void {
emit.relocs.deinit(emit.bin_file.allocator);
emit.code_offset_mapping.deinit(emit.bin_file.allocator);
emit.* = undefined;
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
assert(emit.err_msg == null);
emit.err_msg = try ErrorMsg.create(emit.bin_file.allocator, emit.src_loc, format, args);
return error.EmitFail;
}
fn fixupRelocs(emit: *Emit) InnerError!void {
fn fixupRelocs(emit: *Emit) Error!void {
// TODO this function currently assumes all relocs via JMP/CALL instructions are 32bit in size.
// This should be reversed like it is done in aarch64 MIR emit code: start with the smallest
// possible resolution, i.e., 8bit, and iteratively converge on the minimum required resolution
@ -217,532 +195,7 @@ fn fixupRelocs(emit: *Emit) InnerError!void {
}
}
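The elided `fixupRelocs` body presumably follows directly from the `Reloc` fields (this is a reconstruction, not the commit's literal code): look up where the target MIR instruction landed, compute the rel32 displacement relative to the end of the branching instruction, and patch the four immediate bytes in place:

fn fixupRelocs(emit: *Emit) Error!void {
    // hedged sketch; field names follow the Reloc struct above
    for (emit.relocs.items) |reloc| {
        const target = emit.code_offset_mapping.get(reloc.target).?;
        const disp = @intCast(i32, target) - @intCast(i32, reloc.source + reloc.length);
        mem.writeIntLittle(i32, emit.code.items[reloc.offset..][0..4], disp);
    }
}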
fn encode(emit: *Emit, mnemonic: Instruction.Mnemonic, ops: Instruction.Init) InnerError!void {
const inst = try Instruction.new(mnemonic, ops);
return inst.encode(emit.code.writer());
}
fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
const mnemonic = inline for (@typeInfo(Instruction.Mnemonic).Enum.fields) |field| {
if (mem.eql(u8, field.name, @tagName(tag))) break @field(Instruction.Mnemonic, field.name);
} else unreachable;
const ops = emit.mir.instructions.items(.ops)[inst];
const data = emit.mir.instructions.items(.data)[inst];
const prefix: Instruction.Prefix = switch (ops) {
.lock_m_sib,
.lock_m_rip,
.lock_mi_sib_u,
.lock_mi_rip_u,
.lock_mi_sib_s,
.lock_mi_rip_s,
.lock_mr_sib,
.lock_mr_rip,
.lock_moffs_rax,
=> .lock,
else => .none,
};
var op1: Instruction.Operand = .none;
var op2: Instruction.Operand = .none;
var op3: Instruction.Operand = .none;
var op4: Instruction.Operand = .none;
switch (ops) {
.none => {},
.i_s => op1 = .{ .imm = Immediate.s(@bitCast(i32, data.i)) },
.i_u => op1 = .{ .imm = Immediate.u(data.i) },
.r => op1 = .{ .reg = data.r },
.rr => {
op1 = .{ .reg = data.rr.r1 };
op2 = .{ .reg = data.rr.r2 };
},
.rrr => {
op1 = .{ .reg = data.rrr.r1 };
op2 = .{ .reg = data.rrr.r2 };
op3 = .{ .reg = data.rrr.r3 };
},
.ri_s, .ri_u => {
const imm = switch (ops) {
.ri_s => Immediate.s(@bitCast(i32, data.ri.i)),
.ri_u => Immediate.u(data.ri.i),
else => unreachable,
};
op1 = .{ .reg = data.ri.r };
op2 = .{ .imm = imm };
},
.ri64 => {
const imm64 = emit.mir.extraData(Mir.Imm64, data.rx.payload).data;
op1 = .{ .reg = data.rx.r };
op2 = .{ .imm = Immediate.u(Mir.Imm64.decode(imm64)) };
},
.rri_s, .rri_u => {
const imm = switch (ops) {
.rri_s => Immediate.s(@bitCast(i32, data.rri.i)),
.rri_u => Immediate.u(data.rri.i),
else => unreachable,
};
op1 = .{ .reg = data.rri.r1 };
op2 = .{ .reg = data.rri.r2 };
op3 = .{ .imm = imm };
},
.m_sib, .lock_m_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.payload).data;
op1 = .{ .mem = Mir.MemorySib.decode(msib) };
},
.m_rip, .lock_m_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
},
.mi_sib_s, .mi_sib_u, .lock_mi_sib_s, .lock_mi_sib_u => {
const msib = emit.mir.extraData(Mir.MemorySib, data.ix.payload).data;
const imm = switch (ops) {
.mi_sib_s, .lock_mi_sib_s => Immediate.s(@bitCast(i32, data.ix.i)),
.mi_sib_u, .lock_mi_sib_u => Immediate.u(data.ix.i),
else => unreachable,
};
op1 = .{ .mem = Mir.MemorySib.decode(msib) };
op2 = .{ .imm = imm };
},
.mi_rip_u, .mi_rip_s, .lock_mi_rip_u, .lock_mi_rip_s => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.ix.payload).data;
const imm = switch (ops) {
.mi_rip_s, .lock_mi_rip_s => Immediate.s(@bitCast(i32, data.ix.i)),
.mi_rip_u, .lock_mi_rip_u => Immediate.u(data.ix.i),
else => unreachable,
};
op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
op2 = .{ .imm = imm };
},
.rm_sib, .mr_sib, .lock_mr_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data;
const op_r = .{ .reg = data.rx.r };
const op_m = .{ .mem = Mir.MemorySib.decode(msib) };
switch (ops) {
.rm_sib => {
op1 = op_r;
op2 = op_m;
},
.mr_sib, .lock_mr_sib => {
op1 = op_m;
op2 = op_r;
},
else => unreachable,
}
},
.rm_rip, .mr_rip, .lock_mr_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data;
const op_r = .{ .reg = data.rx.r };
const op_m = .{ .mem = Mir.MemoryRip.decode(mrip) };
switch (ops) {
.rm_rip => {
op1 = op_r;
op2 = op_m;
},
.mr_rip, .lock_mr_rip => {
op1 = op_m;
op2 = op_r;
},
else => unreachable,
}
},
.mrr_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.rrx.payload).data;
op1 = .{ .mem = Mir.MemorySib.decode(msib) };
op2 = .{ .reg = data.rrx.r1 };
op3 = .{ .reg = data.rrx.r2 };
},
.mrr_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.rrx.payload).data;
op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
op2 = .{ .reg = data.rrx.r1 };
op3 = .{ .reg = data.rrx.r2 };
},
.mri_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.rix.payload).data;
op1 = .{ .mem = Mir.MemorySib.decode(msib) };
op2 = .{ .reg = data.rix.r };
op3 = .{ .imm = Immediate.u(data.rix.i) };
},
.mri_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.rix.payload).data;
op1 = .{ .mem = Mir.MemoryRip.decode(mrip) };
op2 = .{ .reg = data.rix.r };
op3 = .{ .imm = Immediate.u(data.rix.i) };
},
else => return emit.fail("TODO handle generic encoding: {s}, {s}", .{
@tagName(mnemonic),
@tagName(ops),
}),
}
return emit.encode(mnemonic, .{
.prefix = prefix,
.op1 = op1,
.op2 = op2,
.op3 = op3,
.op4 = op4,
});
}
fn mirString(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
switch (ops) {
.string => {
const data = emit.mir.instructions.items(.data)[inst].string;
const mnemonic = switch (tag) {
inline .cmps, .lods, .movs, .scas, .stos => |comptime_tag| switch (data.width) {
inline else => |comptime_width| @field(
Instruction.Mnemonic,
@tagName(comptime_tag) ++ @tagName(comptime_width),
),
},
else => unreachable,
};
return emit.encode(mnemonic, .{ .prefix = switch (data.repeat) {
inline else => |comptime_repeat| @field(Instruction.Prefix, @tagName(comptime_repeat)),
} });
},
else => unreachable,
}
}
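The nested `inline` switches in the removed `mirString` do their string pasting at compile time: each (tag, width) pair becomes its own comptime branch, so `.movs` with width `.b` resolves straight to the mnemonic `.movsb`, with no runtime formatting. The technique in isolation (enum names invented for the demo):

const std = @import("std");

test "comptime @tagName concatenation picks an enum field" {
    const Mnemonic = enum { movsb, movsw, movsd, movsq };
    const Width = enum { b, w, d, q };
    const width: Width = .b;
    const mnemonic = switch (width) {
        inline else => |w| @field(Mnemonic, "movs" ++ @tagName(w)),
    };
    try std.testing.expectEqual(Mnemonic.movsb, mnemonic);
}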
fn mirCmpxchgBytes(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
const data = emit.mir.instructions.items(.data)[inst];
var op1: Instruction.Operand = .none;
switch (ops) {
.m_sib, .lock_m_sib => {
const sib = emit.mir.extraData(Mir.MemorySib, data.payload).data;
op1 = .{ .mem = Mir.MemorySib.decode(sib) };
},
.m_rip, .lock_m_rip => {
const rip = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
op1 = .{ .mem = Mir.MemoryRip.decode(rip) };
},
else => unreachable,
}
const mnemonic: Instruction.Mnemonic = switch (op1.mem.bitSize()) {
64 => .cmpxchg8b,
128 => .cmpxchg16b,
else => unreachable,
};
return emit.encode(mnemonic, .{
.prefix = switch (ops) {
.m_sib, .m_rip => .none,
.lock_m_sib, .lock_m_rip => .lock,
else => unreachable,
},
.op1 = op1,
});
}
fn mirMovMoffs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
const payload = emit.mir.instructions.items(.data)[inst].payload;
const moffs = emit.mir.extraData(Mir.MemoryMoffs, payload).data;
const seg = @intToEnum(Register, moffs.seg);
const offset = moffs.decodeOffset();
switch (ops) {
.rax_moffs => {
try emit.encode(.mov, .{
.op1 = .{ .reg = .rax },
.op2 = .{ .mem = Memory.moffs(seg, offset) },
});
},
.moffs_rax, .lock_moffs_rax => {
try emit.encode(.mov, .{
.prefix = switch (ops) {
.moffs_rax => .none,
.lock_moffs_rax => .lock,
else => unreachable,
},
.op1 = .{ .mem = Memory.moffs(seg, offset) },
.op2 = .{ .reg = .rax },
});
},
else => unreachable,
}
}
fn mirMovsx(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
const data = emit.mir.instructions.items(.data)[inst];
var op1: Instruction.Operand = .none;
var op2: Instruction.Operand = .none;
switch (ops) {
.rr => {
op1 = .{ .reg = data.rr.r1 };
op2 = .{ .reg = data.rr.r2 };
},
.rm_sib => {
const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data;
op1 = .{ .reg = data.rx.r };
op2 = .{ .mem = Mir.MemorySib.decode(msib) };
},
.rm_rip => {
const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data;
op1 = .{ .reg = data.rx.r };
op2 = .{ .mem = Mir.MemoryRip.decode(mrip) };
},
else => unreachable, // TODO
}
const mnemonic: Instruction.Mnemonic = switch (op1.bitSize()) {
32, 64 => if (op2.bitSize() == 32) .movsxd else .movsx,
else => .movsx,
};
return emit.encode(mnemonic, .{
.op1 = op1,
.op2 = op2,
});
}
fn mnemonicFromConditionCode(comptime basename: []const u8, cc: bits.Condition) Instruction.Mnemonic {
return switch (cc) {
inline else => |comptime_cc| @field(Instruction.Mnemonic, basename ++ @tagName(comptime_cc)),
};
}
fn mirCmovcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
switch (ops) {
.rr_cc => {
const data = emit.mir.instructions.items(.data)[inst].rr_cc;
const mnemonic = mnemonicFromConditionCode("cmov", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .reg = data.r1 },
.op2 = .{ .reg = data.r2 },
});
},
.rm_sib_cc => {
const data = emit.mir.instructions.items(.data)[inst].rx_cc;
const extra = emit.mir.extraData(Mir.MemorySib, data.payload).data;
const mnemonic = mnemonicFromConditionCode("cmov", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .reg = data.r },
.op2 = .{ .mem = Mir.MemorySib.decode(extra) },
});
},
.rm_rip_cc => {
const data = emit.mir.instructions.items(.data)[inst].rx_cc;
const extra = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
const mnemonic = mnemonicFromConditionCode("cmov", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .reg = data.r },
.op2 = .{ .mem = Mir.MemoryRip.decode(extra) },
});
},
else => unreachable,
}
}
fn mirSetcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
switch (ops) {
.r_cc => {
const data = emit.mir.instructions.items(.data)[inst].r_cc;
const mnemonic = mnemonicFromConditionCode("set", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .reg = data.r },
});
},
.m_sib_cc => {
const data = emit.mir.instructions.items(.data)[inst].x_cc;
const extra = emit.mir.extraData(Mir.MemorySib, data.payload).data;
const mnemonic = mnemonicFromConditionCode("set", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .mem = Mir.MemorySib.decode(extra) },
});
},
.m_rip_cc => {
const data = emit.mir.instructions.items(.data)[inst].x_cc;
const extra = emit.mir.extraData(Mir.MemoryRip, data.payload).data;
const mnemonic = mnemonicFromConditionCode("set", data.cc);
return emit.encode(mnemonic, .{
.op1 = .{ .mem = Mir.MemoryRip.decode(extra) },
});
},
else => unreachable, // TODO
}
}
fn mirJcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
switch (ops) {
.inst_cc => {
const data = emit.mir.instructions.items(.data)[inst].inst_cc;
const mnemonic = mnemonicFromConditionCode("j", data.cc);
const source = emit.code.items.len;
try emit.encode(mnemonic, .{
.op1 = .{ .imm = Immediate.s(0) },
});
try emit.relocs.append(emit.bin_file.allocator, .{
.source = source,
.target = data.inst,
.offset = emit.code.items.len - 4,
.length = 6,
});
},
else => unreachable, // TODO
}
}
fn mirJmpReloc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const target = emit.mir.instructions.items(.data)[inst].inst;
const source = emit.code.items.len;
try emit.encode(.jmp, .{
.op1 = .{ .imm = Immediate.s(0) },
});
try emit.relocs.append(emit.bin_file.allocator, .{
.source = source,
.target = target,
.offset = emit.code.items.len - 4,
.length = 5,
});
}
fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const relocation = emit.mir.instructions.items(.data)[inst].relocation;
const offset = blk: {
try emit.encode(.call, .{
.op1 = .{ .imm = Immediate.s(0) },
});
break :blk @intCast(u32, emit.code.items.len) - 4;
};
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = offset,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index);
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = offset,
.addend = 0,
.pcrel = true,
.length = 2,
});
} else {
return emit.fail("TODO implement call_extern for linking backends different than MachO and COFF", .{});
}
}
fn mirPushPopRegisterList(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
const payload = emit.mir.instructions.items(.data)[inst].payload;
const save_reg_list = emit.mir.extraData(Mir.SaveRegisterList, payload).data;
const base = @intToEnum(Register, save_reg_list.base_reg);
var disp: i32 = -@intCast(i32, save_reg_list.stack_end);
const reg_list = Mir.RegisterList.fromInt(save_reg_list.register_list);
const callee_preserved_regs = abi.getCalleePreservedRegs(emit.target.*);
for (callee_preserved_regs) |reg| {
if (reg_list.isSet(callee_preserved_regs, reg)) {
const op1: Instruction.Operand = .{ .mem = Memory.sib(.qword, .{
.base = base,
.disp = disp,
}) };
const op2: Instruction.Operand = .{ .reg = reg };
switch (tag) {
.push => try emit.encode(.mov, .{
.op1 = op1,
.op2 = op2,
}),
.pop => try emit.encode(.mov, .{
.op1 = op2,
.op2 = op1,
}),
else => unreachable,
}
disp += 8;
}
}
}
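`push_regs`/`pop_regs` therefore unroll into one plain qword move per set bit in the callee-preserved list, walking `disp` up from `-stack_end`. With, say, `{ rbx, r12 }` in the register list and `stack_end == 16`, the emitted instructions would be (base register, operands, and ordering hypothetical):

mov qword ptr [rbp - 16], rbx    ; push_regs: spill
mov qword ptr [rbp - 8],  r12
mov rbx, qword ptr [rbp - 16]    ; pop_regs: mirror-image restore
mov r12, qword ptr [rbp - 8]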
fn mirLeaLinker(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst];
const payload = emit.mir.instructions.items(.data)[inst].payload;
const metadata = emit.mir.extraData(Mir.LeaRegisterReloc, payload).data;
const reg = @intToEnum(Register, metadata.reg);
try emit.encode(.lea, .{
.op1 = .{ .reg = reg },
.op2 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) },
});
const end_offset = emit.code.items.len;
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const reloc_type = switch (ops) {
.got_reloc => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
.direct_reloc => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
const atom_index = macho_file.getAtomIndexForSymbol(.{
.sym_index = metadata.atom_index,
.file = null,
}).?;
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = reloc_type,
.target = .{ .sym_index = metadata.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom_index = coff_file.getAtomIndexForSymbol(.{
.sym_index = metadata.atom_index,
.file = null,
}).?;
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (ops) {
.got_reloc => .got,
.direct_reloc => .direct,
.import_reloc => .import,
else => unreachable,
},
.target = switch (ops) {
.got_reloc, .direct_reloc => .{ .sym_index = metadata.sym_index, .file = null },
.import_reloc => coff_file.getGlobalByIndex(metadata.sym_index),
else => unreachable,
},
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
});
} else {
return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends different than MachO", .{});
}
}
fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const payload = emit.mir.instructions.items(.data)[inst].payload;
const dbg_line_column = emit.mir.extraData(Mir.DbgLineColumn, payload).data;
log.debug("mirDbgLine", .{});
try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column);
}
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) InnerError!void {
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line);
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
log.debug(" (advance pc={d} and line={d})", .{ delta_line, delta_pc });
@ -756,7 +209,7 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) InnerError!void {
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
// we have already checked the target in the linker to make sure it is compatible
const quant = @import("../../link/Plan9/aout.zig").getPCQuant(emit.target.cpu.arch) catch unreachable;
const quant = @import("../../link/Plan9/aout.zig").getPCQuant(emit.lower.target.cpu.arch) catch unreachable;
// increasing the line number
try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line);
@ -792,34 +245,12 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) InnerError!void {
}
}
fn mirDbgPrologueEnd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
_ = inst;
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
emit.prev_di_line,
emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
const link = @import("../../link.zig");
const log = std.log.scoped(.emit);
const mem = std.mem;
const std = @import("std");
fn mirDbgEpilogueBegin(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
_ = inst;
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_line,
emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const Emit = @This();
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");


@ -7,30 +7,32 @@ const math = std.math;
const bits = @import("bits.zig");
const encoder = @import("encoder.zig");
const Instruction = encoder.Instruction;
const Operand = Instruction.Operand;
const Prefix = Instruction.Prefix;
const Register = bits.Register;
const Rex = encoder.Rex;
const LegacyPrefixes = encoder.LegacyPrefixes;
const table = @import("encodings.zig").table;
mnemonic: Mnemonic,
op_en: OpEn,
op1: Op,
op2: Op,
op3: Op,
op4: Op,
opc_len: u3,
opc: [7]u8,
modrm_ext: u3,
mode: Mode,
data: Data,
pub fn findByMnemonic(mnemonic: Mnemonic, args: Instruction.Init) !?Encoding {
const input_op1 = Op.fromOperand(args.op1);
const input_op2 = Op.fromOperand(args.op2);
const input_op3 = Op.fromOperand(args.op3);
const input_op4 = Op.fromOperand(args.op4);
const Data = struct {
op_en: OpEn,
ops: [4]Op,
opc_len: u3,
opc: [7]u8,
modrm_ext: u3,
mode: Mode,
};
pub fn findByMnemonic(
prefix: Instruction.Prefix,
mnemonic: Mnemonic,
ops: []const Instruction.Operand,
) !?Encoding {
var input_ops = [1]Op{.none} ** 4;
for (input_ops[0..ops.len], ops) |*input_op, op| input_op.* = Op.fromOperand(op);
const ops = &[_]Instruction.Operand{ args.op1, args.op2, args.op3, args.op4 };
const rex_required = for (ops) |op| switch (op) {
.reg => |r| switch (r) {
.spl, .bpl, .sil, .dil => break true,
@ -60,88 +62,29 @@ pub fn findByMnemonic(mnemonic: Mnemonic, args: Instruction.Init) !?Encoding {
if ((rex_required or rex_extended) and rex_invalid) return error.CannotEncode;
// TODO work out the maximum number of variants we can actually find in one pass.
var candidates: [10]Encoding = undefined;
var count: usize = 0;
for (table) |entry| {
var enc = Encoding{
.mnemonic = entry[0],
.op_en = entry[1],
.op1 = entry[2],
.op2 = entry[3],
.op3 = entry[4],
.op4 = entry[5],
.opc_len = @intCast(u3, entry[6].len),
.opc = undefined,
.modrm_ext = entry[7],
.mode = entry[8],
};
std.mem.copy(u8, &enc.opc, entry[6]);
if (enc.mnemonic == mnemonic and
input_op1.isSubset(enc.op1, enc.mode) and
input_op2.isSubset(enc.op2, enc.mode) and
input_op3.isSubset(enc.op3, enc.mode) and
input_op4.isSubset(enc.op4, enc.mode))
{
if (rex_required) {
switch (enc.mode) {
.rex, .long => {
candidates[count] = enc;
count += 1;
},
else => {},
}
} else {
if (enc.mode != .rex) {
candidates[count] = enc;
count += 1;
}
}
var shortest_enc: ?Encoding = null;
var shortest_len: ?usize = null;
next: for (mnemonic_to_encodings_map[@enumToInt(mnemonic)]) |data| {
switch (data.mode) {
.rex => if (!rex_required) continue,
.long => {},
else => if (rex_required) continue,
}
for (input_ops, data.ops) |input_op, data_op|
if (!input_op.isSubset(data_op, data.mode)) continue :next;
const enc = Encoding{ .mnemonic = mnemonic, .data = data };
if (shortest_enc) |previous_shortest_enc| {
const len = estimateInstructionLength(prefix, enc, ops);
const previous_shortest_len = shortest_len orelse
estimateInstructionLength(prefix, previous_shortest_enc, ops);
if (len < previous_shortest_len) {
shortest_enc = enc;
shortest_len = len;
} else shortest_len = previous_shortest_len;
} else shortest_enc = enc;
}
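// Added commentary: `shortest_len` caches the encoded length of the current
// best candidate, so `estimateInstructionLength` runs at most once per
// candidate; the first match is accepted without being measured until a
// second match forces a comparison.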
if (count == 0) return null;
if (count == 1) return candidates[0];
const EncodingLength = struct {
fn estimate(encoding: Encoding, params: Instruction.Init) usize {
var inst = Instruction{
.op1 = params.op1,
.op2 = params.op2,
.op3 = params.op3,
.op4 = params.op4,
.prefix = params.prefix,
.encoding = encoding,
};
var cwriter = std.io.countingWriter(std.io.null_writer);
inst.encode(cwriter.writer()) catch unreachable; // Not allowed to fail here unless OOM.
return @intCast(usize, cwriter.bytes_written);
}
};
var shortest_encoding: ?struct {
index: usize,
len: usize,
} = null;
var i: usize = 0;
while (i < count) : (i += 1) {
const candidate = candidates[i];
switch (candidate.mode) {
.long, .rex => if (rex_invalid) return error.CannotEncode,
else => {},
}
const len = EncodingLength.estimate(candidate, args);
const current = shortest_encoding orelse {
shortest_encoding = .{ .index = i, .len = len };
continue;
};
if (len < current.len) {
shortest_encoding = .{ .index = i, .len = len };
}
}
return candidates[shortest_encoding.?.index];
return shortest_enc;
}
/// Returns first matching encoding by opcode.
@ -149,57 +92,45 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct {
legacy: LegacyPrefixes,
rex: Rex,
}, modrm_ext: ?u3) ?Encoding {
for (table) |entry| {
const enc = Encoding{
.mnemonic = entry[0],
.op_en = entry[1],
.op1 = entry[2],
.op2 = entry[3],
.op3 = entry[4],
.op4 = entry[5],
.opc_len = entry[6],
.opc = .{ entry[7], entry[8], entry[9] },
.modrm_ext = entry[10],
.mode = entry[11],
};
const match = match: {
if (modrm_ext) |ext| {
break :match ext == enc.modrm_ext and std.mem.eql(u8, enc.opcode(), opc);
for (mnemonic_to_encodings_map, 0..) |encs, mnemonic_int| for (encs) |data| {
const enc = Encoding{ .mnemonic = @intToEnum(Mnemonic, mnemonic_int), .data = data };
if (modrm_ext) |ext| if (ext != data.modrm_ext) continue;
if (!std.mem.eql(u8, opc, enc.opcode())) continue;
if (prefixes.rex.w) {
switch (data.mode) {
.short, .fpu, .sse, .sse2, .sse4_1, .none => continue,
.long, .rex => {},
}
break :match std.mem.eql(u8, enc.opcode(), opc);
};
if (match) {
if (prefixes.rex.w) {
switch (enc.mode) {
.fpu, .sse, .sse2, .sse4_1, .none => {},
.long, .rex => return enc,
}
} else if (prefixes.rex.present and !prefixes.rex.isSet()) {
if (enc.mode == .rex) return enc;
} else if (prefixes.legacy.prefix_66) {
switch (enc.operandBitSize()) {
16 => return enc,
} else if (prefixes.rex.present and !prefixes.rex.isSet()) {
switch (data.mode) {
.rex => {},
else => continue,
}
} else if (prefixes.legacy.prefix_66) {
switch (enc.operandBitSize()) {
16 => {},
else => continue,
}
} else {
switch (data.mode) {
.none => switch (enc.operandBitSize()) {
16 => continue,
else => {},
}
} else {
if (enc.mode == .none) {
switch (enc.operandBitSize()) {
16 => {},
else => return enc,
}
}
},
else => continue,
}
}
}
return enc;
};
return null;
}
pub fn opcode(encoding: *const Encoding) []const u8 {
return encoding.opc[0..encoding.opc_len];
return encoding.data.opc[0..encoding.data.opc_len];
}
pub fn mandatoryPrefix(encoding: *const Encoding) ?u8 {
const prefix = encoding.opc[0];
const prefix = encoding.data.opc[0];
return switch (prefix) {
0x66, 0xf2, 0xf3 => prefix,
else => null,
@ -207,27 +138,27 @@ pub fn mandatoryPrefix(encoding: *const Encoding) ?u8 {
}
pub fn modRmExt(encoding: Encoding) u3 {
return switch (encoding.op_en) {
.m, .mi, .m1, .mc => encoding.modrm_ext,
return switch (encoding.data.op_en) {
.m, .mi, .m1, .mc => encoding.data.modrm_ext,
else => unreachable,
};
}
pub fn operandBitSize(encoding: Encoding) u64 {
switch (encoding.mode) {
switch (encoding.data.mode) {
.short => return 16,
.long => return 64,
else => {},
}
const bit_size: u64 = switch (encoding.op_en) {
.np => switch (encoding.op1) {
const bit_size: u64 = switch (encoding.data.op_en) {
.np => switch (encoding.data.ops[0]) {
.o16 => 16,
.o32 => 32,
.o64 => 64,
else => 32,
},
.td => encoding.op2.bitSize(),
else => encoding.op1.bitSize(),
.td => encoding.data.ops[1].bitSize(),
else => encoding.data.ops[0].bitSize(),
};
return bit_size;
}
@ -240,7 +171,7 @@ pub fn format(
) !void {
_ = options;
_ = fmt;
switch (encoding.mode) {
switch (encoding.data.mode) {
.long => try writer.writeAll("REX.W + "),
else => {},
}
@ -249,10 +180,10 @@ pub fn format(
try writer.print("{x:0>2} ", .{byte});
}
switch (encoding.op_en) {
switch (encoding.data.op_en) {
.np, .fd, .td, .i, .zi, .d => {},
.o, .oi => {
const tag = switch (encoding.op1) {
const tag = switch (encoding.data.ops[0]) {
.r8 => "rb",
.r16 => "rw",
.r32 => "rd",
@ -265,12 +196,12 @@ pub fn format(
.mr, .rm, .rmi, .mri, .mrc => try writer.writeAll("/r "),
}
switch (encoding.op_en) {
switch (encoding.data.op_en) {
.i, .d, .zi, .oi, .mi, .rmi, .mri => {
const op = switch (encoding.op_en) {
.i, .d => encoding.op1,
.zi, .oi, .mi => encoding.op2,
.rmi, .mri => encoding.op3,
const op = switch (encoding.data.op_en) {
.i, .d => encoding.data.ops[0],
.zi, .oi, .mi => encoding.data.ops[1],
.rmi, .mri => encoding.data.ops[2],
else => unreachable,
};
const tag = switch (op) {
@ -290,13 +221,12 @@ pub fn format(
try writer.print("{s} ", .{@tagName(encoding.mnemonic)});
const ops = &[_]Op{ encoding.op1, encoding.op2, encoding.op3, encoding.op4 };
for (ops) |op| switch (op) {
for (encoding.data.ops) |op| switch (op) {
.none, .o16, .o32, .o64 => break,
else => try writer.print("{s} ", .{@tagName(op)}),
};
const op_en = switch (encoding.op_en) {
const op_en = switch (encoding.data.op_en) {
.zi => .i,
else => |op_en| op_en,
};
@ -604,3 +534,53 @@ pub const Mode = enum {
sse2,
sse4_1,
};
fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Operand) usize {
var inst = Instruction{
.prefix = prefix,
.encoding = encoding,
.ops = [1]Operand{.none} ** 4,
};
std.mem.copy(Operand, &inst.ops, ops);
var cwriter = std.io.countingWriter(std.io.null_writer);
inst.encode(cwriter.writer()) catch unreachable; // Not allowed to fail here unless OOM.
return @intCast(usize, cwriter.bytes_written);
}
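// A rough illustration (hypothetical operands, not in the original diff) of
// how findByMnemonic uses this helper to break ties between matching
// encodings:
//
//   const len = estimateInstructionLength(.none, candidate, &.{
//       .{ .reg = .rax },
//       .{ .imm = Immediate.u(1) },
//   });
//
// where Immediate is bits.Immediate; the candidate whose encoded form is
// shortest wins.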
const mnemonic_to_encodings_map = init: {
@setEvalBranchQuota(100_000);
const encodings = @import("encodings.zig");
var entries = encodings.table;
std.sort.sort(encodings.Entry, &entries, {}, struct {
fn lessThan(_: void, lhs: encodings.Entry, rhs: encodings.Entry) bool {
return @enumToInt(lhs[0]) < @enumToInt(rhs[0]);
}
}.lessThan);
var data_storage: [entries.len]Data = undefined;
var mnemonic_map: [@typeInfo(Mnemonic).Enum.fields.len][]const Data = undefined;
var mnemonic_int = 0;
var mnemonic_start = 0;
for (&data_storage, entries, 0..) |*data, entry, data_index| {
data.* = .{
.op_en = entry[1],
.ops = undefined,
.opc_len = entry[3].len,
.opc = undefined,
.modrm_ext = entry[4],
.mode = entry[5],
};
std.mem.copy(Op, &data.ops, entry[2]);
std.mem.copy(u8, &data.opc, entry[3]);
while (mnemonic_int < @enumToInt(entry[0])) : (mnemonic_int += 1) {
mnemonic_map[mnemonic_int] = data_storage[mnemonic_start..data_index];
mnemonic_start = data_index;
}
}
while (mnemonic_int < mnemonic_map.len) : (mnemonic_int += 1) {
mnemonic_map[mnemonic_int] = data_storage[mnemonic_start..];
mnemonic_start = data_storage.len;
}
break :init mnemonic_map;
};
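// Added commentary: because `entries` is sorted by mnemonic above, each
// mnemonic's encodings are contiguous in `data_storage`, and
// `mnemonic_to_encodings_map[m]` is exactly that sub-slice (empty for
// mnemonics with no table entries).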

src/arch/x86_64/Lower.zig (new file, 465 lines)

@ -0,0 +1,465 @@
//! This file contains the functionality for lowering x86_64 MIR to Instructions
allocator: Allocator,
mir: Mir,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Module.SrcLoc,
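/// Buffer for the lowered instructions of a single MIR instruction. It is
/// sized for the worst case, push_regs/pop_regs, which emits one mov per
/// callee-preserved register (see mirPushPopRegisterList below).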
result: [
std.mem.max(usize, &.{
abi.Win64.callee_preserved_regs.len,
abi.SysV.callee_preserved_regs.len,
})
]Instruction = undefined,
result_len: usize = undefined,
pub const Error = error{
OutOfMemory,
LowerFail,
InvalidInstruction,
CannotEncode,
};
/// The returned slice is overwritten by the next call to lowerMir.
pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction {
lower.result = undefined;
errdefer lower.result = undefined;
lower.result_len = 0;
defer lower.result_len = undefined;
switch (inst.tag) {
.adc,
.add,
.@"and",
.bsf,
.bsr,
.bswap,
.bt,
.btc,
.btr,
.bts,
.call,
.cbw,
.cwde,
.cdqe,
.cwd,
.cdq,
.cqo,
.cmp,
.cmpxchg,
.div,
.fisttp,
.fld,
.idiv,
.imul,
.int3,
.jmp,
.lea,
.lfence,
.lzcnt,
.mfence,
.mov,
.movbe,
.movzx,
.mul,
.neg,
.nop,
.not,
.@"or",
.pop,
.popcnt,
.push,
.rcl,
.rcr,
.ret,
.rol,
.ror,
.sal,
.sar,
.sbb,
.sfence,
.shl,
.shld,
.shr,
.shrd,
.sub,
.syscall,
.@"test",
.tzcnt,
.ud2,
.xadd,
.xchg,
.xor,
.addss,
.cmpss,
.divss,
.maxss,
.minss,
.movss,
.mulss,
.roundss,
.subss,
.ucomiss,
.addsd,
.cmpsd,
.divsd,
.maxsd,
.minsd,
.movsd,
.mulsd,
.roundsd,
.subsd,
.ucomisd,
=> try lower.mirGeneric(inst),
.cmps,
.lods,
.movs,
.scas,
.stos,
=> try lower.mirString(inst),
.cmpxchgb => try lower.mirCmpxchgBytes(inst),
.jmp_reloc => try lower.emit(.none, .jmp, &.{.{ .imm = Immediate.s(0) }}),
.call_extern => try lower.emit(.none, .call, &.{.{ .imm = Immediate.s(0) }}),
.lea_linker => try lower.mirLeaLinker(inst),
.mov_moffs => try lower.mirMovMoffs(inst),
.movsx => try lower.mirMovsx(inst),
.cmovcc => try lower.mirCmovcc(inst),
.setcc => try lower.mirSetcc(inst),
.jcc => try lower.emit(.none, mnem_cc(.j, inst.data.inst_cc.cc), &.{.{ .imm = Immediate.s(0) }}),
.push_regs, .pop_regs => try lower.mirPushPopRegisterList(inst),
.dbg_line,
.dbg_prologue_end,
.dbg_epilogue_begin,
.dead,
=> {},
}
return lower.result[0..lower.result_len];
}
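// A minimal usage sketch (hypothetical call site; the real driver lives in
// Emit.zig):
//
//   const insts = try lower.lowerMir(mir.instructions.get(index));
//   for (insts) |lowered| try lowered.encode(code.writer());
//
// The slice aliases `lower.result`, so it must be consumed before the next
// call to lowerMir.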
pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
@setCold(true);
assert(lower.err_msg == null);
lower.err_msg = try ErrorMsg.create(lower.allocator, lower.src_loc, format, args);
return error.LowerFail;
}
fn mnem_cc(comptime base: @Type(.EnumLiteral), cc: bits.Condition) Mnemonic {
return switch (cc) {
inline else => |c| @field(Mnemonic, @tagName(base) ++ @tagName(c)),
};
}
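// Added commentary: the concatenation above resolves at comptime, e.g.
// mnem_cc(.cmov, .e) yields Mnemonic.cmove and mnem_cc(.set, .ne) yields
// Mnemonic.setne.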
fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
return switch (ops) {
.rri_s,
.ri_s,
.i_s,
.mi_sib_s,
.mi_rip_s,
.lock_mi_sib_s,
.lock_mi_rip_s,
=> Immediate.s(@bitCast(i32, i)),
.rri_u,
.ri_u,
.i_u,
.mi_sib_u,
.mi_rip_u,
.lock_mi_sib_u,
.lock_mi_rip_u,
.mri_sib,
.mri_rip,
=> Immediate.u(i),
.ri64 => Immediate.u(lower.mir.extraData(Mir.Imm64, i).data.decode()),
else => unreachable,
};
}
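// Added commentary: the `_s` ops variants carry a sign-extended 32-bit
// immediate (hence the @bitCast to i32), the `_u` variants a zero-extended
// one, and `ri64` a full 64-bit immediate stored out-of-line in Mir extra
// data.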
fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
return switch (ops) {
.rm_sib,
.rm_sib_cc,
.m_sib,
.m_sib_cc,
.mi_sib_u,
.mi_sib_s,
.mr_sib,
.mrr_sib,
.mri_sib,
.lock_m_sib,
.lock_mi_sib_u,
.lock_mi_sib_s,
.lock_mr_sib,
=> lower.mir.extraData(Mir.MemorySib, payload).data.decode(),
.rm_rip,
.rm_rip_cc,
.m_rip,
.m_rip_cc,
.mi_rip_u,
.mi_rip_s,
.mr_rip,
.mrr_rip,
.mri_rip,
.lock_m_rip,
.lock_mi_rip_u,
.lock_mi_rip_s,
.lock_mr_rip,
=> lower.mir.extraData(Mir.MemoryRip, payload).data.decode(),
.rax_moffs,
.moffs_rax,
.lock_moffs_rax,
=> lower.mir.extraData(Mir.MemoryMoffs, payload).data.decode(),
else => unreachable,
};
}
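// Added commentary: the ops suffix selects which memory encoding to decode
// from Mir extra data: `_sib` a base+scale*index+disp operand, `_rip` a
// RIP-relative one, and the moffs variants a 64-bit absolute offset paired
// with a segment register.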
fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
lower.result[lower.result_len] = try Instruction.new(prefix, mnemonic, ops);
lower.result_len += 1;
}
fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void {
try lower.emit(switch (inst.ops) {
else => .none,
.lock_m_sib,
.lock_m_rip,
.lock_mi_sib_u,
.lock_mi_rip_u,
.lock_mi_sib_s,
.lock_mi_rip_s,
.lock_mr_sib,
.lock_mr_rip,
.lock_moffs_rax,
=> .lock,
}, switch (inst.tag) {
inline else => |tag| if (@hasField(Mnemonic, @tagName(tag)))
@field(Mnemonic, @tagName(tag))
else
unreachable,
}, switch (inst.ops) {
.none => &.{},
.i_s, .i_u => &.{
.{ .imm = lower.imm(inst.ops, inst.data.i) },
},
.r => &.{
.{ .reg = inst.data.r },
},
.rr => &.{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
},
.rrr => &.{
.{ .reg = inst.data.rrr.r1 },
.{ .reg = inst.data.rrr.r2 },
.{ .reg = inst.data.rrr.r3 },
},
.ri_s, .ri_u => &.{
.{ .reg = inst.data.ri.r },
.{ .imm = lower.imm(inst.ops, inst.data.ri.i) },
},
.ri64 => &.{
.{ .reg = inst.data.rx.r },
.{ .imm = lower.imm(inst.ops, inst.data.rx.payload) },
},
.rri_s, .rri_u => &.{
.{ .reg = inst.data.rri.r1 },
.{ .reg = inst.data.rri.r2 },
.{ .imm = lower.imm(inst.ops, inst.data.rri.i) },
},
.m_sib, .lock_m_sib, .m_rip, .lock_m_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.payload) },
},
.mi_sib_s,
.lock_mi_sib_s,
.mi_sib_u,
.lock_mi_sib_u,
.mi_rip_u,
.lock_mi_rip_u,
.mi_rip_s,
.lock_mi_rip_s,
=> &.{
.{ .mem = lower.mem(inst.ops, inst.data.ix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.ix.i) },
},
.rm_sib, .rm_rip => &.{
.{ .reg = inst.data.rx.r },
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
},
.mr_sib, .lock_mr_sib, .mr_rip, .lock_mr_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
.{ .reg = inst.data.rx.r },
},
.mrr_sib, .mrr_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.rrx.payload) },
.{ .reg = inst.data.rrx.r1 },
.{ .reg = inst.data.rrx.r2 },
},
.mri_sib, .mri_rip => &.{
.{ .mem = lower.mem(inst.ops, inst.data.rix.payload) },
.{ .reg = inst.data.rix.r },
.{ .imm = lower.imm(inst.ops, inst.data.rix.i) },
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
});
}
fn mirString(lower: *Lower, inst: Mir.Inst) Error!void {
switch (inst.ops) {
.string => try lower.emit(switch (inst.data.string.repeat) {
inline else => |repeat| @field(Prefix, @tagName(repeat)),
}, switch (inst.tag) {
inline .cmps, .lods, .movs, .scas, .stos => |tag| switch (inst.data.string.width) {
inline else => |width| @field(Mnemonic, @tagName(tag) ++ @tagName(width)),
},
else => unreachable,
}, &.{}),
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
}
}
fn mirCmpxchgBytes(lower: *Lower, inst: Mir.Inst) Error!void {
const ops: [1]Operand = switch (inst.ops) {
.m_sib, .lock_m_sib, .m_rip, .lock_m_rip => .{
.{ .mem = lower.mem(inst.ops, inst.data.payload) },
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
};
try lower.emit(switch (inst.ops) {
.m_sib, .m_rip => .none,
.lock_m_sib, .lock_m_rip => .lock,
else => unreachable,
}, switch (@divExact(ops[0].bitSize(), 8)) {
8 => .cmpxchg8b,
16 => .cmpxchg16b,
else => return lower.fail("invalid operand for {s}", .{@tagName(inst.tag)}),
}, &ops);
}
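// Added commentary: despite the `cmpxchgb` tag, the operand is measured in
// bytes here: an 8-byte (qword) memory operand lowers to cmpxchg8b and a
// 16-byte one to cmpxchg16b, with the lock prefix taken from the lock_*
// ops variants.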
fn mirMovMoffs(lower: *Lower, inst: Mir.Inst) Error!void {
try lower.emit(switch (inst.ops) {
.rax_moffs, .moffs_rax => .none,
.lock_moffs_rax => .lock,
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
}, .mov, switch (inst.ops) {
.rax_moffs => &.{
.{ .reg = .rax },
.{ .mem = lower.mem(inst.ops, inst.data.payload) },
},
.moffs_rax, .lock_moffs_rax => &.{
.{ .mem = lower.mem(inst.ops, inst.data.payload) },
.{ .reg = .rax },
},
else => unreachable,
});
}
fn mirMovsx(lower: *Lower, inst: Mir.Inst) Error!void {
const ops: [2]Operand = switch (inst.ops) {
.rr => .{
.{ .reg = inst.data.rr.r1 },
.{ .reg = inst.data.rr.r2 },
},
.rm_sib, .rm_rip => .{
.{ .reg = inst.data.rx.r },
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
},
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
};
try lower.emit(.none, switch (ops[0].bitSize()) {
32, 64 => switch (ops[1].bitSize()) {
32 => .movsxd,
else => .movsx,
},
else => .movsx,
}, &ops);
}
fn mirCmovcc(lower: *Lower, inst: Mir.Inst) Error!void {
switch (inst.ops) {
.rr_cc => try lower.emit(.none, mnem_cc(.cmov, inst.data.rr_cc.cc), &.{
.{ .reg = inst.data.rr_cc.r1 },
.{ .reg = inst.data.rr_cc.r2 },
}),
.rm_sib_cc, .rm_rip_cc => try lower.emit(.none, mnem_cc(.cmov, inst.data.rx_cc.cc), &.{
.{ .reg = inst.data.rx_cc.r },
.{ .mem = lower.mem(inst.ops, inst.data.rx_cc.payload) },
}),
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
}
}
fn mirSetcc(lower: *Lower, inst: Mir.Inst) Error!void {
switch (inst.ops) {
.r_cc => try lower.emit(.none, mnem_cc(.set, inst.data.r_cc.cc), &.{
.{ .reg = inst.data.r_cc.r },
}),
.m_sib_cc, .m_rip_cc => try lower.emit(.none, mnem_cc(.set, inst.data.x_cc.cc), &.{
.{ .mem = lower.mem(inst.ops, inst.data.x_cc.payload) },
}),
else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
}
}
fn mirPushPopRegisterList(lower: *Lower, inst: Mir.Inst) Error!void {
const save_reg_list = lower.mir.extraData(Mir.SaveRegisterList, inst.data.payload).data;
const base = @intToEnum(Register, save_reg_list.base_reg);
var disp: i32 = -@intCast(i32, save_reg_list.stack_end);
const reg_list = Mir.RegisterList.fromInt(save_reg_list.register_list);
const callee_preserved_regs = abi.getCalleePreservedRegs(lower.target.*);
for (callee_preserved_regs) |callee_preserved_reg| {
if (!reg_list.isSet(callee_preserved_regs, callee_preserved_reg)) continue;
const reg_op = Operand{ .reg = callee_preserved_reg };
const mem_op = Operand{ .mem = Memory.sib(.qword, .{ .base = base, .disp = disp }) };
try lower.emit(.none, .mov, switch (inst.tag) {
.push_regs => &.{ mem_op, reg_op },
.pop_regs => &.{ reg_op, mem_op },
else => unreachable,
});
disp += 8;
}
}
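// A rough expansion sketch (hypothetical register list, not in the original
// diff): with base = rbp, stack_end = 16, and saved regs {rbx, r12},
//
//   push_regs => mov qword ptr [rbp - 16], rbx
//                mov qword ptr [rbp - 8], r12
//   pop_regs  => the same moves with the operands swapped.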
fn mirLeaLinker(lower: *Lower, inst: Mir.Inst) Error!void {
const metadata = lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data;
const reg = @intToEnum(Register, metadata.reg);
try lower.emit(.none, .lea, &.{
.{ .reg = reg },
.{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) },
});
}
const abi = @import("abi.zig");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const encoder = @import("encoder.zig");
const std = @import("std");
const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
const ErrorMsg = Module.ErrorMsg;
const Immediate = bits.Immediate;
const Instruction = encoder.Instruction;
const Lower = @This();
const Memory = bits.Memory;
const Mir = @import("Mir.zig");
const Mnemonic = Instruction.Mnemonic;
const Module = @import("../../Module.zig");
const Operand = Instruction.Operand;
const Prefix = Instruction.Prefix;
const Register = bits.Register;


@ -655,16 +655,19 @@ pub const MemoryMoffs = struct {
msb: u32,
lsb: u32,
pub fn encodeOffset(moffs: *MemoryMoffs, v: u64) void {
moffs.msb = @truncate(u32, v >> 32);
moffs.lsb = @truncate(u32, v);
pub fn encode(seg: Register, offset: u64) MemoryMoffs {
return .{
.seg = @enumToInt(seg),
.msb = @truncate(u32, offset >> 32),
.lsb = @truncate(u32, offset >> 0),
};
}
pub fn decodeOffset(moffs: *const MemoryMoffs) u64 {
var res: u64 = 0;
res |= (@intCast(u64, moffs.msb) << 32);
res |= @intCast(u64, moffs.lsb);
return res;
pub fn decode(moffs: MemoryMoffs) Memory {
return .{ .moffs = .{
.seg = @intToEnum(Register, moffs.seg),
.offset = @as(u64, moffs.msb) << 32 | @as(u64, moffs.lsb) << 0,
} };
}
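// A worked example (added commentary): encode(seg, 0x1122334455667788) stores
// msb = 0x11223344 and lsb = 0x55667788; decode() reassembles them as
// (msb << 32) | lsb == 0x1122334455667788.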
};


@ -515,7 +515,7 @@ pub const Memory = union(enum) {
return switch (mem) {
.rip => |r| r.ptr_size.bitSize(),
.sib => |s| s.ptr_size.bitSize(),
.moffs => unreachable,
.moffs => 64,
};
}
};

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -8,7 +8,7 @@ const Type = @import("type.zig").Type;
const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
pub fn dump(module: *Module, air: Air, liveness: Liveness) void {
pub fn write(stream: anytype, module: *Module, air: Air, liveness: Liveness) void {
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
@ -23,7 +23,7 @@ pub fn dump(module: *Module, air: Air, liveness: Liveness) void {
liveness_special_bytes + tomb_bytes;
// zig fmt: off
std.debug.print(
stream.print(
\\# Total AIR+Liveness bytes: {}
\\# AIR Instructions: {d} ({})
\\# AIR Extra Data: {d} ({})
@ -40,65 +40,78 @@ pub fn dump(module: *Module, air: Air, liveness: Liveness) void {
fmtIntSizeBin(tomb_bytes),
liveness.extra.len, fmtIntSizeBin(liveness_extra_bytes),
liveness.special.count(), fmtIntSizeBin(liveness_special_bytes),
});
}) catch return;
// zig fmt: on
var arena = std.heap.ArenaAllocator.init(module.gpa);
defer arena.deinit();
var writer: Writer = .{
.module = module,
.gpa = module.gpa,
.arena = arena.allocator(),
.air = air,
.liveness = liveness,
.indent = 2,
.skip_body = false,
};
const stream = std.io.getStdErr().writer();
writer.writeAllConstants(stream) catch return;
stream.writeByte('\n') catch return;
writer.writeBody(stream, air.getMainBody()) catch return;
}
pub fn writeInst(
stream: anytype,
inst: Air.Inst.Index,
module: *Module,
air: Air,
liveness: Liveness,
) void {
var writer: Writer = .{
.module = module,
.gpa = module.gpa,
.air = air,
.liveness = liveness,
.indent = 2,
.skip_body = true,
};
writer.writeInst(stream, inst) catch return;
}
pub fn dump(module: *Module, air: Air, liveness: Liveness) void {
write(std.io.getStdErr().writer(), module, air, liveness);
}
pub fn dumpInst(inst: Air.Inst.Index, module: *Module, air: Air, liveness: Liveness) void {
writeInst(std.io.getStdErr().writer(), inst, module, air, liveness);
}
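// A small usage sketch (hypothetical call site): a backend can print a single
// AIR instruction without its nested bodies, which is what the new skip_body
// mode enables:
//
//   dumpInst(inst, module, air, liveness);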
const Writer = struct {
module: *Module,
gpa: Allocator,
arena: Allocator,
air: Air,
liveness: Liveness,
indent: usize,
skip_body: bool,
fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void {
for (w.air.instructions.items(.tag), 0..) |tag, i| {
const inst = @intCast(u32, i);
const inst = @intCast(Air.Inst.Index, i);
switch (tag) {
.constant, .const_ty => {
try s.writeByteNTimes(' ', w.indent);
try s.print("%{d} ", .{inst});
try w.writeInst(s, inst);
try s.writeAll(")\n");
},
.constant, .const_ty => try w.writeInst(s, inst),
else => continue,
}
}
}
fn writeBody(w: *Writer, s: anytype, body: []const Air.Inst.Index) @TypeOf(s).Error!void {
for (body) |inst| {
try s.writeByteNTimes(' ', w.indent);
if (w.liveness.isUnused(inst)) {
try s.print("%{d}!", .{inst});
} else {
try s.print("%{d} ", .{inst});
}
try w.writeInst(s, inst);
try s.writeAll(")\n");
}
for (body) |inst| try w.writeInst(s, inst);
}
fn writeInst(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const tags = w.air.instructions.items(.tag);
const tag = tags[inst];
try s.print("= {s}(", .{@tagName(tags[inst])});
const tag = w.air.instructions.items(.tag)[inst];
try s.writeByteNTimes(' ', w.indent);
try s.print("%{d}{c}= {s}(", .{
inst,
@as(u8, if (w.liveness.isUnused(inst)) '!' else ' '),
@tagName(tag),
});
switch (tag) {
.add,
.addwrap,
@ -316,6 +329,7 @@ const Writer = struct {
.dbg_block_begin, .dbg_block_end => {},
}
try s.writeAll(")\n");
}
fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
@ -372,6 +386,7 @@ const Writer = struct {
const body = w.air.extra[extra.end..][0..extra.data.body_len];
try w.writeType(s, w.air.getRefType(ty_pl.ty));
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(", {\n");
const old_indent = w.indent;
w.indent += 2;
@ -703,6 +718,7 @@ const Writer = struct {
const body = w.air.extra[extra.end..][0..extra.data.body_len];
try w.writeOperand(s, inst, 0, pl_op.operand);
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(", {\n");
const old_indent = w.indent;
w.indent += 2;
@ -721,6 +737,7 @@ const Writer = struct {
try s.writeAll(", ");
try w.writeType(s, w.air.getRefType(ty_pl.ty));
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(", {\n");
const old_indent = w.indent;
w.indent += 2;
@ -738,6 +755,7 @@ const Writer = struct {
const liveness_condbr = w.liveness.getCondBr(inst);
try w.writeOperand(s, inst, 0, pl_op.operand);
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(", {\n");
const old_indent = w.indent;
w.indent += 2;
@ -900,10 +918,7 @@ const Writer = struct {
dies: bool,
) @TypeOf(s).Error!void {
_ = w;
if (dies) {
try s.print("%{d}!", .{inst});
} else {
try s.print("%{d}", .{inst});
}
try s.print("%{d}", .{inst});
if (dies) try s.writeByte('!');
}
};


@ -47,6 +47,7 @@ test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var flags = A{
.a = false,