stage2 AArch64: begin transition to MIR
This commit includes the transitions for the following instructions:

- add_immediate
- b
- bl
- blr
- brk
- ldp
- ldr
- ldrb
- ldrh
- mov_to_from_sp
- mov_register
- movk
- movz
- nop
- ret
- stp
- str
- strb
- strh
- sub_immediate
- svc
parent 969bcb6a59
commit 7fc89f64b4
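At a high level: before this commit, codegen wrote each AArch64 machine word into `self.code` the moment an instruction was selected; after it, codegen appends tagged `Mir.Inst` values (via the new `addInst` helper in the diff below) and a separate `Emit` pass lowers them to bytes. The following is a minimal, self-contained sketch of that two-phase pattern — simplified stand-in types, not the compiler's actual interfaces (the real definitions are in Mir.zig and Emit.zig further down, and the real code uses std.MultiArrayList); only the brk/svc/nop encodings are the genuine AArch64 ones.

    const std = @import("std");

    // Hypothetical, pared-down stand-ins for Mir.Inst.Tag / Mir.Inst.Data.
    const Tag = enum { brk, svc, nop };
    const Data = union { imm16: u16, none: void };
    const Inst = struct { tag: Tag, data: Data };

    // Phase 1 (codegen): append a tagged MIR instruction; no bytes are written yet.
    fn addInst(insts: *std.ArrayList(Inst), inst: Inst) !u32 {
        const index = @intCast(u32, insts.items.len);
        try insts.append(inst);
        return index; // later instructions can refer back to this index
    }

    // Phase 2 (emit): lower every MIR instruction to a 4-byte machine word.
    fn emitMir(code: *std.ArrayList(u8), insts: []const Inst) !void {
        for (insts) |inst| {
            const word: u32 = switch (inst.tag) {
                .brk => 0xd4200000 | (@as(u32, inst.data.imm16) << 5),
                .svc => 0xd4000001 | (@as(u32, inst.data.imm16) << 5),
                .nop => 0xd503201f,
            };
            std.mem.writeIntLittle(u32, try code.addManyAsArray(4), word);
        }
    }

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        var insts = std.ArrayList(Inst).init(&gpa.allocator);
        defer insts.deinit();
        var code = std.ArrayList(u8).init(&gpa.allocator);
        defer code.deinit();

        _ = try addInst(&insts, .{ .tag = .brk, .data = .{ .imm16 = 1 } }); // cf. airBreakpoint
        _ = try addInst(&insts, .{ .tag = .svc, .data = .{ .imm16 = 0x80 } }); // cf. airAsm "svc #0x80"
        try emitMir(&code, insts.items);
        std.debug.print("emitted {} bytes\n", .{code.items.len});
    }

Deferring encoding this way is what later allows offsets (e.g. branch targets) to be assigned in one place instead of being backpatched into raw bytes.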
src/arch/aarch64/CodeGen.zig

@@ -5,6 +5,8 @@ const math = std.math;
const assert = std.debug.assert;
const Air = @import("../../Air.zig");
const Zir = @import("../../Zir.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type;
const Value = @import("../../value.zig").Value;
@@ -31,14 +33,12 @@ const InnerError = error{
    CodegenFail,
};

arch: std.Target.Cpu.Arch,
gpa: *Allocator,
air: Air,
liveness: Liveness,
bin_file: *link.File,
target: *const std.Target,
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
@@ -48,6 +48,11 @@ arg_index: usize,
src_loc: Module.SrcLoc,
stack_align: u32,

/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// MIR extra data
mir_extra: std.ArrayListUnmanaged(u32) = .{},

prev_di_line: u32,
prev_di_column: u32,
/// Byte offset within the source file of the ending curly.
@@ -237,7 +242,6 @@ const BigTomb = struct {
const Self = @This();

pub fn generate(
    arch: std.Target.Cpu.Arch,
    bin_file: *link.File,
    src_loc: Module.SrcLoc,
    module_fn: *Module.Fn,
@@ -246,7 +250,7 @@ pub fn generate(
    code: *std.ArrayList(u8),
    debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult {
    if (build_options.skip_non_native and builtin.cpu.arch != arch) {
    if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
        @panic("Attempted to compile for architecture that was disabled by build configuration");
    }

@@ -262,14 +266,12 @@ pub fn generate(
    try branch_stack.append(.{});

    var function = Self{
        .arch = arch,
        .gpa = bin_file.allocator,
        .air = air,
        .liveness = liveness,
        .target = &bin_file.options.target,
        .bin_file = bin_file,
        .mod_fn = module_fn,
        .code = code,
        .debug_output = debug_output,
        .err_msg = null,
        .args = undefined, // populated after `resolveCallingConventionValues`
@@ -305,6 +307,19 @@ pub fn generate(
        else => |e| return e,
    };

    var mir = Mir{
        .instructions = function.mir_instructions.toOwnedSlice(),
        .extra = function.mir_extra.toOwnedSlice(bin_file.allocator),
    };
    defer mir.deinit(bin_file.allocator);

    var emit = Emit{
        .mir = mir,
        .target = &bin_file.options.target,
        .code = code,
    };
    try emit.emitMir();

    if (function.err_msg) |em| {
        return FnResult{ .fail = em };
    } else {
@@ -312,6 +327,16 @@ pub fn generate(
    }
}

fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
    const gpa = self.gpa;

    try self.mir_instructions.ensureUnusedCapacity(gpa, 1);

    const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
    self.mir_instructions.appendAssumeCapacity(inst);
    return result_index;
}

fn gen(self: *Self) !void {
    const cc = self.fn_type.fnCallingConvention();
    if (cc != .Naked) {
@@ -320,15 +345,26 @@ fn gen(self: *Self) !void {
        // stp fp, lr, [sp, #-16]!
        // mov fp, sp
        // sub sp, sp, #reloc
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.stp(
            .x29,
            .x30,
            Register.sp,
            Instruction.LoadStorePairOffset.pre_index(-16),
        ).toU32());
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.add(.x29, .xzr, 0, false).toU32());
        const backpatch_reloc = self.code.items.len;
        try self.code.resize(backpatch_reloc + 4);

        _ = try self.addInst(.{
            .tag = .stp,
            .data = .{ .load_store_register_pair = .{
                .rt = .x29,
                .rt2 = .x30,
                .rn = Register.sp,
                .offset = Instruction.LoadStorePairOffset.pre_index(-16),
            } },
        });

        _ = try self.addInst(.{
            .tag = .mov_to_from_sp,
            .data = .{ .rr = .{ .rd = .x29, .rn = .xzr } },
        });

        const backpatch_reloc = try self.addInst(.{
            .tag = .nop,
            .data = .{ .nop = {} },
        });

        try self.dbgSetPrologueEnd();

@@ -338,7 +374,10 @@ fn gen(self: *Self) !void {
        const stack_end = self.max_end_stack;
        const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);
        if (math.cast(u12, aligned_stack_end)) |size| {
            mem.writeIntLittle(u32, self.code.items[backpatch_reloc..][0..4], Instruction.sub(.xzr, .xzr, size, false).toU32());
            self.mir_instructions.set(backpatch_reloc, .{
                .tag = .sub_immediate,
                .data = .{ .rr_imm12_sh = .{ .rd = .xzr, .rn = .xzr, .imm12 = size } },
            });
        } else |_| {
            return self.failSymbol("TODO AArch64: allow larger stacks", .{});
        }
@@ -352,36 +391,36 @@ fn gen(self: *Self) !void {
            // the code. Therefore, we can just delete
            // the space initially reserved for the
            // jump
            self.code.items.len -= 4;
            self.mir_instructions.len -= 1;
        } else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
            const amt = @intCast(i32, self.code.items.len) - @intCast(i32, jmp_reloc + 8);
            if (amt == -4) {
                // This return is at the end of the
                // code block. We can't just delete
                // the space because there may be
                // other jumps we already relocated to
                // the address. Instead, insert a nop
                mem.writeIntLittle(u32, self.code.items[jmp_reloc..][0..4], Instruction.nop().toU32());
            } else {
                if (math.cast(i28, amt)) |offset| {
                    mem.writeIntLittle(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(offset).toU32());
                } else |_| {
                    return self.failSymbol("exitlude jump is too large", .{});
                }
            }
            self.mir_instructions.set(jmp_reloc, .{
                .tag = .b,
                .data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
            });
        }

        // ldp fp, lr, [sp], #16
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldp(
            .x29,
            .x30,
            Register.sp,
            Instruction.LoadStorePairOffset.post_index(16),
        ).toU32());
        _ = try self.addInst(.{
            .tag = .ldp,
            .data = .{ .load_store_register_pair = .{
                .rt = .x29,
                .rt2 = .x30,
                .rn = Register.sp,
                .offset = Instruction.LoadStorePairOffset.post_index(16),
            } },
        });

        // add sp, sp, #stack_size
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.add(.xzr, .xzr, @intCast(u12, aligned_stack_end), false).toU32());
        _ = try self.addInst(.{
            .tag = .add_immediate,
            .data = .{ .rr_imm12_sh = .{ .rd = .xzr, .rn = .xzr, .imm12 = @intCast(u12, aligned_stack_end) } },
        });

        // ret lr
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32());
        _ = try self.addInst(.{
            .tag = .ret,
            .data = .{ .reg = .x30 },
        });
    } else {
        try self.dbgSetPrologueEnd();
        try self.genBody(self.air.getMainBody());
@@ -389,7 +428,7 @@ fn gen(self: *Self) !void {
    }

    // Drop them off at the rbrace.
    try self.dbgAdvancePCAndLine(self.end_di_line, self.end_di_column);
    // try self.dbgAdvancePCAndLine(self.end_di_line, self.end_di_column);
}

fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
@@ -534,7 +573,7 @@ fn dbgSetPrologueEnd(self: *Self) InnerError!void {
    switch (self.debug_output) {
        .dwarf => |dbg_out| {
            try dbg_out.dbg_line.append(DW.LNS.set_prologue_end);
            try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
            // try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
        },
        .plan9 => {},
        .none => {},
@@ -545,7 +584,7 @@ fn dbgSetEpilogueBegin(self: *Self) InnerError!void {
    switch (self.debug_output) {
        .dwarf => |dbg_out| {
            try dbg_out.dbg_line.append(DW.LNS.set_epilogue_begin);
            try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
            // try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
        },
        .plan9 => {},
        .none => {},
@@ -1297,310 +1336,6 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
    //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
}

fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool {
    return switch (mcv) {
        .none => unreachable,
        .undef => unreachable,
        .dead, .unreach => unreachable,
        .compare_flags_unsigned => unreachable,
        .compare_flags_signed => unreachable,
        .ptr_stack_offset => unreachable,
        .ptr_embedded_in_code => unreachable,
        .immediate => |imm| blk: {
            if (imm > std.math.maxInt(u32)) return self.fail("TODO ARM binary arithmetic immediate larger than u32", .{});

            // Load immediate into register if it doesn't fit
            // in an operand
            break :blk Instruction.Operand.fromU32(@intCast(u32, imm)) == null;
        },
        .register => true,
        .stack_offset,
        .embedded_in_code,
        .memory,
        => true,
    };
}

fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue {
    // In the case of bitshifts, the type of rhs is different
    // from the resulting type
    const ty = self.air.typeOf(op_lhs);

    switch (ty.zigTypeTag()) {
        .Float => return self.fail("TODO ARM binary operations on floats", .{}),
        .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
        .Bool => {
            return self.genArmBinIntOp(inst, op_lhs, op_rhs, op, 1, .unsigned);
        },
        .Int => {
            const int_info = ty.intInfo(self.target.*);
            return self.genArmBinIntOp(inst, op_lhs, op_rhs, op, int_info.bits, int_info.signedness);
        },
        else => unreachable,
    }
}

fn genArmBinIntOp(
    self: *Self,
    inst: Air.Inst.Index,
    op_lhs: Air.Inst.Ref,
    op_rhs: Air.Inst.Ref,
    op: Air.Inst.Tag,
    bits: u16,
    signedness: std.builtin.Signedness,
) !MCValue {
    if (bits > 32) {
        return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
    }

    const lhs = try self.resolveInst(op_lhs);
    const rhs = try self.resolveInst(op_rhs);

    const lhs_is_register = lhs == .register;
    const rhs_is_register = rhs == .register;
    const lhs_should_be_register = switch (op) {
        .shr, .shl => true,
        else => try self.armOperandShouldBeRegister(lhs),
    };
    const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs);
    const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs);
    const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs);
    const can_swap_lhs_and_rhs = switch (op) {
        .shr, .shl => false,
        else => true,
    };

    // Destination must be a register
    var dst_mcv: MCValue = undefined;
    var lhs_mcv = lhs;
    var rhs_mcv = rhs;
    var swap_lhs_and_rhs = false;

    // Allocate registers for operands and/or destination
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    if (reuse_lhs) {
        // Allocate 0 or 1 registers
        if (!rhs_is_register and rhs_should_be_register) {
            rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) };
            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
        }
        dst_mcv = lhs;
    } else if (reuse_rhs and can_swap_lhs_and_rhs) {
        // Allocate 0 or 1 registers
        if (!lhs_is_register and lhs_should_be_register) {
            lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) };
            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv);
        }
        dst_mcv = rhs;

        swap_lhs_and_rhs = true;
    } else {
        // Allocate 1 or 2 registers
        if (lhs_should_be_register and rhs_should_be_register) {
            if (lhs_is_register and rhs_is_register) {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{ lhs.register, rhs.register }) };
            } else if (lhs_is_register) {
                // Move RHS to register
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) };
                rhs_mcv = dst_mcv;
            } else if (rhs_is_register) {
                // Move LHS to register
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) };
                lhs_mcv = dst_mcv;
            } else {
                // Move LHS and RHS to register
                const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{});
                lhs_mcv = MCValue{ .register = regs[0] };
                rhs_mcv = MCValue{ .register = regs[1] };
                dst_mcv = lhs_mcv;

                branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
            }
        } else if (lhs_should_be_register) {
            // RHS is immediate
            if (lhs_is_register) {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) };
            } else {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) };
                lhs_mcv = dst_mcv;
            }
        } else if (rhs_should_be_register and can_swap_lhs_and_rhs) {
            // LHS is immediate
            if (rhs_is_register) {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) };
            } else {
                dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{}) };
                rhs_mcv = dst_mcv;
            }

            swap_lhs_and_rhs = true;
        } else unreachable; // binary operation on two immediates
    }

    // Move the operands to the newly allocated registers
    if (lhs_mcv == .register and !lhs_is_register) {
        try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs);
    }
    if (rhs_mcv == .register and !rhs_is_register) {
        try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
    }

    try self.genArmBinOpCode(
        dst_mcv.register,
        lhs_mcv,
        rhs_mcv,
        swap_lhs_and_rhs,
        op,
        signedness,
    );
    return dst_mcv;
}

fn genArmBinOpCode(
    self: *Self,
    dst_reg: Register,
    lhs_mcv: MCValue,
    rhs_mcv: MCValue,
    swap_lhs_and_rhs: bool,
    op: Air.Inst.Tag,
    signedness: std.builtin.Signedness,
) !void {
    assert(lhs_mcv == .register or rhs_mcv == .register);

    const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register;
    const op2 = if (swap_lhs_and_rhs) lhs_mcv else rhs_mcv;

    const operand = switch (op2) {
        .none => unreachable,
        .undef => unreachable,
        .dead, .unreach => unreachable,
        .compare_flags_unsigned => unreachable,
        .compare_flags_signed => unreachable,
        .ptr_stack_offset => unreachable,
        .ptr_embedded_in_code => unreachable,
        .immediate => |imm| Instruction.Operand.fromU32(@intCast(u32, imm)).?,
        .register => |reg| Instruction.Operand.reg(reg, Instruction.Operand.Shift.none),
        .stack_offset,
        .embedded_in_code,
        .memory,
        => unreachable,
    };

    switch (op) {
        .add => {
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.add(.al, dst_reg, op1, operand).toU32());
        },
        .sub => {
            if (swap_lhs_and_rhs) {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.rsb(.al, dst_reg, op1, operand).toU32());
            } else {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.sub(.al, dst_reg, op1, operand).toU32());
            }
        },
        .bool_and, .bit_and => {
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.@"and"(.al, dst_reg, op1, operand).toU32());
        },
        .bool_or, .bit_or => {
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, dst_reg, op1, operand).toU32());
        },
        .not, .xor => {
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.eor(.al, dst_reg, op1, operand).toU32());
        },
        .cmp_eq => {
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, op1, operand).toU32());
        },
        .shl => {
            assert(!swap_lhs_and_rhs);
            const shift_amount = switch (operand) {
                .Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
                .Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
            };
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.lsl(.al, dst_reg, op1, shift_amount).toU32());
        },
        .shr => {
            assert(!swap_lhs_and_rhs);
            const shift_amount = switch (operand) {
                .Register => |reg_op| Instruction.ShiftAmount.reg(@intToEnum(Register, reg_op.rm)),
                .Immediate => |imm_op| Instruction.ShiftAmount.imm(@intCast(u5, imm_op.imm)),
            };

            const shr = switch (signedness) {
                .signed => Instruction.asr,
                .unsigned => Instruction.lsr,
            };
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), shr(.al, dst_reg, op1, shift_amount).toU32());
        },
        else => unreachable, // not a binary instruction
    }
}

fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue {
    const lhs = try self.resolveInst(op_lhs);
    const rhs = try self.resolveInst(op_rhs);

    const lhs_is_register = lhs == .register;
    const rhs_is_register = rhs == .register;
    const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs);
    const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs);

    // Destination must be a register
    // LHS must be a register
    // RHS must be a register
    var dst_mcv: MCValue = undefined;
    var lhs_mcv: MCValue = lhs;
    var rhs_mcv: MCValue = rhs;

    // Allocate registers for operands and/or destination
    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
    if (reuse_lhs) {
        // Allocate 0 or 1 registers
        if (!rhs_is_register) {
            rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) };
            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
        }
        dst_mcv = lhs;
    } else if (reuse_rhs) {
        // Allocate 0 or 1 registers
        if (!lhs_is_register) {
            lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) };
            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv);
        }
        dst_mcv = rhs;
    } else {
        // Allocate 1 or 2 registers
        if (lhs_is_register and rhs_is_register) {
            dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{ lhs.register, rhs.register }) };
        } else if (lhs_is_register) {
            // Move RHS to register
            dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{lhs.register}) };
            rhs_mcv = dst_mcv;
        } else if (rhs_is_register) {
            // Move LHS to register
            dst_mcv = MCValue{ .register = try self.register_manager.allocReg(inst, &.{rhs.register}) };
            lhs_mcv = dst_mcv;
        } else {
            // Move LHS and RHS to register
            const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? }, &.{});
            lhs_mcv = MCValue{ .register = regs[0] };
            rhs_mcv = MCValue{ .register = regs[1] };
            dst_mcv = lhs_mcv;

            branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv);
        }
    }

    // Move the operands to the newly allocated registers
    if (!lhs_is_register) {
        try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs);
    }
    if (!rhs_is_register) {
        try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs);
    }

    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32());
    return dst_mcv;
}

fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void {
    const ty_str = self.air.instructions.items(.data)[inst].ty_str;
    const zir = &self.mod_fn.owner_decl.getFileScope().zir;
@@ -1668,7 +1403,10 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
}

fn airBreakpoint(self: *Self) !void {
    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.brk(1).toU32());
    _ = try self.addInst(.{
        .tag = .brk,
        .data = .{ .imm16 = 1 },
    });
    return self.finishAirBookkeeping();
}

@@ -1736,7 +1474,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {

        try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });

        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
        _ = try self.addInst(.{
            .tag = .blr,
            .data = .{ .reg = .x30 },
        });
    } else if (func_value.castTag(.extern_fn)) |_| {
        return self.fail("TODO implement calling extern functions", .{});
    } else {
@@ -1789,14 +1530,22 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
                .memory = func.owner_decl.link.macho.local_sym_index,
            });
            // blr x30
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
            _ = try self.addInst(.{
                .tag = .blr,
                .data = .{ .reg = .x30 },
            });
        } else if (func_value.castTag(.extern_fn)) |func_payload| {
            const decl = func_payload.data;
            const n_strx = try macho_file.addExternFn(mem.spanZ(decl.name));
            const offset = blk: {
                const offset = @intCast(u32, self.code.items.len);
                // TODO add a pseudo-instruction
                const offset = @intCast(u32, self.mir_instructions.len);
                // bl
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.bl(0).toU32());
                // mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.bl(0).toU32());
                _ = try self.addInst(.{
                    .tag = .bl,
                    .data = .{ .nop = {} },
                });
                break :blk offset;
            };
            // Add relocation to the decl.
@@ -1857,7 +1606,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {

        try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });

        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
        _ = try self.addInst(.{
            .tag = .blr,
            .data = .{ .reg = .x30 },
        });
    } else if (func_value.castTag(.extern_fn)) |_| {
        return self.fail("TODO implement calling extern functions", .{});
    } else {
@@ -1899,8 +1651,11 @@ fn ret(self: *Self, mcv: MCValue) !void {
    const ret_ty = self.fn_type.fnReturnType();
    try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
    // Just add space for an instruction, patch this later
    try self.code.resize(self.code.items.len + 4);
    try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4);
    const index = try self.addInst(.{
        .tag = .nop,
        .data = .{ .nop = {} },
    });
    try self.exitlude_jump_relocs.append(self.gpa, index);
}

fn airRet(self: *Self, inst: Air.Inst.Index) !void {
@@ -1939,7 +1694,8 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {

fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
    const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
    try self.dbgAdvancePCAndLine(dbg_stmt.line, dbg_stmt.column);
    _ = dbg_stmt;
    // try self.dbgAdvancePCAndLine(dbg_stmt.line, dbg_stmt.column);
    return self.finishAirBookkeeping();
}

@@ -2090,19 +1846,18 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
    const loop = self.air.extraData(Air.Block, ty_pl.payload);
    const body = self.air.extra[loop.end..][0..loop.data.body_len];
    const start_index = self.code.items.len;
    const start_index = @intCast(u32, self.mir_instructions.len);
    try self.genBody(body);
    try self.jump(start_index);
    return self.finishAirBookkeeping();
}

/// Send control flow to the `index` of `self.code`.
fn jump(self: *Self, index: usize) !void {
    if (math.cast(i28, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.b(delta).toU32());
    } else |_| {
        return self.fail("TODO: enable larger branch offset", .{});
    }
/// Send control flow to `inst`.
fn jump(self: *Self, inst: Mir.Inst.Index) !void {
    _ = try self.addInst(.{
        .tag = .b,
        .data = .{ .inst = inst },
    });
}

fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
@@ -2140,19 +1895,8 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {

fn performReloc(self: *Self, reloc: Reloc) !void {
    switch (reloc) {
        .rel32 => |pos| {
            const amt = self.code.items.len - (pos + 4);
            // Here it would be tempting to implement testing for amt == 0 and then elide the
            // jump. However, that will cause a problem because other jumps may assume that they
            // can jump to this code. Or maybe I didn't understand something when I was debugging.
            // It could be worth another look. Anyway, that's why that isn't done here. Probably the
            // best place to elide jumps will be in semantic analysis, by inlining blocks that
            // only have 1 break instruction.
            const s32_amt = math.cast(i32, amt) catch
                return self.fail("unable to perform relocation: jump too far", .{});
            mem.writeIntLittle(i32, self.code.items[pos..][0..4], s32_amt);
        },
        .arm_branch => unreachable,
        .rel32 => return self.fail("TODO reloc.rel32 for {}", .{self.target.cpu.arch}),
        .arm_branch => return self.fail("TODO reloc.arm_branch for {}", .{self.target.cpu.arch}),
    }
}

@@ -2244,9 +1988,15 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
    }

    if (mem.eql(u8, asm_source, "svc #0")) {
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x0).toU32());
        _ = try self.addInst(.{
            .tag = .svc,
            .data = .{ .imm16 = 0x0 },
        });
    } else if (mem.eql(u8, asm_source, "svc #0x80")) {
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x80).toU32());
        _ = try self.addInst(.{
            .tag = .svc,
            .data = .{ .imm16 = 0x80 },
        });
    } else {
        return self.fail("TODO implement support for more aarch64 assembly instructions", .{});
    }
@@ -2333,6 +2083,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
            return self.fail("TODO implement set stack variable from embedded_in_code", .{});
        },
        .register => |reg| {
            _ = reg;

            const abi_size = ty.abiSize(self.target.*);
            const adj_off = stack_offset + abi_size;

@@ -2347,16 +2099,21 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                .aarch64_32 => .w29,
                else => unreachable,
            };
            const str = switch (abi_size) {
                1 => Instruction.strb,
                2 => Instruction.strh,
                4, 8 => Instruction.str,
            const tag: Mir.Inst.Tag = switch (abi_size) {
                1 => .strb,
                2 => .strh,
                4, 8 => .str,
                else => unreachable, // unexpected abi size
            };

            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), str(reg, rn, .{
                .offset = offset,
            }).toU32());
            _ = try self.addInst(.{
                .tag = tag,
                .data = .{ .load_store_register = .{
                    .rt = reg,
                    .rn = rn,
                    .offset = offset,
                } },
            });
        },
        else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}),
    }
@@ -2392,20 +2149,28 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
            }
        },
        .immediate => |x| {
            if (x <= math.maxInt(u16)) {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movz(reg, @intCast(u16, x), 0).toU32());
            } else if (x <= math.maxInt(u32)) {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movz(reg, @truncate(u16, x), 0).toU32());
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @intCast(u16, x >> 16), 16).toU32());
            } else if (x <= math.maxInt(u48)) {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movz(reg, @truncate(u16, x), 0).toU32());
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @truncate(u16, x >> 16), 16).toU32());
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @intCast(u16, x >> 32), 32).toU32());
            } else {
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movz(reg, @truncate(u16, x), 0).toU32());
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @truncate(u16, x >> 16), 16).toU32());
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @truncate(u16, x >> 32), 32).toU32());
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.movk(reg, @intCast(u16, x >> 48), 48).toU32());
            _ = try self.addInst(.{
                .tag = .movz,
                .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x) } },
            });

            if (x > math.maxInt(u16)) {
                _ = try self.addInst(.{
                    .tag = .movk,
                    .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 16), .hw = 1 } },
                });
            }
            if (x > math.maxInt(u32)) {
                _ = try self.addInst(.{
                    .tag = .movk,
                    .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 32), .hw = 2 } },
                });
            }
            if (x > math.maxInt(u48)) {
                _ = try self.addInst(.{
                    .tag = .movk,
                    .data = .{ .r_imm16_sh = .{ .rd = reg, .imm16 = @truncate(u16, x >> 48), .hw = 3 } },
                });
            }
        },
        .register => |src_reg| {
@@ -2414,30 +2179,36 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                return;

            // mov reg, src_reg
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(
                reg,
                .xzr,
                src_reg,
                Instruction.Shift.none,
            ).toU32());
            _ = try self.addInst(.{
                .tag = .mov_register,
                .data = .{ .rr = .{ .rd = reg, .rn = src_reg } },
            });
        },
        .memory => |addr| {
            if (self.bin_file.options.pie) {
                // PC-relative displacement to the entry in the GOT table.
                // adrp
                const offset = @intCast(u32, self.code.items.len);
                mem.writeIntLittle(
                    u32,
                    try self.code.addManyAsArray(4),
                    Instruction.adrp(reg, 0).toU32(),
                );
                // TODO add a pseudo instruction
                const offset = @intCast(u32, self.mir_instructions.len);
                // mem.writeIntLittle(
                //     u32,
                //     try self.code.addManyAsArray(4),
                //     Instruction.adrp(reg, 0).toU32(),
                // );
                _ = try self.addInst(.{
                    .tag = .nop,
                    .data = .{ .nop = {} },
                });

                // ldr reg, reg, offset
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{
                    .register = .{
                _ = try self.addInst(.{
                    .tag = .ldr,
                    .data = .{ .load_store_register = .{
                        .rt = reg,
                        .rn = reg,
                        .offset = Instruction.LoadStoreOffset.imm(0),
                    },
                }).toU32());
                    } },
                });

                if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                    // TODO I think the reloc might be in the wrong place.
@@ -2469,7 +2240,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                // The value is in memory at a hard-coded address.
                // If the type is a pointer, it means the pointer address is at this memory location.
                try self.genSetReg(Type.initTag(.usize), reg, .{ .immediate = addr });
                mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .register = .{ .rn = reg } }).toU32());
                _ = try self.addInst(.{
                    .tag = .ldr,
                    .data = .{ .load_store_register = .{
                        .rt = reg,
                        .rn = reg,
                        .offset = Instruction.LoadStoreOffset.none,
                    } },
                });
            }
        },
        .stack_offset => |unadjusted_off| {
@@ -2489,22 +2267,22 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off }));

            switch (abi_size) {
                1, 2 => {
                    const ldr = switch (abi_size) {
                        1 => Instruction.ldrb,
                        2 => Instruction.ldrh,
                1, 2, 4, 8 => {
                    const tag: Mir.Inst.Tag = switch (abi_size) {
                        1 => .ldrb,
                        2 => .ldrh,
                        4, 8 => .ldr,
                        else => unreachable, // unexpected abi size
                    };

                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), ldr(reg, rn, .{
                        .offset = offset,
                    }).toU32());
                },
                4, 8 => {
                    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .register = .{
                        .rn = rn,
                        .offset = offset,
                    } }).toU32());
                    _ = try self.addInst(.{
                        .tag = tag,
                        .data = .{ .load_store_register = .{
                            .rt = reg,
                            .rn = rn,
                            .offset = offset,
                        } },
                    });
                },
                else => return self.fail("TODO implement genSetReg other types abi_size={}", .{abi_size}),
            }
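A worked illustration of the immediate case of genSetReg above (the value is hypothetical, not from the commit): materializing x = 0x0000_00ab_cdef_1234 appends one movz and two movk MIR instructions; since x <= maxInt(u48), the hw = 3 movk is skipped because those top bits are zero.

    // chunk 0 (bits 0-15):  .movz  imm16 = 0x1234          => movz reg, #0x1234
    // x > maxInt(u16), chunk 1 (bits 16-31):
    //                       .movk  imm16 = 0xcdef, hw = 1  => movk reg, #0xcdef, lsl #16
    // x > maxInt(u32), chunk 2 (bits 32-47):
    //                       .movk  imm16 = 0x00ab, hw = 2  => movk reg, #0x00ab, lsl #32
    // x <= maxInt(u48): no movk for chunk 3 (bits 48-63)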
src/arch/aarch64/Emit.zig (new file, 199 lines)

@@ -0,0 +1,199 @@
//! This file contains the functionality for lowering AArch64 MIR into
//! machine code

const Emit = @This();
const std = @import("std");
const Mir = @import("Mir.zig");
const bits = @import("bits.zig");
const Instruction = bits.Instruction;

mir: Mir,
target: *const std.Target,
code: *std.ArrayList(u8),

pub fn emitMir(
    emit: *Emit,
) !void {
    const mir_tags = emit.mir.instructions.items(.tag);

    for (mir_tags) |tag, index| {
        const inst = @intCast(u32, index);
        switch (tag) {
            .add_immediate => try emit.mirAddSubtractImmediate(inst),
            .sub_immediate => try emit.mirAddSubtractImmediate(inst),

            .b => try emit.mirBranch(inst),
            .bl => try emit.mirBranch(inst),

            .blr => try emit.mirUnconditionalBranchRegister(inst),
            .ret => try emit.mirUnconditionalBranchRegister(inst),

            .brk => try emit.mirExceptionGeneration(inst),
            .svc => try emit.mirExceptionGeneration(inst),

            .ldp => try emit.mirLoadStoreRegisterPair(inst),
            .stp => try emit.mirLoadStoreRegisterPair(inst),

            .ldr => try emit.mirLoadStoreRegister(inst),
            .ldrb => try emit.mirLoadStoreRegister(inst),
            .ldrh => try emit.mirLoadStoreRegister(inst),
            .str => try emit.mirLoadStoreRegister(inst),
            .strb => try emit.mirLoadStoreRegister(inst),
            .strh => try emit.mirLoadStoreRegister(inst),

            .mov_register => try emit.mirMoveRegister(inst),
            .mov_to_from_sp => try emit.mirMoveRegister(inst),

            .movk => try emit.mirMoveWideImmediate(inst),
            .movz => try emit.mirMoveWideImmediate(inst),

            .nop => try emit.mirNop(),
        }
    }
}

fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
    const endian = emit.target.cpu.arch.endian();
    std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
}

fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const rr_imm12_sh = emit.mir.instructions.items(.data)[inst].rr_imm12_sh;

    switch (tag) {
        .add_immediate => try emit.writeInstruction(Instruction.add(
            rr_imm12_sh.rd,
            rr_imm12_sh.rn,
            rr_imm12_sh.imm12,
            rr_imm12_sh.sh == 1,
        )),
        .sub_immediate => try emit.writeInstruction(Instruction.sub(
            rr_imm12_sh.rd,
            rr_imm12_sh.rn,
            rr_imm12_sh.imm12,
            rr_imm12_sh.sh == 1,
        )),
        else => unreachable,
    }
}

fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const target_inst = emit.mir.instructions.items(.data)[inst].inst;
    _ = tag;
    _ = target_inst;

    switch (tag) {
        .b => @panic("Implement mirBranch"),
        .bl => @panic("Implement mirBranch"),
        else => unreachable,
    }
}

fn mirUnconditionalBranchRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const reg = emit.mir.instructions.items(.data)[inst].reg;

    switch (tag) {
        .blr => try emit.writeInstruction(Instruction.blr(reg)),
        .ret => try emit.writeInstruction(Instruction.ret(reg)),
        else => unreachable,
    }
}

fn mirExceptionGeneration(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const imm16 = emit.mir.instructions.items(.data)[inst].imm16;

    switch (tag) {
        .brk => try emit.writeInstruction(Instruction.brk(imm16)),
        .svc => try emit.writeInstruction(Instruction.svc(imm16)),
        else => unreachable,
    }
}

fn mirLoadStoreRegisterPair(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const load_store_register_pair = emit.mir.instructions.items(.data)[inst].load_store_register_pair;

    switch (tag) {
        .stp => try emit.writeInstruction(Instruction.stp(
            load_store_register_pair.rt,
            load_store_register_pair.rt2,
            load_store_register_pair.rn,
            load_store_register_pair.offset,
        )),
        .ldp => try emit.writeInstruction(Instruction.ldp(
            load_store_register_pair.rt,
            load_store_register_pair.rt2,
            load_store_register_pair.rn,
            load_store_register_pair.offset,
        )),
        else => unreachable,
    }
}

fn mirLoadStoreRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const load_store_register = emit.mir.instructions.items(.data)[inst].load_store_register;

    switch (tag) {
        .ldr => try emit.writeInstruction(Instruction.ldr(
            load_store_register.rt,
            .{ .register = .{ .rn = load_store_register.rn, .offset = load_store_register.offset } },
        )),
        .ldrb => try emit.writeInstruction(Instruction.ldrb(
            load_store_register.rt,
            load_store_register.rn,
            .{ .offset = load_store_register.offset },
        )),
        .ldrh => try emit.writeInstruction(Instruction.ldrh(
            load_store_register.rt,
            load_store_register.rn,
            .{ .offset = load_store_register.offset },
        )),
        .str => try emit.writeInstruction(Instruction.str(
            load_store_register.rt,
            load_store_register.rn,
            .{ .offset = load_store_register.offset },
        )),
        .strb => try emit.writeInstruction(Instruction.strb(
            load_store_register.rt,
            load_store_register.rn,
            .{ .offset = load_store_register.offset },
        )),
        .strh => try emit.writeInstruction(Instruction.strh(
            load_store_register.rt,
            load_store_register.rn,
            .{ .offset = load_store_register.offset },
        )),
        else => unreachable,
    }
}

fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const rr = emit.mir.instructions.items(.data)[inst].rr;

    switch (tag) {
        .mov_register => try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, Instruction.Shift.none)),
        .mov_to_from_sp => try emit.writeInstruction(Instruction.add(rr.rd, rr.rn, 0, false)),
        else => unreachable,
    }
}

fn mirMoveWideImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const r_imm16_sh = emit.mir.instructions.items(.data)[inst].r_imm16_sh;

    switch (tag) {
        .movz => try emit.writeInstruction(Instruction.movz(r_imm16_sh.rd, r_imm16_sh.imm16, @as(u6, r_imm16_sh.hw) << 4)),
        .movk => try emit.writeInstruction(Instruction.movk(r_imm16_sh.rd, r_imm16_sh.imm16, @as(u6, r_imm16_sh.hw) << 4)),
        else => unreachable,
    }
}

fn mirNop(emit: *Emit) !void {
    try emit.writeInstruction(Instruction.nop());
}
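One detail worth spelling out in mirMoveWideImmediate above: Data.r_imm16_sh.hw holds the 16-bit chunk index (0-3), while the movz/movk encoders take a bit-shift amount, so `@as(u6, r_imm16_sh.hw) << 4` widens the index and multiplies it by 16 — for example, hw = 2 becomes a shift of 32 (lsl #32).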
src/arch/aarch64/Mir.zig (new file, 158 lines)

@@ -0,0 +1,158 @@
//! Machine Intermediate Representation.
//! This data is produced by AArch64 Codegen or AArch64 assembly parsing.
//! These instructions have a 1:1 correspondence with machine code instructions
//! for the target. MIR can be lowered to source-annotated textual assembly code
//! instructions, or it can be lowered to machine code.
//! The main purpose of MIR is to postpone the assignment of offsets until Isel,
//! so that, for example, the smaller encodings of jump instructions can be used.

const Mir = @This();
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;

const bits = @import("bits.zig");
const Register = bits.Register;

instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,

pub const Inst = struct {
    tag: Tag,
    /// The meaning of this depends on `tag`.
    data: Data,

    pub const Tag = enum(u16) {
        /// Add (immediate)
        add_immediate,
        /// Branch
        b,
        /// Branch with Link
        bl,
        /// Branch with Link to Register
        blr,
        /// Breakpoint
        brk,
        /// Load Pair of Registers
        ldp,
        /// Load Register
        // TODO: split into ldr_immediate and ldr_register
        ldr,
        /// Load Register Byte
        // TODO: split into ldrb_immediate and ldrb_register
        ldrb,
        /// Load Register Halfword
        // TODO: split into ldrh_immediate and ldrh_register
        ldrh,
        /// Move (to/from SP)
        mov_to_from_sp,
        /// Move (register)
        mov_register,
        /// Move wide with keep
        movk,
        /// Move wide with zero
        movz,
        /// No Operation
        nop,
        /// Return from subroutine
        ret,
        /// Store Pair of Registers
        stp,
        /// Store Register
        // TODO: split into str_immediate and str_register
        str,
        /// Store Register Byte
        // TODO: split into strb_immediate and strb_register
        strb,
        /// Store Register Halfword
        // TODO: split into strh_immediate and strh_register
        strh,
        /// Subtract (immediate)
        sub_immediate,
        /// Supervisor Call
        svc,
    };

    /// The position of an MIR instruction within the `Mir` instructions array.
    pub const Index = u32;

    /// All instructions have a 4-byte payload, which is contained within
    /// this union. `Tag` determines which union field is active, as well as
    /// how to interpret the data within.
    pub const Data = union {
        /// No additional data
        ///
        /// Used by e.g. nop
        nop: void,
        /// Another instruction.
        ///
        /// Used by e.g. b
        inst: Index,
        /// A 16-bit immediate value.
        ///
        /// Used by e.g. svc
        imm16: u16,
        /// Index into `extra`. Meaning of what can be found there is context-dependent.
        payload: u32,
        /// A register
        ///
        /// Used by e.g. blr
        reg: Register,
        /// A register, an unsigned 16-bit immediate, and an optional shift
        ///
        /// Used by e.g. movz
        r_imm16_sh: struct {
            rd: Register,
            imm16: u16,
            hw: u2 = 0,
        },
        /// Two registers
        ///
        /// Used by e.g. mov_register
        rr: struct {
            rd: Register,
            rn: Register,
        },
        /// Two registers, an unsigned 12-bit immediate, and an optional shift
        ///
        /// Used by e.g. sub_immediate
        rr_imm12_sh: struct {
            rd: Register,
            rn: Register,
            imm12: u12,
            sh: u1 = 0,
        },
        /// Two registers and a LoadStoreOffset
        ///
        /// Used by e.g. str_register
        load_store_register: struct {
            rt: Register,
            rn: Register,
            offset: bits.Instruction.LoadStoreOffset,
        },
        /// Three registers and a LoadStorePairOffset
        ///
        /// Used by e.g. stp
        load_store_register_pair: struct {
            rt: Register,
            rt2: Register,
            rn: Register,
            offset: bits.Instruction.LoadStorePairOffset,
        },
    };

    // Make sure we don't accidentally make instructions bigger than expected.
    // Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
    // comptime {
    //     if (builtin.mode != .Debug) {
    //         assert(@sizeOf(Inst) == 8);
    //     }
    // }
};

pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
    mir.instructions.deinit(gpa);
    gpa.free(mir.extra);
    mir.* = undefined;
}
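A usage illustration (hypothetical values, mirroring the call sites in CodeGen.zig above): each Tag implies which Data field is active — Data is an untagged union, so nothing enforces the pairing at the type level.

    // .ret reads Data.reg (return through the link register):
    const ret_lr = Mir.Inst{ .tag = .ret, .data = .{ .reg = .x30 } };

    // .svc reads Data.imm16:
    const svc_80 = Mir.Inst{ .tag = .svc, .data = .{ .imm16 = 0x80 } };

    // .sub_immediate reads Data.rr_imm12_sh (as in the prologue backpatch above,
    // where register 31 is written as .xzr):
    const sub_sp = Mir.Inst{
        .tag = .sub_immediate,
        .data = .{ .rr_imm12_sh = .{ .rd = .xzr, .rn = .xzr, .imm12 = 16 } },
    };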
src/codegen.zig

@@ -88,9 +88,10 @@ pub fn generateFunction(
        .wasm64 => unreachable, // has its own code path
        .arm => return Function(.arm).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .armeb => return Function(.armeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .aarch64 => return @import("arch/aarch64/CodeGen.zig").generate(.aarch64, bin_file, src_loc, func, air, liveness, code, debug_output),
        .aarch64_be => return @import("arch/aarch64/CodeGen.zig").generate(.aarch64_be, bin_file, src_loc, func, air, liveness, code, debug_output),
        .aarch64_32 => return @import("arch/aarch64/CodeGen.zig").generate(.aarch64_32, bin_file, src_loc, func, air, liveness, code, debug_output),
        .aarch64,
        .aarch64_be,
        .aarch64_32,
        => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.arc => return Function(.arc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.avr => return Function(.avr).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.bpfel => return Function(.bpfel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),