Mirror of https://github.com/ziglang/zig.git
Merge pull request #11304 from joachimschmidt557/stage2-aarch64
stage2 AArch64: remove MCValue.embedded_in_code
Commit bcd7eb012a
@@ -115,11 +115,6 @@ const MCValue = union(enum) {
     /// A pointer-sized integer that fits in a register.
     /// If the type is a pointer, this is the pointer address in virtual address space.
     immediate: u64,
-    /// The constant was emitted into the code, at this offset.
-    /// If the type is a pointer, it means the pointer address is embedded in the code.
-    embedded_in_code: usize,
-    /// The value is a pointer to a constant which was emitted into the code, at this offset.
-    ptr_embedded_in_code: usize,
     /// The value is in a target-specific register.
     register: Register,
    /// The value is in memory at a hard-coded address.
@@ -147,7 +142,7 @@ const MCValue = union(enum) {

    fn isMemory(mcv: MCValue) bool {
        return switch (mcv) {
-            .embedded_in_code, .memory, .stack_offset => true,
+            .memory, .stack_offset => true,
            else => false,
        };
    }
@@ -166,12 +161,10 @@ const MCValue = union(enum) {
            .dead => unreachable,

            .immediate,
-            .embedded_in_code,
            .memory,
            .compare_flags_unsigned,
            .compare_flags_signed,
            .ptr_stack_offset,
-            .ptr_embedded_in_code,
            .undef,
            => false,

@@ -816,10 +809,9 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
    if (abi_align > self.stack_align)
        self.stack_align = abi_align;
    // TODO find a free slot instead of always appending
-    const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align);
-    self.next_stack_offset = offset + abi_size;
-    if (self.next_stack_offset > self.max_end_stack)
-        self.max_end_stack = self.next_stack_offset;
+    const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
+    self.next_stack_offset = offset;
+    self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset);
    try self.stack.putNoClobber(self.gpa, offset, .{
        .inst = inst,
        .size = abi_size,
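A minimal sketch of the new allocMem arithmetic, assuming the same-era standard library (std.mem.alignForwardGeneric was later renamed): the recorded offset now names the end of the slot rather than its start, which is what lets allocMemPtr below reserve offset 0 for zero-sized allocations.

const std = @import("std");

// Align the running offset up to the slot's start, then step past the
// slot: the stored offset points at the slot's end.
fn allocEnd(next_stack_offset: u32, abi_size: u32, abi_align: u32) u32 {
    return std.mem.alignForwardGeneric(u32, next_stack_offset, abi_align) + abi_size;
}

test "non-zero-sized slots always end at an offset greater than 0" {
    try std.testing.expectEqual(@as(u32, 16), allocEnd(5, 8, 4)); // starts at 8, ends at 16
    try std.testing.expectEqual(@as(u32, 8), allocEnd(0, 8, 8)); // even the first slot ends past 0
}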
@@ -832,9 +824,10 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
    const elem_ty = self.air.typeOfIndex(inst).elemType();

    if (!elem_ty.hasRuntimeBits()) {
        // As this stack item will never be dereferenced at runtime,
-        // return the current stack offset
-        return self.next_stack_offset;
+        // return the stack offset 0. Stack offset 0 will be where all
+        // zero-sized stack allocations live as non-zero-sized
+        // allocations will always have an offset > 0.
+        return @as(u32, 0);
    }

    const target = self.target.*;
@@ -1058,7 +1051,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {

        _ = try self.addInst(.{
            .tag = .mvn,
-            .data = .{ .rr_imm6_shift = .{
+            .data = .{ .rr_imm6_logical_shift = .{
                .rd = dest_reg,
                .rm = op_reg,
                .imm6 = 0,
@@ -1104,8 +1097,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
        const ptr_bytes = @divExact(ptr_bits, 8);

        const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
-        try self.genSetStack(ptr_ty, stack_offset + ptr_bytes, ptr);
-        try self.genSetStack(len_ty, stack_offset, len);
+        try self.genSetStack(ptr_ty, stack_offset, ptr);
+        try self.genSetStack(len_ty, stack_offset - ptr_bytes, len);
        break :result MCValue{ .stack_offset = stack_offset };
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
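A sketch of why the two stores swap, under the assumption (not stated in the patch) that a stack offset o addresses the byte fp - o: with end-pointing offsets, writing ptr at stack_offset and len at stack_offset - ptr_bytes puts the pointer word at the lower address, preserving Zig's { ptr, len } slice layout.

const std = @import("std");

// Hypothetical address model: a stack offset o names the byte fp - o.
fn addr(fp: u64, offset: u64) u64 {
    return fp - offset;
}

test "ptr lands below len, as slice layout requires" {
    const fp: u64 = 0x1000;
    const ptr_bytes: u64 = 8;
    const stack_offset: u64 = 16; // end of the two-word slot
    try std.testing.expect(addr(fp, stack_offset) < addr(fp, stack_offset - ptr_bytes));
}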
@@ -1188,6 +1181,7 @@ fn binOpRegister(
        .sub,
        .ptr_sub,
        => .sub_shifted_register,
+        .cmp_eq => .cmp_shifted_register,
        .mul => .mul,
        .bit_and,
        .bool_and,
@@ -1219,6 +1213,12 @@ fn binOpRegister(
            .imm6 = 0,
            .shift = .lsl,
        } },
+        .cmp_eq => .{ .rr_imm6_shift = .{
+            .rn = lhs_reg,
+            .rm = rhs_reg,
+            .imm6 = 0,
+            .shift = .lsl,
+        } },
        .mul,
        .shl,
        .shl_exact,
@@ -1296,20 +1296,23 @@ fn binOpImmediate(
    };
    defer self.register_manager.unfreezeRegs(&.{lhs_reg});

-    const dest_reg = if (maybe_inst) |inst| blk: {
-        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-
-        if (lhs_is_register and self.reuseOperand(
-            inst,
-            if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
-            if (lhs_and_rhs_swapped) 1 else 0,
-            lhs,
-        )) {
-            break :blk lhs_reg;
-        } else {
-            break :blk try self.register_manager.allocReg(inst);
-        }
-    } else try self.register_manager.allocReg(null);
+    const dest_reg = switch (tag) {
+        .cmp_eq => undefined, // cmp has no destination register
+        else => if (maybe_inst) |inst| blk: {
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+
+            if (lhs_is_register and self.reuseOperand(
+                inst,
+                if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
+                if (lhs_and_rhs_swapped) 1 else 0,
+                lhs,
+            )) {
+                break :blk lhs_reg;
+            } else {
+                break :blk try self.register_manager.allocReg(inst);
+            }
+        } else try self.register_manager.allocReg(null),
+    };

    if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);

@@ -1325,6 +1328,7 @@ fn binOpImmediate(
            .signed => Mir.Inst.Tag.asr_immediate,
            .unsigned => Mir.Inst.Tag.lsr_immediate,
        },
+        .cmp_eq => .cmp_immediate,
        else => unreachable,
    };
    const mir_data: Mir.Inst.Data = switch (tag) {
@@ -1344,6 +1348,10 @@ fn binOpImmediate(
            .rn = lhs_reg,
            .shift = @intCast(u6, rhs.immediate),
        } },
+        .cmp_eq => .{ .r_imm12_sh = .{
+            .rn = lhs_reg,
+            .imm12 = @intCast(u12, rhs.immediate),
+        } },
        else => unreachable,
    };

@@ -1381,6 +1389,7 @@ fn binOp(
        // Arithmetic operations on integers and floats
        .add,
        .sub,
+        .cmp_eq,
        => {
            switch (lhs_ty.zigTypeTag()) {
                .Float => return self.fail("TODO binary operations on floats", .{}),
@@ -1394,12 +1403,13 @@ fn binOp(
                    // operands
                    const lhs_immediate_ok = switch (tag) {
                        .add => lhs == .immediate and lhs.immediate <= std.math.maxInt(u12),
-                        .sub => false,
+                        .sub, .cmp_eq => false,
                        else => unreachable,
                    };
                    const rhs_immediate_ok = switch (tag) {
                        .add,
                        .sub,
+                        .cmp_eq,
                        => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12),
                        else => unreachable,
                    };
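Why only add folds a left-hand immediate: add is commutative, so an immediate on the left can be swapped to the right and encoded in the 12-bit immediate field, while sub and cmp fix their operand order. A self-contained sketch of the rule (the Tag enum here is illustrative, not the backend's):

const std = @import("std");

const Tag = enum { add, sub, cmp_eq };

// Only the commutative tag may fold an immediate appearing on the left.
fn lhsImmediateOk(tag: Tag, imm: u64) bool {
    return switch (tag) {
        .add => imm <= std.math.maxInt(u12),
        .sub, .cmp_eq => false,
    };
}

test "sub and cmp never fold a left-hand immediate" {
    try std.testing.expect(lhsImmediateOk(.add, 42));
    try std.testing.expect(!lhsImmediateOk(.sub, 42));
    try std.testing.expect(!lhsImmediateOk(.cmp_eq, 42));
}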
@@ -1505,7 +1515,13 @@ fn binOp(
                    const elem_size = elem_ty.abiSize(self.target.*);

                    if (elem_size == 1) {
-                        return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+                        const base_tag: Air.Inst.Tag = switch (tag) {
+                            .ptr_add => .add,
+                            .ptr_sub => .sub,
+                            else => unreachable,
+                        };
+
+                        return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
                    } else {
                        // convert the offset into a byte offset by
                        // multiplying it with elem_size
@@ -1714,14 +1730,12 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-        const ptr_bytes = @divExact(ptr_bits, 8);
        const mcv = try self.resolveInst(ty_op.operand);
        switch (mcv) {
            .dead, .unreach, .none => unreachable,
            .register => unreachable, // a slice doesn't fit in one register
            .stack_offset => |off| {
-                break :result MCValue{ .stack_offset = off + ptr_bytes };
+                break :result MCValue{ .stack_offset = off };
            },
            .memory => |addr| {
                break :result MCValue{ .memory = addr };
@@ -1742,7 +1756,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
            .dead, .unreach, .none => unreachable,
            .register => unreachable, // a slice doesn't fit in one register
            .stack_offset => |off| {
-                break :result MCValue{ .stack_offset = off };
+                break :result MCValue{ .stack_offset = off - ptr_bytes };
            },
            .memory => |addr| {
                break :result MCValue{ .memory = addr + ptr_bytes };
@@ -1762,7 +1776,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
        switch (mcv) {
            .dead, .unreach, .none => unreachable,
            .ptr_stack_offset => |off| {
-                break :result MCValue{ .ptr_stack_offset = off + ptr_bytes };
+                break :result MCValue{ .ptr_stack_offset = off - ptr_bytes };
            },
            else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
        }
@@ -1809,7 +1823,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
    defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register});

    const base_mcv: MCValue = switch (slice_mcv) {
-        .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off + 8 }) },
+        .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off }) },
        else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}),
    };
    self.register_manager.freezeRegs(&.{base_mcv.register});
@@ -1949,12 +1963,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
        .compare_flags_signed => unreachable,
        .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
        .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
-        .ptr_embedded_in_code => |off| {
-            try self.setRegOrMem(elem_ty, dst_mcv, .{ .embedded_in_code = off });
-        },
-        .embedded_in_code => {
-            return self.fail("TODO implement loading from MCValue.embedded_in_code", .{});
-        },
        .register => |addr_reg| {
            self.register_manager.freezeRegs(&.{addr_reg});
            defer self.register_manager.unfreezeRegs(&.{addr_reg});
@@ -1963,7 +1971,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
                .dead => unreachable,
                .undef => unreachable,
                .compare_flags_signed, .compare_flags_unsigned => unreachable,
-                .embedded_in_code => unreachable,
                .register => |dst_reg| {
                    try self.genLdrRegister(dst_reg, addr_reg, elem_size);
                },
@@ -2036,8 +2043,7 @@ fn genInlineMemcpy(
    // cmp count, len
    _ = try self.addInst(.{
        .tag = .cmp_shifted_register,
-        .data = .{ .rrr_imm6_shift = .{
-            .rd = .xzr,
+        .data = .{ .rr_imm6_shift = .{
            .rn = count,
            .rm = len,
            .imm6 = 0,
@@ -2227,12 +2233,6 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
        .ptr_stack_offset => |off| {
            try self.genSetStack(value_ty, off, value);
        },
-        .ptr_embedded_in_code => |off| {
-            try self.setRegOrMem(value_ty, .{ .embedded_in_code = off }, value);
-        },
-        .embedded_in_code => {
-            return self.fail("TODO implement storing to MCValue.embedded_in_code", .{});
-        },
        .register => |addr_reg| {
            self.register_manager.freezeRegs(&.{addr_reg});
            defer self.register_manager.unfreezeRegs(&.{addr_reg});
@@ -2297,13 +2297,10 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
        const mcv = try self.resolveInst(operand);
        const ptr_ty = self.air.typeOf(operand);
        const struct_ty = ptr_ty.childType();
-        const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
-        const struct_field_ty = struct_ty.structFieldType(index);
-        const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
        switch (mcv) {
            .ptr_stack_offset => |off| {
-                break :result MCValue{ .ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size };
+                break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
            },
            else => {
                const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
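The simplified field-pointer arithmetic, worked through (my derivation, assuming the fp - offset address model used above): a struct at offset off occupies [fp - off, fp - off + size), so its field at byte offset f sits at fp - off + f = fp - (off - f). Hence off - struct_field_offset suffices, and the struct and field sizes drop out of the old formula.

const std = @import("std");

test "off - field_offset addresses the field directly" {
    const fp: u64 = 0x1000;
    const off: u64 = 24; // a 16-byte struct spans [fp - 24, fp - 8)
    const field_offset: u64 = 8;
    const byte_addr = fp - off + field_offset; // first byte of the field
    try std.testing.expectEqual(byte_addr, fp - (off - field_offset));
}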
@@ -2445,7 +2442,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
            .immediate => unreachable,
            .unreach => unreachable,
            .dead => unreachable,
-            .embedded_in_code => unreachable,
            .memory => unreachable,
            .compare_flags_signed => unreachable,
            .compare_flags_unsigned => unreachable,
@@ -2461,9 +2457,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
            .ptr_stack_offset => {
                return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
            },
-            .ptr_embedded_in_code => {
-                return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
-            },
        }
    }

@@ -2615,107 +2608,48 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {

fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst))
-        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
-
-    const ty = self.air.typeOf(bin_op.lhs);
-
-    if (ty.abiSize(self.target.*) > 8) {
-        return self.fail("TODO cmp for types with size > 8", .{});
-    }
-
-    try self.spillCompareFlagsIfOccupied();
-    self.compare_flags_inst = inst;
-
-    const signedness: std.builtin.Signedness = blk: {
-        // by default we tell the operand type is unsigned (i.e. bools and enum values)
-        if (ty.zigTypeTag() != .Int) break :blk .unsigned;
-
-        // incase of an actual integer, we emit the correct signedness
-        break :blk ty.intInfo(self.target.*).signedness;
-    };
-
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
-    const result: MCValue = result: {
-        const lhs_is_register = lhs == .register;
-        const rhs_is_register = rhs == .register;
-        // lhs should always be a register
-        const rhs_should_be_register = switch (rhs) {
-            .immediate => |imm| imm < 0 or imm > std.math.maxInt(u12),
-            else => true,
-        };
-
-        if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-        defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register});
-        if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
-        defer if (rhs_is_register) self.register_manager.unfreezeRegs(&.{rhs.register});
-
-        var lhs_mcv = lhs;
-        var rhs_mcv = rhs;
-
-        // Allocate registers
-        if (rhs_should_be_register) {
-            if (!lhs_is_register and !rhs_is_register) {
-                const regs = try self.register_manager.allocRegs(2, .{
-                    Air.refToIndex(bin_op.lhs).?, Air.refToIndex(bin_op.rhs).?,
-                });
-                lhs_mcv = MCValue{ .register = regs[0] };
-                rhs_mcv = MCValue{ .register = regs[1] };
-            } else if (!rhs_is_register) {
-                rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?) };
-            } else if (!lhs_is_register) {
-                lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?) };
-            }
-        } else {
-            if (!lhs_is_register) {
-                lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?) };
-            }
-        }
-
-        // Move the operands to the newly allocated registers
-        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
-        if (lhs_mcv == .register and !lhs_is_register) {
-            try self.genSetReg(ty, lhs_mcv.register, lhs);
-            branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.lhs).?, lhs);
-        }
-        if (rhs_mcv == .register and !rhs_is_register) {
-            try self.genSetReg(ty, rhs_mcv.register, rhs);
-            branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.rhs).?, rhs);
-        }
-
-        // The destination register is not present in the cmp instruction
-        // The signedness of the integer does not matter for the cmp instruction
-        switch (rhs_mcv) {
-            .register => |reg| {
-                _ = try self.addInst(.{
-                    .tag = .cmp_shifted_register,
-                    .data = .{ .rrr_imm6_shift = .{
-                        .rd = .xzr,
-                        .rn = lhs_mcv.register,
-                        .rm = reg,
-                        .imm6 = 0,
-                        .shift = .lsl,
-                    } },
-                });
-            },
-            .immediate => |imm| {
-                _ = try self.addInst(.{
-                    .tag = .cmp_immediate,
-                    .data = .{ .r_imm12_sh = .{
-                        .rn = lhs_mcv.register,
-                        .imm12 = @intCast(u12, imm),
-                    } },
-                });
-            },
-            else => unreachable,
-        }
-
-        break :result switch (signedness) {
-            .signed => MCValue{ .compare_flags_signed = op },
-            .unsigned => MCValue{ .compare_flags_unsigned = op },
-        };
+    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const lhs = try self.resolveInst(bin_op.lhs);
+        const rhs = try self.resolveInst(bin_op.rhs);
+        const lhs_ty = self.air.typeOf(bin_op.lhs);
+
+        var int_buffer: Type.Payload.Bits = undefined;
+        const int_ty = switch (lhs_ty.zigTypeTag()) {
+            .Vector => return self.fail("TODO AArch64 cmp vectors", .{}),
+            .Enum => lhs_ty.intTagType(&int_buffer),
+            .Int => lhs_ty,
+            .Bool => Type.initTag(.u1),
+            .Pointer => Type.usize,
+            .ErrorSet => Type.initTag(.u16),
+            .Optional => blk: {
+                var opt_buffer: Type.Payload.ElemType = undefined;
+                const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                    break :blk Type.initTag(.u1);
+                } else if (lhs_ty.isPtrLikeOptional()) {
+                    break :blk Type.usize;
+                } else {
+                    return self.fail("TODO AArch64 cmp non-pointer optionals", .{});
+                }
+            },
+            .Float => return self.fail("TODO AArch64 cmp floats", .{}),
+            else => unreachable,
+        };
+
+        const int_info = int_ty.intInfo(self.target.*);
+        if (int_info.bits <= 64) {
+            _ = try self.binOp(.cmp_eq, inst, lhs, rhs, int_ty, int_ty);
+
+            try self.spillCompareFlagsIfOccupied();
+            self.compare_flags_inst = inst;
+
+            break :result switch (int_info.signedness) {
+                .signed => MCValue{ .compare_flags_signed = op },
+                .unsigned => MCValue{ .compare_flags_unsigned = op },
+            };
+        } else {
+            return self.fail("TODO AArch64 cmp for ints > 64 bits", .{});
+        }
    };
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
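How the rewritten airCmp chooses its flags, as a stand-alone sketch (names are illustrative, not the backend's): every comparable type is first mapped to an integer type, and only genuinely signed integers select the signed compare flags.

const std = @import("std");

const Flags = enum { signed_flags, unsigned_flags };

fn flagsKind(comptime T: type) Flags {
    return switch (@typeInfo(T)) {
        .Int => |info| if (info.signedness == .signed) Flags.signed_flags else Flags.unsigned_flags,
        // bools, enums, pointers and error sets all compare as unsigned integers
        else => Flags.unsigned_flags,
    };
}

test "only signed ints use the signed compare flags" {
    try std.testing.expectEqual(Flags.signed_flags, flagsKind(i32));
    try std.testing.expectEqual(Flags.unsigned_flags, flagsKind(u8));
    try std.testing.expectEqual(Flags.unsigned_flags, flagsKind(bool));
}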
@@ -3384,18 +3318,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
        .compare_flags_signed,
        .immediate,
        .ptr_stack_offset,
-        .ptr_embedded_in_code,
        => {
            const reg = try self.copyToTmpRegister(ty, mcv);
            return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
        },
-        .embedded_in_code => |code_offset| {
-            _ = code_offset;
-            return self.fail("TODO implement set stack variable from embedded_in_code", .{});
-        },
        .register => |reg| {
-            const adj_off = stack_offset + abi_size;
-
            switch (abi_size) {
                1, 2, 4, 8 => {
                    const tag: Mir.Inst.Tag = switch (abi_size) {
@@ -3410,7 +3337,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                        .tag = tag,
                        .data = .{ .load_store_stack = .{
                            .rt = rt,
-                            .offset = @intCast(u32, adj_off),
+                            .offset = @intCast(u32, stack_offset),
                        } },
                    });
                },
@@ -3495,7 +3422,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
    switch (mcv) {
        .dead => unreachable,
-        .ptr_embedded_in_code => unreachable,
        .unreach, .none => return, // Nothing to do.
        .undef => {
            if (!self.wantSafety())
@@ -3507,13 +3433,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                else => unreachable, // unexpected register size
            }
        },
-        .ptr_stack_offset => |unadjusted_off| {
+        .ptr_stack_offset => |off| {
            // TODO: maybe addressing from sp instead of fp
-            const elem_ty = ty.childType();
-            const abi_size = elem_ty.abiSize(self.target.*);
-            const adj_off = unadjusted_off + abi_size;
-
-            const imm12 = math.cast(u12, adj_off) catch
+            const imm12 = math.cast(u12, off) catch
                return self.fail("TODO larger stack offsets", .{});

            _ = try self.addInst(.{
@@ -3603,9 +3525,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
            try self.genSetReg(ty, reg, .{ .immediate = addr });
            try self.genLdrRegister(reg, reg, ty.abiSize(self.target.*));
        },
-        .stack_offset => |unadjusted_off| {
+        .stack_offset => |off| {
            const abi_size = ty.abiSize(self.target.*);
-            const adj_off = unadjusted_off + abi_size;

            switch (abi_size) {
                1, 2, 4, 8 => {
@@ -3625,7 +3546,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                        .tag = tag,
                        .data = .{ .load_store_stack = .{
                            .rt = rt,
-                            .offset = @intCast(u32, adj_off),
+                            .offset = @intCast(u32, off),
                        } },
                    });
                },
@@ -3633,7 +3554,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                else => unreachable,
            }
        },
        else => return self.fail("TODO implement genSetReg for aarch64 {}", .{mcv}),
    }
}

@@ -3661,8 +3581,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
        const ptr_bytes = @divExact(ptr_bits, 8);

        const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
-        try self.genSetStack(ptr_ty, stack_offset + ptr_bytes, ptr);
-        try self.genSetStack(Type.initTag(.usize), stack_offset, .{ .immediate = array_len });
+        try self.genSetStack(ptr_ty, stack_offset, ptr);
+        try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len });
        break :result MCValue{ .stack_offset = stack_offset };
    };
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });

@@ -650,17 +650,32 @@ fn mirLogicalImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {

fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
-    const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
-    const rd = rrr_imm6_shift.rd;
-    const rn = rrr_imm6_shift.rn;
-    const rm = rrr_imm6_shift.rm;
-    const shift = rrr_imm6_shift.shift;
-    const imm6 = rrr_imm6_shift.imm6;
-
    switch (tag) {
-        .add_shifted_register => try emit.writeInstruction(Instruction.addShiftedRegister(rd, rn, rm, shift, imm6)),
-        .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(rd, rn, rm, shift, imm6)),
-        .sub_shifted_register => try emit.writeInstruction(Instruction.subShiftedRegister(rd, rn, rm, shift, imm6)),
+        .add_shifted_register,
+        .sub_shifted_register,
+        => {
+            const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
+            const rd = rrr_imm6_shift.rd;
+            const rn = rrr_imm6_shift.rn;
+            const rm = rrr_imm6_shift.rm;
+            const shift = rrr_imm6_shift.shift;
+            const imm6 = rrr_imm6_shift.imm6;
+
+            switch (tag) {
+                .add_shifted_register => try emit.writeInstruction(Instruction.addShiftedRegister(rd, rn, rm, shift, imm6)),
+                .sub_shifted_register => try emit.writeInstruction(Instruction.subShiftedRegister(rd, rn, rm, shift, imm6)),
+                else => unreachable,
+            }
+        },
+        .cmp_shifted_register => {
+            const rr_imm6_shift = emit.mir.instructions.items(.data)[inst].rr_imm6_shift;
+            const rn = rr_imm6_shift.rn;
+            const rm = rr_imm6_shift.rm;
+            const shift = rr_imm6_shift.shift;
+            const imm6 = rr_imm6_shift.imm6;
+
+            try emit.writeInstruction(Instruction.subsShiftedRegister(.xzr, rn, rm, shift, imm6));
+        },
        else => unreachable,
    }
}
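Background on the cmp arm above (an architectural fact, not something the patch states): cmp is AArch64's alias for subs with the zero register as destination, so the emitter writes subsShiftedRegister with .xzr and the cmp payload needs no rd field. A toy model of the alias:

const std = @import("std");

// subs computes the difference and sets flags; writing to xzr discards
// the difference, leaving only the flags — which is exactly cmp.
fn cmpSetsZeroFlag(rn: u64, rm: u64) bool {
    const difference = rn -% rm;
    return difference == 0; // the Z flag
}

test "cmp is subs with the result discarded" {
    try std.testing.expect(cmpSetsZeroFlag(7, 7));
    try std.testing.expect(!cmpSetsZeroFlag(7, 6));
}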
@@ -896,8 +911,13 @@ fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
            try emit.writeInstruction(Instruction.add(rr.rd, rr.rn, 0, false));
        },
        .mvn => {
-            const rr_imm6_shift = emit.mir.instructions.items(.data)[inst].rr_imm6_shift;
-            try emit.writeInstruction(Instruction.ornShiftedRegister(rr_imm6_shift.rd, .xzr, rr_imm6_shift.rm, rr_imm6_shift.shift, rr_imm6_shift.imm6));
+            const rr_imm6_logical_shift = emit.mir.instructions.items(.data)[inst].rr_imm6_logical_shift;
+            const rd = rr_imm6_logical_shift.rd;
+            const rm = rr_imm6_logical_shift.rm;
+            const shift = rr_imm6_logical_shift.shift;
+            const imm6 = rr_imm6_logical_shift.imm6;
+
+            try emit.writeInstruction(Instruction.ornShiftedRegister(rd, .xzr, rm, shift, imm6));
        },
        else => unreachable,
    }

@@ -249,11 +249,20 @@ pub const Inst = struct {
            imm12: u12,
            sh: u1 = 0,
        },
        /// Two registers and a shift (shift type and 6-bit amount)
        ///
-        /// Used by e.g. mvn
-        rr_imm6_shift: struct {
+        /// Used by e.g. cmp_shifted_register
+        rr_imm6_shift: struct {
+            rn: Register,
+            rm: Register,
+            imm6: u6,
+            shift: bits.Instruction.AddSubtractShiftedRegisterShift,
+        },
+        /// Two registers and a shift (logical instruction version)
+        /// (shift type and 6-bit amount)
+        ///
+        /// Used by e.g. mvn
+        rr_imm6_logical_shift: struct {
            rd: Register,
            rm: Register,
            imm6: u6,
@@ -287,7 +296,7 @@ pub const Inst = struct {
        },
        /// Three registers and a shift (shift type and 6-bit amount)
        ///
-        /// Used by e.g. cmp_shifted_register
+        /// Used by e.g. add_shifted_register
        rrr_imm6_shift: struct {
            rd: Register,
            rn: Register,

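Why the Mir payload is split rather than shared (architectural background, not stated in the patch): add/sub shifted-register encodings accept only LSL, LSR and ASR, while the logical group backing mvn also accepts ROR, so the two shift enums cannot be one type. A sketch of the assumed shapes, mirroring the bits.Instruction name referenced above (the logical enum's name is a guess):

// Shift field encodings per the AArch64 ISA:
const AddSubtractShiftedRegisterShift = enum(u2) { lsl = 0b00, lsr = 0b01, asr = 0b10 }; // 0b11 is reserved
const LogicalShiftedRegisterShift = enum(u2) { lsl = 0b00, lsr = 0b01, asr = 0b10, ror = 0b11 };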
@@ -33,6 +33,8 @@ test "optional pointer to size zero struct" {
}

test "equality compare optional pointers" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
    try testNullPtrsEql();
    comptime try testNullPtrsEql();
}