Mirror of https://github.com/ziglang/zig.git (synced 2026-02-12 20:37:54 +00:00)
Merge pull request #11574 from ziglang/stage2-aarch64
stage2,aarch64: basic overflow arithmetic support
This commit is contained in: commit 13d1798ea0
@@ -102,9 +102,12 @@ air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init,
const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};

const MCValue = union(enum) {
/// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
/// TODO Look into deleting this tag and using `dead` instead, since every use
/// of MCValue.none should be instead looking at the type and noticing it is 0 bits.
/// No runtime bits. `void` types, empty structs, u0, enums with 1
/// tag, etc.
///
/// TODO Look into deleting this tag and using `dead` instead,
/// since every use of MCValue.none should be instead looking at
/// the type and noticing it is 0 bits.
none,
/// Control flow will not allow this value to be observed.
unreach,
@@ -113,28 +116,56 @@ const MCValue = union(enum) {
/// The value is undefined.
undef,
/// A pointer-sized integer that fits in a register.
/// If the type is a pointer, this is the pointer address in virtual address space.
///
/// If the type is a pointer, this is the pointer address in
/// virtual address space.
immediate: u64,
/// The value is in a target-specific register.
register: Register,
/// The value is a tuple { wrapped: u32, overflow: u1 } where
/// wrapped is stored in the register and the overflow bit is
/// stored in the C flag of the CPSR.
///
/// This MCValue is only generated by a add_with_overflow or
/// sub_with_overflow instruction operating on u32.
register_c_flag: Register,
/// The value is a tuple { wrapped: i32, overflow: u1 } where
/// wrapped is stored in the register and the overflow bit is
/// stored in the V flag of the CPSR.
///
/// This MCValue is only generated by a add_with_overflow or
/// sub_with_overflow instruction operating on i32.
register_v_flag: Register,
/// The value is in memory at a hard-coded address.
/// If the type is a pointer, it means the pointer address is at this memory location.
///
/// If the type is a pointer, it means the pointer address is at
/// this memory location.
memory: u64,
/// The value is in memory referenced indirectly via a GOT entry index.
/// If the type is a pointer, it means the pointer is referenced indirectly via GOT.
/// When lowered, linker will emit relocations of type ARM64_RELOC_GOT_LOAD_PAGE21 and ARM64_RELOC_GOT_LOAD_PAGEOFF12.
/// The value is in memory referenced indirectly via a GOT entry
/// index.
///
/// If the type is a pointer, it means the pointer is referenced
/// indirectly via GOT. When lowered, linker will emit
/// relocations of type ARM64_RELOC_GOT_LOAD_PAGE21 and
/// ARM64_RELOC_GOT_LOAD_PAGEOFF12.
got_load: u32,
/// The value is in memory referenced directly via symbol index.
/// If the type is a pointer, it means the pointer is referenced directly via symbol index.
/// When lowered, linker will emit a relocation of type ARM64_RELOC_PAGE21 and ARM64_RELOC_PAGEOFF12.
///
/// If the type is a pointer, it means the pointer is referenced
/// directly via symbol index. When lowered, linker will emit a
/// relocation of type ARM64_RELOC_PAGE21 and
/// ARM64_RELOC_PAGEOFF12.
direct_load: u32,
/// The value is one of the stack variables.
/// If the type is a pointer, it means the pointer address is in the stack at this offset.
///
/// If the type is a pointer, it means the pointer address is in
/// the stack at this offset.
stack_offset: u32,
/// The value is a pointer to one of the stack variables (payload is stack offset).
/// The value is a pointer to one of the stack variables (payload
/// is stack offset).
ptr_stack_offset: u32,
/// The value is in the compare flags assuming an unsigned operation,
/// with this operator applied on top of it.
/// The value is in the compare flags assuming an unsigned
/// operation, with this operator applied on top of it.
compare_flags_unsigned: math.CompareOperator,
/// The value is in the compare flags assuming a signed operation,
/// with this operator applied on top of it.
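The two flag-carrying tags above are the core of this change: an add_with_overflow/sub_with_overflow result is kept as the wrapped value in a register plus the overflow bit in the C or V flag of the CPSR, and nothing is materialized until a later instruction actually reads the tuple. A minimal sketch of the semantics being modeled, written against the same builtin signature the behavior tests further down in this commit use (the sketch itself is not part of the diff):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "what register_c_flag / register_v_flag model" {
    // unsigned: the overflow bit corresponds to the C flag set by `adds`
    var wrapped: u32 = undefined;
    const carry = @addWithOverflow(u32, 0xffff_ffff, 1, &wrapped);
    try expect(carry and wrapped == 0);

    // signed: the overflow bit corresponds to the V flag set by `adds`
    var swrapped: i32 = undefined;
    const overflow = @addWithOverflow(i32, std.math.maxInt(i32), 1, &swrapped);
    try expect(overflow and swrapped == std.math.minInt(i32));
}
```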
@@ -546,8 +577,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.trunc_float
=> try self.airUnaryMath(inst),

.add_with_overflow => try self.airAddWithOverflow(inst),
.sub_with_overflow => try self.airSubWithOverflow(inst),
.add_with_overflow => try self.airOverflow(inst),
.sub_with_overflow => try self.airOverflow(inst),
.mul_with_overflow => try self.airMulWithOverflow(inst),
.shl_with_overflow => try self.airShlWithOverflow(inst),

@@ -716,8 +747,13 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
branch.inst_table.putAssumeCapacity(inst, .dead);
switch (prev_value) {
.register => |reg| {
const canon_reg = toCanonicalReg(reg);
self.register_manager.freeReg(canon_reg);
self.register_manager.freeReg(reg);
},
.register_c_flag,
.register_v_flag,
=> |reg| {
self.register_manager.freeReg(reg);
self.compare_flags_inst = null;
},
.compare_flags_signed, .compare_flags_unsigned => {
self.compare_flags_inst = null;
@@ -857,7 +893,13 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
const stack_mcv = try self.allocRegOrMem(inst, false);
log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
const reg_mcv = self.getResolvedInstValue(inst);
assert(reg == toCanonicalReg(reg_mcv.register));
switch (reg_mcv) {
.register,
.register_c_flag,
.register_v_flag,
=> |r| assert(reg.id() == r.id()),
else => unreachable, // not a register
}
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst, stack_mcv);
try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
@@ -868,7 +910,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
fn spillCompareFlagsIfOccupied(self: *Self) !void {
if (self.compare_flags_inst) |inst_to_save| {
const mcv = self.getResolvedInstValue(inst_to_save);
assert(mcv == .compare_flags_signed or mcv == .compare_flags_unsigned);
switch (mcv) {
.compare_flags_signed,
.compare_flags_unsigned,
.register_c_flag,
.register_v_flag,
=> {},
else => unreachable, // mcv doesn't occupy the compare flags
}

const new_mcv = try self.allocRegOrMem(inst_to_save, true);
try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
@ -1245,25 +1294,33 @@ fn binOpRegister(
|
||||
};
|
||||
defer self.register_manager.unfreezeRegs(&.{rhs_reg});
|
||||
|
||||
const dest_reg = if (maybe_inst) |inst| blk: {
|
||||
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
|
||||
const dest_reg = switch (mir_tag) {
|
||||
.cmp_shifted_register => undefined, // cmp has no destination register
|
||||
else => if (maybe_inst) |inst| blk: {
|
||||
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
|
||||
|
||||
if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
|
||||
break :blk lhs_reg;
|
||||
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
|
||||
break :blk rhs_reg;
|
||||
} else {
|
||||
const raw_reg = try self.register_manager.allocReg(inst);
|
||||
if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
|
||||
break :blk lhs_reg;
|
||||
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
|
||||
break :blk rhs_reg;
|
||||
} else {
|
||||
const raw_reg = try self.register_manager.allocReg(inst);
|
||||
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
|
||||
}
|
||||
} else blk: {
|
||||
const raw_reg = try self.register_manager.allocReg(null);
|
||||
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
|
||||
}
|
||||
} else try self.register_manager.allocReg(null);
|
||||
},
|
||||
};
|
||||
|
||||
if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
|
||||
if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
|
||||
|
||||
const mir_data: Mir.Inst.Data = switch (mir_tag) {
|
||||
.add_shifted_register,
|
||||
.adds_shifted_register,
|
||||
.sub_shifted_register,
|
||||
.subs_shifted_register,
|
||||
=> .{ .rrr_imm6_shift = .{
|
||||
.rd = dest_reg,
|
||||
.rn = lhs_reg,
|
||||
@ -1286,6 +1343,13 @@ fn binOpRegister(
|
||||
.rn = lhs_reg,
|
||||
.rm = rhs_reg,
|
||||
} },
|
||||
.smull,
|
||||
.umull,
|
||||
=> .{ .rrr = .{
|
||||
.rd = dest_reg.to64(),
|
||||
.rn = lhs_reg,
|
||||
.rm = rhs_reg,
|
||||
} },
|
||||
.and_shifted_register,
|
||||
.orr_shifted_register,
|
||||
.eor_shifted_register,
|
||||
@ -1368,14 +1432,19 @@ fn binOpImmediate(
|
||||
const raw_reg = try self.register_manager.allocReg(inst);
|
||||
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
|
||||
}
|
||||
} else try self.register_manager.allocReg(null),
|
||||
} else blk: {
|
||||
const raw_reg = try self.register_manager.allocReg(null);
|
||||
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
|
||||
},
|
||||
};
|
||||
|
||||
if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
|
||||
|
||||
const mir_data: Mir.Inst.Data = switch (mir_tag) {
|
||||
.add_immediate,
|
||||
.adds_immediate,
|
||||
.sub_immediate,
|
||||
.subs_immediate,
|
||||
=> .{ .rr_imm12_sh = .{
|
||||
.rd = dest_reg,
|
||||
.rn = lhs_reg,
|
||||
@ -1711,24 +1780,421 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
|
||||
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
|
||||
}
|
||||
|
||||
fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
_ = inst;
|
||||
return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch});
|
||||
}
|
||||
fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const tag = self.air.instructions.items(.tag)[inst];
|
||||
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
|
||||
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const lhs = try self.resolveInst(extra.lhs);
|
||||
const rhs = try self.resolveInst(extra.rhs);
|
||||
const lhs_ty = self.air.typeOf(extra.lhs);
|
||||
const rhs_ty = self.air.typeOf(extra.rhs);
|
||||
|
||||
fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
_ = inst;
|
||||
return self.fail("TODO implement airSubWithOverflow for {}", .{self.target.cpu.arch});
|
||||
const tuple_ty = self.air.typeOfIndex(inst);
|
||||
const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
|
||||
const tuple_align = tuple_ty.abiAlignment(self.target.*);
|
||||
const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
|
||||
|
||||
switch (lhs_ty.zigTypeTag()) {
|
||||
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
|
||||
.Int => {
|
||||
const mod = self.bin_file.options.module.?;
|
||||
assert(lhs_ty.eql(rhs_ty, mod));
|
||||
const int_info = lhs_ty.intInfo(self.target.*);
|
||||
switch (int_info.bits) {
|
||||
1...31, 33...63 => {
|
||||
const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
|
||||
|
||||
try self.spillCompareFlagsIfOccupied();
|
||||
self.compare_flags_inst = null;
|
||||
|
||||
const base_tag: Air.Inst.Tag = switch (tag) {
|
||||
.add_with_overflow => .add,
|
||||
.sub_with_overflow => .sub,
|
||||
else => unreachable,
|
||||
};
|
||||
const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
|
||||
const dest_reg = dest.register;
|
||||
self.register_manager.freezeRegs(&.{dest_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{dest_reg});
|
||||
|
||||
const raw_truncated_reg = try self.register_manager.allocReg(null);
|
||||
const truncated_reg = registerAlias(raw_truncated_reg, lhs_ty.abiSize(self.target.*));
|
||||
self.register_manager.freezeRegs(&.{truncated_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{truncated_reg});
|
||||
|
||||
// sbfx/ubfx truncated, dest, #0, #bits
|
||||
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
|
||||
|
||||
// cmp dest, truncated
|
||||
_ = try self.binOp(.cmp_eq, null, dest, .{ .register = truncated_reg }, Type.usize, Type.usize);
|
||||
|
||||
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
|
||||
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
|
||||
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
},
|
||||
32, 64 => {
|
||||
// Only say yes if the operation is
|
||||
// commutative, i.e. we can swap both of the
|
||||
// operands
|
||||
const lhs_immediate_ok = switch (tag) {
|
||||
.add_with_overflow => lhs == .immediate and lhs.immediate <= std.math.maxInt(u12),
|
||||
.sub_with_overflow => false,
|
||||
else => unreachable,
|
||||
};
|
||||
const rhs_immediate_ok = switch (tag) {
|
||||
.add_with_overflow,
|
||||
.sub_with_overflow,
|
||||
=> rhs == .immediate and rhs.immediate <= std.math.maxInt(u12),
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
const mir_tag_register: Mir.Inst.Tag = switch (tag) {
|
||||
.add_with_overflow => .adds_shifted_register,
|
||||
.sub_with_overflow => .subs_shifted_register,
|
||||
else => unreachable,
|
||||
};
|
||||
const mir_tag_immediate: Mir.Inst.Tag = switch (tag) {
|
||||
.add_with_overflow => .adds_immediate,
|
||||
.sub_with_overflow => .subs_immediate,
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
try self.spillCompareFlagsIfOccupied();
|
||||
self.compare_flags_inst = inst;
|
||||
|
||||
const dest = blk: {
|
||||
if (rhs_immediate_ok) {
|
||||
break :blk try self.binOpImmediate(mir_tag_immediate, null, lhs, rhs, lhs_ty, false);
|
||||
} else if (lhs_immediate_ok) {
|
||||
// swap lhs and rhs
|
||||
break :blk try self.binOpImmediate(mir_tag_immediate, null, rhs, lhs, rhs_ty, true);
|
||||
} else {
|
||||
break :blk try self.binOpRegister(mir_tag_register, null, lhs, rhs, lhs_ty, rhs_ty);
|
||||
}
|
||||
};
|
||||
|
||||
switch (int_info.signedness) {
|
||||
.unsigned => break :result MCValue{ .register_c_flag = dest.register },
|
||||
.signed => break :result MCValue{ .register_v_flag = dest.register },
|
||||
}
|
||||
},
|
||||
else => return self.fail("TODO overflow operations on integers > u32/i32", .{}),
|
||||
}
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
};
|
||||
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
|
||||
}
|
||||
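For bit-widths other than 32 and 64, airOverflow above computes the result at register width, truncates it back to the operand width with sbfx/ubfx, and compares the truncated value against the untruncated one; a mismatch (the `ne` compare-flags result) is what gets stored as the overflow bit. A value-level sketch of that truncate-and-compare strategy, using a hypothetical helper and the same era's builtins (not part of the commit):

```zig
const std = @import("std");

fn addWithOverflowSmall(comptime T: type, a: T, b: T) struct { wrapped: T, overflow: u1 } {
    const bits = @typeInfo(T).Int.bits;
    const Wide = std.meta.Int(@typeInfo(T).Int.signedness, bits * 2);
    const full = @as(Wide, a) + @as(Wide, b); // add at register width
    const truncated = @truncate(T, full); // sbfx/ubfx truncated, dest, #0, #bits
    // cmp dest, truncated; overflow iff they differ
    return .{ .wrapped = truncated, .overflow = @boolToInt(full != @as(Wide, truncated)) };
}

test "truncate-and-compare overflow detection" {
    try std.testing.expect(addWithOverflowSmall(u5, 30, 1).overflow == 0);
    try std.testing.expect(addWithOverflowSmall(u5, 30, 2).overflow == 1);
    try std.testing.expect(addWithOverflowSmall(i14, 0x1fff, 1).overflow == 1);
}
```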
|
||||
fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
_ = inst;
|
||||
return self.fail("TODO implement airMulWithOverflow for {}", .{self.target.cpu.arch});
|
||||
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
|
||||
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
|
||||
const result: MCValue = result: {
|
||||
const lhs = try self.resolveInst(extra.lhs);
|
||||
const rhs = try self.resolveInst(extra.rhs);
|
||||
const lhs_ty = self.air.typeOf(extra.lhs);
|
||||
const rhs_ty = self.air.typeOf(extra.rhs);
|
||||
|
||||
const tuple_ty = self.air.typeOfIndex(inst);
|
||||
const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
|
||||
const tuple_align = tuple_ty.abiAlignment(self.target.*);
|
||||
const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
|
||||
|
||||
switch (lhs_ty.zigTypeTag()) {
|
||||
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
|
||||
.Int => {
|
||||
const int_info = lhs_ty.intInfo(self.target.*);
|
||||
|
||||
if (int_info.bits <= 32) {
|
||||
const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
|
||||
|
||||
try self.spillCompareFlagsIfOccupied();
|
||||
self.compare_flags_inst = null;
|
||||
|
||||
const base_tag: Mir.Inst.Tag = switch (int_info.signedness) {
|
||||
.signed => .smull,
|
||||
.unsigned => .umull,
|
||||
};
|
||||
|
||||
const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
|
||||
const dest_reg = dest.register;
|
||||
self.register_manager.freezeRegs(&.{dest_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{dest_reg});
|
||||
|
||||
const truncated_reg = try self.register_manager.allocReg(null);
|
||||
self.register_manager.freezeRegs(&.{truncated_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{truncated_reg});
|
||||
|
||||
try self.truncRegister(
|
||||
dest_reg.to32(),
|
||||
truncated_reg.to32(),
|
||||
int_info.signedness,
|
||||
int_info.bits,
|
||||
);
|
||||
|
||||
switch (int_info.signedness) {
|
||||
.signed => {
|
||||
_ = try self.addInst(.{
|
||||
.tag = .cmp_extended_register,
|
||||
.data = .{ .rr_extend_shift = .{
|
||||
.rn = dest_reg.to64(),
|
||||
.rm = truncated_reg.to32(),
|
||||
.ext_type = .sxtw,
|
||||
.imm3 = 0,
|
||||
} },
|
||||
});
|
||||
},
|
||||
.unsigned => {
|
||||
_ = try self.addInst(.{
|
||||
.tag = .cmp_extended_register,
|
||||
.data = .{ .rr_extend_shift = .{
|
||||
.rn = dest_reg.to64(),
|
||||
.rm = truncated_reg.to32(),
|
||||
.ext_type = .uxtw,
|
||||
.imm3 = 0,
|
||||
} },
|
||||
});
|
||||
},
|
||||
}
|
||||
|
||||
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
|
||||
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{
|
||||
.compare_flags_unsigned = .neq,
|
||||
});
|
||||
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
} else if (int_info.bits <= 64) {
|
||||
const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
|
||||
|
||||
try self.spillCompareFlagsIfOccupied();
|
||||
self.compare_flags_inst = null;
|
||||
|
||||
// TODO this should really be put in a helper similar to `binOpRegister`
|
||||
const lhs_is_register = lhs == .register;
|
||||
const rhs_is_register = rhs == .register;
|
||||
|
||||
if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
|
||||
if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
|
||||
|
||||
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
|
||||
const raw_reg = try self.register_manager.allocReg(null);
|
||||
const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
|
||||
self.register_manager.freezeRegs(&.{reg});
|
||||
break :blk reg;
|
||||
};
|
||||
defer self.register_manager.unfreezeRegs(&.{lhs_reg});
|
||||
|
||||
const rhs_reg = if (rhs_is_register) rhs.register else blk: {
|
||||
const raw_reg = try self.register_manager.allocReg(null);
|
||||
const reg = registerAlias(raw_reg, rhs_ty.abiAlignment(self.target.*));
|
||||
self.register_manager.freezeRegs(&.{reg});
|
||||
break :blk reg;
|
||||
};
|
||||
defer self.register_manager.unfreezeRegs(&.{rhs_reg});
|
||||
|
||||
if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
|
||||
if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
|
||||
|
||||
// TODO reuse operands
|
||||
const dest_reg = blk: {
|
||||
const raw_reg = try self.register_manager.allocReg(null);
|
||||
const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
|
||||
self.register_manager.freezeRegs(&.{reg});
|
||||
break :blk reg;
|
||||
};
|
||||
defer self.register_manager.unfreezeRegs(&.{dest_reg});
|
||||
|
||||
switch (int_info.signedness) {
|
||||
.signed => {
|
||||
// mul dest, lhs, rhs
|
||||
_ = try self.addInst(.{
|
||||
.tag = .mul,
|
||||
.data = .{ .rrr = .{
|
||||
.rd = dest_reg,
|
||||
.rn = lhs_reg,
|
||||
.rm = rhs_reg,
|
||||
} },
|
||||
});
|
||||
|
||||
const dest_high_reg = try self.register_manager.allocReg(null);
|
||||
self.register_manager.freezeRegs(&.{dest_high_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{dest_high_reg});
|
||||
|
||||
// smulh dest_high, lhs, rhs
|
||||
_ = try self.addInst(.{
|
||||
.tag = .smulh,
|
||||
.data = .{ .rrr = .{
|
||||
.rd = dest_high_reg,
|
||||
.rn = lhs_reg,
|
||||
.rm = rhs_reg,
|
||||
} },
|
||||
});
|
||||
|
||||
// cmp dest_high, dest, asr #63
|
||||
_ = try self.addInst(.{
|
||||
.tag = .cmp_shifted_register,
|
||||
.data = .{ .rr_imm6_shift = .{
|
||||
.rn = dest_high_reg,
|
||||
.rm = dest_reg,
|
||||
.imm6 = 63,
|
||||
.shift = .asr,
|
||||
} },
|
||||
});
|
||||
|
||||
const shift: u6 = @intCast(u6, @as(u7, 64) - @intCast(u7, int_info.bits));
|
||||
if (shift > 0) {
|
||||
// lsl dest_high, dest, #shift
|
||||
_ = try self.addInst(.{
|
||||
.tag = .lsl_immediate,
|
||||
.data = .{ .rr_shift = .{
|
||||
.rd = dest_high_reg,
|
||||
.rn = dest_reg,
|
||||
.shift = shift,
|
||||
} },
|
||||
});
|
||||
|
||||
// cmp dest, dest_high, #shift
|
||||
_ = try self.addInst(.{
|
||||
.tag = .cmp_shifted_register,
|
||||
.data = .{ .rr_imm6_shift = .{
|
||||
.rn = dest_reg,
|
||||
.rm = dest_high_reg,
|
||||
.imm6 = shift,
|
||||
.shift = .asr,
|
||||
} },
|
||||
});
|
||||
}
|
||||
},
|
||||
.unsigned => {
|
||||
const dest_high_reg = try self.register_manager.allocReg(null);
|
||||
self.register_manager.freezeRegs(&.{dest_high_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{dest_high_reg});
|
||||
|
||||
// umulh dest_high, lhs, rhs
|
||||
_ = try self.addInst(.{
|
||||
.tag = .umulh,
|
||||
.data = .{ .rrr = .{
|
||||
.rd = dest_high_reg,
|
||||
.rn = lhs_reg,
|
||||
.rm = rhs_reg,
|
||||
} },
|
||||
});
|
||||
|
||||
// mul dest, lhs, rhs
|
||||
_ = try self.addInst(.{
|
||||
.tag = .mul,
|
||||
.data = .{ .rrr = .{
|
||||
.rd = dest_reg,
|
||||
.rn = lhs_reg,
|
||||
.rm = rhs_reg,
|
||||
} },
|
||||
});
|
||||
|
||||
_ = try self.binOp(
|
||||
.cmp_eq,
|
||||
null,
|
||||
.{ .register = dest_high_reg },
|
||||
.{ .immediate = 0 },
|
||||
Type.usize,
|
||||
Type.usize,
|
||||
);
|
||||
|
||||
if (int_info.bits < 64) {
|
||||
// lsr dest_high, dest, #shift
|
||||
_ = try self.addInst(.{
|
||||
.tag = .lsr_immediate,
|
||||
.data = .{ .rr_shift = .{
|
||||
.rd = dest_high_reg,
|
||||
.rn = dest_reg,
|
||||
.shift = @intCast(u6, int_info.bits),
|
||||
} },
|
||||
});
|
||||
|
||||
_ = try self.binOp(
|
||||
.cmp_eq,
|
||||
null,
|
||||
.{ .register = dest_high_reg },
|
||||
.{ .immediate = 0 },
|
||||
Type.usize,
|
||||
Type.usize,
|
||||
);
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
const truncated_reg = try self.register_manager.allocReg(null);
|
||||
self.register_manager.freezeRegs(&.{truncated_reg});
|
||||
defer self.register_manager.unfreezeRegs(&.{truncated_reg});
|
||||
|
||||
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
|
||||
|
||||
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
|
||||
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{
|
||||
.compare_flags_unsigned = .neq,
|
||||
});
|
||||
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
} else return self.fail("TODO implement mul_with_overflow for integers > u64/i64", .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
};
|
||||
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
|
||||
}
|
||||
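airMulWithOverflow above takes two paths: for widths up to 32 bits it widens the product with smull/umull and compares it against its sign- or zero-extended truncation (the cmp_extended_register with sxtw/uxtw), and for widths up to 64 bits it pairs mul with smulh/umulh and checks that the high half of the product carries no extra information. A sketch of the 64-bit unsigned case with a hypothetical helper (not part of the commit; the test values mirror the behavior tests at the end of this diff):

```zig
const std = @import("std");

fn mulWithOverflowU64(a: u64, b: u64) struct { wrapped: u64, overflow: u1 } {
    const full = @as(u128, a) * @as(u128, b);
    const lo = @truncate(u64, full); // mul   dest, lhs, rhs
    const hi = @truncate(u64, full >> 64); // umulh dest_high, lhs, rhs
    // unsigned case: overflow iff the high half is non-zero
    return .{ .wrapped = lo, .overflow = @boolToInt(hi != 0) };
}

test "high-half multiply overflow check" {
    try std.testing.expect(mulWithOverflowU64(3, 0x5555_5555_5555_5555).overflow == 0);
    try std.testing.expect(mulWithOverflowU64(3, 0x5555_5555_5555_5556).overflow == 1);
}
```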
|
||||
fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
|
||||
_ = inst;
|
||||
return self.fail("TODO implement airShlWithOverflow for {}", .{self.target.cpu.arch});
|
||||
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
|
||||
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
|
||||
if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
|
||||
const result: MCValue = result: {
|
||||
const lhs = try self.resolveInst(extra.lhs);
|
||||
const rhs = try self.resolveInst(extra.rhs);
|
||||
const lhs_ty = self.air.typeOf(extra.lhs);
|
||||
const rhs_ty = self.air.typeOf(extra.rhs);
|
||||
|
||||
const tuple_ty = self.air.typeOfIndex(inst);
|
||||
const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
|
||||
const tuple_align = tuple_ty.abiAlignment(self.target.*);
|
||||
const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
|
||||
|
||||
switch (lhs_ty.zigTypeTag()) {
|
||||
.Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
|
||||
.Int => {
|
||||
const int_info = lhs_ty.intInfo(self.target.*);
|
||||
if (int_info.bits <= 64) {
|
||||
const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
|
||||
|
||||
if (lhs == .register) self.register_manager.freezeRegs(&.{lhs.register});
|
||||
defer if (lhs == .register) self.register_manager.unfreezeRegs(&.{lhs.register});
|
||||
|
||||
try self.spillCompareFlagsIfOccupied();
|
||||
self.compare_flags_inst = null;
|
||||
|
||||
// lsl dest, lhs, rhs
|
||||
const dest = try self.binOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
|
||||
|
||||
// asr/lsr reconstructed, dest, rhs
|
||||
const reconstructed = try self.binOp(.shr, null, dest, rhs, lhs_ty, rhs_ty);
|
||||
|
||||
// cmp lhs, reconstructed
|
||||
_ = try self.binOp(.cmp_eq, null, lhs, reconstructed, lhs_ty, lhs_ty);
|
||||
|
||||
try self.genSetStack(lhs_ty, stack_offset, dest);
|
||||
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
|
||||
|
||||
break :result MCValue{ .stack_offset = stack_offset };
|
||||
} else {
|
||||
return self.fail("TODO overflow operations on integers > u64/i64", .{});
|
||||
}
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
};
|
||||
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
|
||||
}
|
||||
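The shl_with_overflow lowering above relies on a round-trip: shift left, shift the result back (asr for signed, lsr for unsigned), and compare against the original operand; any bits lost off the top make the round-trip differ, which is exactly the overflow condition written to the stack. A value-level sketch with a hypothetical helper (not part of the commit; the first test value is the one used in the behavior test at the end of this diff):

```zig
const std = @import("std");

fn shlWithOverflowU16(a: u16, shift: u4) struct { wrapped: u16, overflow: u1 } {
    const shifted = a << shift; // lsl dest, lhs, rhs
    const reconstructed = shifted >> shift; // lsr reconstructed, dest, rhs
    // cmp lhs, reconstructed; overflow iff they differ
    return .{ .wrapped = shifted, .overflow = @boolToInt(reconstructed != a) };
}

test "shift round-trip overflow check" {
    try std.testing.expect(shlWithOverflowU16(0b0010_1111_1111_1111, 3).overflow == 1);
    try std.testing.expect(shlWithOverflowU16(0b0000_1111_1111_1111, 3).overflow == 0);
}
```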
|
||||
fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
|
||||
@ -1957,7 +2423,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
switch (elem_size) {
|
||||
else => {
|
||||
const dest = try self.allocRegOrMem(inst, true);
|
||||
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ty, Type.usize);
|
||||
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize);
|
||||
try self.load(dest, addr, slice_ptr_field_type);
|
||||
|
||||
break :result dest;
|
||||
@ -2085,8 +2551,11 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
|
||||
.undef => unreachable,
|
||||
.unreach => unreachable,
|
||||
.dead => unreachable,
|
||||
.compare_flags_unsigned => unreachable,
|
||||
.compare_flags_signed => unreachable,
|
||||
.compare_flags_unsigned,
|
||||
.compare_flags_signed,
|
||||
.register_c_flag,
|
||||
.register_v_flag,
|
||||
=> unreachable, // cannot hold an address
|
||||
.immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
|
||||
.ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
|
||||
.register => |addr_reg| {
|
||||
@ -2303,8 +2772,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
|
||||
.undef => unreachable,
|
||||
.unreach => unreachable,
|
||||
.dead => unreachable,
|
||||
.compare_flags_unsigned => unreachable,
|
||||
.compare_flags_signed => unreachable,
|
||||
.compare_flags_unsigned,
|
||||
.compare_flags_signed,
|
||||
.register_c_flag,
|
||||
.register_v_flag,
|
||||
=> unreachable, // cannot hold an address
|
||||
.immediate => |imm| {
|
||||
try self.setRegOrMem(value_ty, .{ .memory = imm }, value);
|
||||
},
|
||||
@ -2409,9 +2881,60 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
|
||||
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
|
||||
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
|
||||
_ = extra;
|
||||
return self.fail("TODO implement codegen struct_field_val", .{});
|
||||
//return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
|
||||
const operand = extra.struct_operand;
|
||||
const index = extra.field_index;
|
||||
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
|
||||
const mcv = try self.resolveInst(operand);
|
||||
const struct_ty = self.air.typeOf(operand);
|
||||
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
|
||||
|
||||
switch (mcv) {
|
||||
.dead, .unreach => unreachable,
|
||||
.stack_offset => |off| {
|
||||
break :result MCValue{ .stack_offset = off - struct_field_offset };
|
||||
},
|
||||
.memory => |addr| {
|
||||
break :result MCValue{ .memory = addr + struct_field_offset };
|
||||
},
|
||||
.register_c_flag,
|
||||
.register_v_flag,
|
||||
=> |reg| {
|
||||
switch (index) {
|
||||
0 => {
|
||||
// get wrapped value: return register
|
||||
break :result MCValue{ .register = reg };
|
||||
},
|
||||
1 => {
|
||||
// TODO return special MCValue condition flags
|
||||
// get overflow bit: set register to C flag
|
||||
// resp. V flag
|
||||
const raw_dest_reg = try self.register_manager.allocReg(null);
|
||||
const dest_reg = raw_dest_reg.to32();
|
||||
|
||||
// C flag: cset reg, cs
|
||||
// V flag: cset reg, vs
|
||||
_ = try self.addInst(.{
|
||||
.tag = .cset,
|
||||
.data = .{ .r_cond = .{
|
||||
.rd = dest_reg,
|
||||
.cond = switch (mcv) {
|
||||
.register_c_flag => .cs,
|
||||
.register_v_flag => .vs,
|
||||
else => unreachable,
|
||||
},
|
||||
} },
|
||||
});
|
||||
|
||||
break :result MCValue{ .register = dest_reg };
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
},
|
||||
else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}),
|
||||
}
|
||||
};
|
||||
|
||||
return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
|
||||
}
|
||||
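For a tuple that currently lives as register_c_flag or register_v_flag, airStructFieldVal above resolves field access without ever storing the tuple: index 0 simply reuses the register holding the wrapped value, and index 1 materializes the flag into a fresh register with cset on cs (C flag) or vs (V flag). A value-level model of that mapping, with hypothetical types and a hypothetical helper (not part of the commit):

```zig
const std = @import("std");

// Hypothetical stand-in for an MCValue of kind register_c_flag/register_v_flag.
const OverflowTuple = struct { wrapped: u32, flag_set: bool };

fn structFieldVal(tuple: OverflowTuple, comptime index: u1) u32 {
    return switch (index) {
        0 => tuple.wrapped, // reuse the register already holding the wrapped value
        1 => @boolToInt(tuple.flag_set), // cset dest, cs  /  cset dest, vs
    };
}

test "field access on a flag-carried overflow tuple" {
    const t = OverflowTuple{ .wrapped = 2, .flag_set = true };
    try std.testing.expect(structFieldVal(t, 0) == 2);
    try std.testing.expect(structFieldVal(t, 1) == 1);
}
```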
|
||||
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
|
||||
@ -2451,7 +2974,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
switch (mcv) {
|
||||
.register => |reg| {
|
||||
self.register_manager.getRegAssumeFree(toCanonicalReg(reg), inst);
|
||||
self.register_manager.getRegAssumeFree(reg, inst);
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
@ -2516,15 +3039,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
|
||||
|
||||
switch (mc_arg) {
|
||||
.none => continue,
|
||||
.undef => unreachable,
|
||||
.immediate => unreachable,
|
||||
.unreach => unreachable,
|
||||
.dead => unreachable,
|
||||
.memory => unreachable,
|
||||
.compare_flags_signed => unreachable,
|
||||
.compare_flags_unsigned => unreachable,
|
||||
.got_load => unreachable,
|
||||
.direct_load => unreachable,
|
||||
.register => |reg| {
|
||||
try self.register_manager.getReg(reg, null);
|
||||
try self.genSetReg(arg_ty, reg, arg_mcv);
|
||||
@ -2535,6 +3049,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
|
||||
.ptr_stack_offset => {
|
||||
return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
@ -3438,6 +3953,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
|
||||
else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}),
|
||||
}
|
||||
},
|
||||
.register_c_flag,
|
||||
.register_v_flag,
|
||||
=> {
|
||||
return self.fail("TODO implement genSetStack {}", .{mcv});
|
||||
},
|
||||
.got_load,
|
||||
.direct_load,
|
||||
.memory,
|
||||
@ -3555,7 +4075,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
|
||||
.tag = .cset,
|
||||
.data = .{ .r_cond = .{
|
||||
.rd = reg,
|
||||
.cond = condition.negate(),
|
||||
.cond = condition,
|
||||
} },
|
||||
});
|
||||
},
|
||||
@ -3598,6 +4118,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
|
||||
.data = .{ .rr = .{ .rd = reg, .rn = src_reg } },
|
||||
});
|
||||
},
|
||||
.register_c_flag,
|
||||
.register_v_flag,
|
||||
=> unreachable, // doesn't fit into a register
|
||||
.got_load,
|
||||
.direct_load,
|
||||
=> |sym_index| {
|
||||
@ -4199,8 +4722,3 @@ fn registerAlias(reg: Register, size_bytes: u64) Register {
|
||||
unreachable; // TODO handle floating-point registers
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolves any aliased registers to the 64-bit wide ones.
|
||||
fn toCanonicalReg(reg: Register) Register {
|
||||
return reg.to64();
|
||||
}
|
||||
|
||||
@ -77,8 +77,10 @@ pub fn emitMir(
|
||||
const inst = @intCast(u32, index);
|
||||
switch (tag) {
|
||||
.add_immediate => try emit.mirAddSubtractImmediate(inst),
|
||||
.adds_immediate => try emit.mirAddSubtractImmediate(inst),
|
||||
.cmp_immediate => try emit.mirAddSubtractImmediate(inst),
|
||||
.sub_immediate => try emit.mirAddSubtractImmediate(inst),
|
||||
.subs_immediate => try emit.mirAddSubtractImmediate(inst),
|
||||
|
||||
.asr_register => try emit.mirShiftRegister(inst),
|
||||
.lsl_register => try emit.mirShiftRegister(inst),
|
||||
@ -104,10 +106,19 @@ pub fn emitMir(
|
||||
.call_extern => try emit.mirCallExtern(inst),
|
||||
|
||||
.eor_immediate => try emit.mirLogicalImmediate(inst),
|
||||
.tst_immediate => try emit.mirLogicalImmediate(inst),
|
||||
|
||||
.add_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
|
||||
.adds_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
|
||||
.cmp_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
|
||||
.sub_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
|
||||
.subs_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
|
||||
|
||||
.add_extended_register => try emit.mirAddSubtractExtendedRegister(inst),
|
||||
.adds_extended_register => try emit.mirAddSubtractExtendedRegister(inst),
|
||||
.sub_extended_register => try emit.mirAddSubtractExtendedRegister(inst),
|
||||
.subs_extended_register => try emit.mirAddSubtractExtendedRegister(inst),
|
||||
.cmp_extended_register => try emit.mirAddSubtractExtendedRegister(inst),
|
||||
|
||||
.cset => try emit.mirConditionalSelect(inst),
|
||||
|
||||
@ -162,6 +173,10 @@ pub fn emitMir(
|
||||
.movz => try emit.mirMoveWideImmediate(inst),
|
||||
|
||||
.mul => try emit.mirDataProcessing3Source(inst),
|
||||
.smulh => try emit.mirDataProcessing3Source(inst),
|
||||
.smull => try emit.mirDataProcessing3Source(inst),
|
||||
.umulh => try emit.mirDataProcessing3Source(inst),
|
||||
.umull => try emit.mirDataProcessing3Source(inst),
|
||||
|
||||
.nop => try emit.mirNop(),
|
||||
|
||||
@ -454,7 +469,9 @@ fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
const tag = emit.mir.instructions.items(.tag)[inst];
|
||||
switch (tag) {
|
||||
.add_immediate,
|
||||
.adds_immediate,
|
||||
.sub_immediate,
|
||||
.subs_immediate,
|
||||
=> {
|
||||
const rr_imm12_sh = emit.mir.instructions.items(.data)[inst].rr_imm12_sh;
|
||||
const rd = rr_imm12_sh.rd;
|
||||
@ -464,7 +481,9 @@ fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
|
||||
switch (tag) {
|
||||
.add_immediate => try emit.writeInstruction(Instruction.add(rd, rn, imm12, sh)),
|
||||
.adds_immediate => try emit.writeInstruction(Instruction.adds(rd, rn, imm12, sh)),
|
||||
.sub_immediate => try emit.writeInstruction(Instruction.sub(rd, rn, imm12, sh)),
|
||||
.subs_immediate => try emit.writeInstruction(Instruction.subs(rd, rn, imm12, sh)),
|
||||
else => unreachable,
|
||||
}
|
||||
},
|
||||
@ -666,6 +685,14 @@ fn mirLogicalImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
|
||||
switch (tag) {
|
||||
.eor_immediate => try emit.writeInstruction(Instruction.eorImmediate(rd, rn, imms, immr, n)),
|
||||
.tst_immediate => {
|
||||
const zr: Register = switch (rd.size()) {
|
||||
32 => .wzr,
|
||||
64 => .xzr,
|
||||
else => unreachable,
|
||||
};
|
||||
try emit.writeInstruction(Instruction.andsImmediate(zr, rn, imms, immr, n));
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
@ -674,7 +701,9 @@ fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
const tag = emit.mir.instructions.items(.tag)[inst];
|
||||
switch (tag) {
|
||||
.add_shifted_register,
|
||||
.adds_shifted_register,
|
||||
.sub_shifted_register,
|
||||
.subs_shifted_register,
|
||||
=> {
|
||||
const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
|
||||
const rd = rrr_imm6_shift.rd;
|
||||
@ -685,7 +714,9 @@ fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
|
||||
switch (tag) {
|
||||
.add_shifted_register => try emit.writeInstruction(Instruction.addShiftedRegister(rd, rn, rm, shift, imm6)),
|
||||
.adds_shifted_register => try emit.writeInstruction(Instruction.addsShiftedRegister(rd, rn, rm, shift, imm6)),
|
||||
.sub_shifted_register => try emit.writeInstruction(Instruction.subShiftedRegister(rd, rn, rm, shift, imm6)),
|
||||
.subs_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(rd, rn, rm, shift, imm6)),
|
||||
else => unreachable,
|
||||
}
|
||||
},
|
||||
@ -707,6 +738,47 @@ fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
}
|
||||
}
|
||||
|
||||
fn mirAddSubtractExtendedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
const tag = emit.mir.instructions.items(.tag)[inst];
|
||||
switch (tag) {
|
||||
.add_extended_register,
|
||||
.adds_extended_register,
|
||||
.sub_extended_register,
|
||||
.subs_extended_register,
|
||||
=> {
|
||||
const rrr_extend_shift = emit.mir.instructions.items(.data)[inst].rrr_extend_shift;
|
||||
const rd = rrr_extend_shift.rd;
|
||||
const rn = rrr_extend_shift.rn;
|
||||
const rm = rrr_extend_shift.rm;
|
||||
const ext_type = rrr_extend_shift.ext_type;
|
||||
const imm3 = rrr_extend_shift.imm3;
|
||||
|
||||
switch (tag) {
|
||||
.add_extended_register => try emit.writeInstruction(Instruction.addExtendedRegister(rd, rn, rm, ext_type, imm3)),
|
||||
.adds_extended_register => try emit.writeInstruction(Instruction.addsExtendedRegister(rd, rn, rm, ext_type, imm3)),
|
||||
.sub_extended_register => try emit.writeInstruction(Instruction.subExtendedRegister(rd, rn, rm, ext_type, imm3)),
|
||||
.subs_extended_register => try emit.writeInstruction(Instruction.subsExtendedRegister(rd, rn, rm, ext_type, imm3)),
|
||||
else => unreachable,
|
||||
}
|
||||
},
|
||||
.cmp_extended_register => {
|
||||
const rr_extend_shift = emit.mir.instructions.items(.data)[inst].rr_extend_shift;
|
||||
const rn = rr_extend_shift.rn;
|
||||
const rm = rr_extend_shift.rm;
|
||||
const ext_type = rr_extend_shift.ext_type;
|
||||
const imm3 = rr_extend_shift.imm3;
|
||||
const zr: Register = switch (rn.size()) {
|
||||
32 => .wzr,
|
||||
64 => .xzr,
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
try emit.writeInstruction(Instruction.subsExtendedRegister(zr, rn, rm, ext_type, imm3));
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
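cmp is not a separate encoding here: cmp (extended register) is subs that discards its result into the zero register, which is why the .cmp_extended_register branch above calls subsExtendedRegister with wzr/xzr as the destination (tst_immediate plays the same trick with andsImmediate). A test-style sketch in the spirit of the "serialize instructions" cases in bits.zig; the import path, the toU32() serializer, and the zero-register encoding are assumptions based on how that file's existing test cases behave, and this is not part of the commit:

```zig
const std = @import("std");
// Assumed to live next to the aarch64 bits.zig, as that file's own tests do.
const Instruction = @import("bits.zig").Instruction;

test "cmp (extended register) is subs into the zero register" {
    // cmp x1, x2, uxtx  ==  subs xzr, x1, x2, uxtx
    const cmp = Instruction.subsExtendedRegister(.xzr, .x1, .x2, .uxtx, 0);
    try std.testing.expectEqual(
        @as(u32, 0b1_1_1_01011_00_1_00010_011_000_00001_11111),
        cmp.toU32(),
    );
}
```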
|
||||
fn mirConditionalSelect(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
const tag = emit.mir.instructions.items(.tag)[inst];
|
||||
switch (tag) {
|
||||
@ -717,7 +789,7 @@ fn mirConditionalSelect(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
64 => .xzr,
|
||||
else => unreachable,
|
||||
};
|
||||
try emit.writeInstruction(Instruction.csinc(r_cond.rd, zr, zr, r_cond.cond));
|
||||
try emit.writeInstruction(Instruction.csinc(r_cond.rd, zr, zr, r_cond.cond.negate()));
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
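This negate() and the genSetReg change earlier in the diff (which stopped negating the condition at the call site) are two halves of the same aliasing rule, now applied once in the emitter: CSET Rd, cond is defined as CSINC Rd, ZR, ZR, invert(cond). A small value-level model of why the inversion belongs here (hypothetical helpers, not part of the commit):

```zig
const std = @import("std");

// csinc Rd, Rn, Rm, cond: Rd = Rn if cond holds, otherwise Rm + 1
fn csinc(rn: u32, rm: u32, cond: bool) u32 {
    return if (cond) rn else rm + 1;
}

// cset Rd, cond is the alias csinc Rd, zr, zr, invert(cond)
fn cset(cond: bool) u32 {
    return csinc(0, 0, !cond);
}

test "cset is csinc with the condition inverted" {
    try std.testing.expect(cset(true) == 1);
    try std.testing.expect(cset(false) == 0);
}
```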
@ -988,6 +1060,10 @@ fn mirDataProcessing3Source(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
|
||||
switch (tag) {
|
||||
.mul => try emit.writeInstruction(Instruction.mul(rrr.rd, rrr.rn, rrr.rm)),
|
||||
.smulh => try emit.writeInstruction(Instruction.smulh(rrr.rd, rrr.rn, rrr.rm)),
|
||||
.smull => try emit.writeInstruction(Instruction.smull(rrr.rd, rrr.rn, rrr.rm)),
|
||||
.umulh => try emit.writeInstruction(Instruction.umulh(rrr.rd, rrr.rn, rrr.rm)),
|
||||
.umull => try emit.writeInstruction(Instruction.umull(rrr.rd, rrr.rn, rrr.rm)),
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
@ -26,8 +26,16 @@ pub const Inst = struct {
|
||||
pub const Tag = enum(u16) {
|
||||
/// Add (immediate)
|
||||
add_immediate,
|
||||
/// Add, update condition flags (immediate)
|
||||
adds_immediate,
|
||||
/// Add (shifted register)
|
||||
add_shifted_register,
|
||||
/// Add, update condition flags (shifted register)
|
||||
adds_shifted_register,
|
||||
/// Add (extended register)
|
||||
add_extended_register,
|
||||
/// Add, update condition flags (extended register)
|
||||
adds_extended_register,
|
||||
/// Bitwise AND (shifted register)
|
||||
and_shifted_register,
|
||||
/// Arithmetic Shift Right (immediate)
|
||||
@ -52,6 +60,8 @@ pub const Inst = struct {
|
||||
cmp_immediate,
|
||||
/// Compare (shifted register)
|
||||
cmp_shifted_register,
|
||||
/// Compare (extended register)
|
||||
cmp_extended_register,
|
||||
/// Conditional set
|
||||
cset,
|
||||
/// Pseudo-instruction: End of prologue
|
||||
@ -142,6 +152,10 @@ pub const Inst = struct {
|
||||
ret,
|
||||
/// Signed bitfield extract
|
||||
sbfx,
|
||||
/// Signed multiply high
|
||||
smulh,
|
||||
/// Signed multiply long
|
||||
smull,
|
||||
/// Signed extend byte
|
||||
sxtb,
|
||||
/// Signed extend halfword
|
||||
@ -170,12 +184,26 @@ pub const Inst = struct {
|
||||
strh_register,
|
||||
/// Subtract (immediate)
|
||||
sub_immediate,
|
||||
/// Subtract, update condition flags (immediate)
|
||||
subs_immediate,
|
||||
/// Subtract (shifted register)
|
||||
sub_shifted_register,
|
||||
/// Subtract, update condition flags (shifted register)
|
||||
subs_shifted_register,
|
||||
/// Subtract (extended register)
|
||||
sub_extended_register,
|
||||
/// Subtract, update condition flags (extended register)
|
||||
subs_extended_register,
|
||||
/// Supervisor Call
|
||||
svc,
|
||||
/// Test bits (immediate)
|
||||
tst_immediate,
|
||||
/// Unsigned bitfield extract
|
||||
ubfx,
|
||||
/// Unsigned multiply high
|
||||
umulh,
|
||||
/// Unsigned multiply long
|
||||
umull,
|
||||
/// Unsigned extend byte
|
||||
uxtb,
|
||||
/// Unsigned extend halfword
|
||||
@ -282,6 +310,15 @@ pub const Inst = struct {
|
||||
imm6: u6,
|
||||
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
|
||||
},
|
||||
/// Two registers with sign-extension (extension type and 3-bit shift amount)
|
||||
///
|
||||
/// Used by e.g. cmp_extended_register
|
||||
rr_extend_shift: struct {
|
||||
rn: Register,
|
||||
rm: Register,
|
||||
ext_type: bits.Instruction.AddSubtractExtendedRegisterOption,
|
||||
imm3: u3,
|
||||
},
|
||||
/// Two registers and a shift (logical instruction version)
|
||||
/// (shift type and 6-bit amount)
|
||||
///
|
||||
@ -338,6 +375,16 @@ pub const Inst = struct {
|
||||
imm6: u6,
|
||||
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
|
||||
},
|
||||
/// Three registers with sign-extension (extension type and 3-bit shift amount)
|
||||
///
|
||||
/// Used by e.g. add_extended_register
|
||||
rrr_extend_shift: struct {
|
||||
rd: Register,
|
||||
rn: Register,
|
||||
rm: Register,
|
||||
ext_type: bits.Instruction.AddSubtractExtendedRegisterOption,
|
||||
imm3: u3,
|
||||
},
|
||||
/// Three registers and a shift (logical instruction version)
|
||||
/// (shift type and 6-bit amount)
|
||||
///
|
||||
|
||||
@ -330,6 +330,17 @@ pub const Instruction = union(enum) {
|
||||
op: u1,
|
||||
sf: u1,
|
||||
},
|
||||
add_subtract_extended_register: packed struct {
|
||||
rd: u5,
|
||||
rn: u5,
|
||||
imm3: u3,
|
||||
option: u3,
|
||||
rm: u5,
|
||||
fixed: u8 = 0b01011_00_1,
|
||||
s: u1,
|
||||
op: u1,
|
||||
sf: u1,
|
||||
},
|
||||
conditional_branch: struct {
|
||||
cond: u4,
|
||||
o0: u1,
|
||||
@ -495,6 +506,7 @@ pub const Instruction = union(enum) {
|
||||
.logical_immediate => |v| @bitCast(u32, v),
|
||||
.bitfield => |v| @bitCast(u32, v),
|
||||
.add_subtract_shifted_register => |v| @bitCast(u32, v),
|
||||
.add_subtract_extended_register => |v| @bitCast(u32, v),
|
||||
// TODO once packed structs work, this can be refactored
|
||||
.conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
|
||||
.compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
|
||||
@ -1006,6 +1018,44 @@ pub const Instruction = union(enum) {
|
||||
};
|
||||
}
|
||||
|
||||
pub const AddSubtractExtendedRegisterOption = enum(u3) {
|
||||
uxtb,
|
||||
uxth,
|
||||
uxtw,
|
||||
uxtx, // serves also as lsl
|
||||
sxtb,
|
||||
sxth,
|
||||
sxtw,
|
||||
sxtx,
|
||||
};
|
||||
|
||||
fn addSubtractExtendedRegister(
|
||||
op: u1,
|
||||
s: u1,
|
||||
rd: Register,
|
||||
rn: Register,
|
||||
rm: Register,
|
||||
extend: AddSubtractExtendedRegisterOption,
|
||||
imm3: u3,
|
||||
) Instruction {
|
||||
return Instruction{
|
||||
.add_subtract_extended_register = .{
|
||||
.rd = rd.enc(),
|
||||
.rn = rn.enc(),
|
||||
.imm3 = imm3,
|
||||
.option = @enumToInt(extend),
|
||||
.rm = rm.enc(),
|
||||
.s = s,
|
||||
.op = op,
|
||||
.sf = switch (rd.size()) {
|
||||
32 => 0b0,
|
||||
64 => 0b1,
|
||||
else => unreachable, // unexpected register size
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
fn conditionalBranch(
|
||||
o0: u1,
|
||||
o1: u1,
|
||||
@ -1524,6 +1574,48 @@ pub const Instruction = union(enum) {
|
||||
return addSubtractShiftedRegister(0b1, 0b1, shift, rd, rn, rm, imm6);
|
||||
}
|
||||
|
||||
// Add/subtract (extended register)
|
||||
|
||||
pub fn addExtendedRegister(
|
||||
rd: Register,
|
||||
rn: Register,
|
||||
rm: Register,
|
||||
extend: AddSubtractExtendedRegisterOption,
|
||||
imm3: u3,
|
||||
) Instruction {
|
||||
return addSubtractExtendedRegister(0b0, 0b0, rd, rn, rm, extend, imm3);
|
||||
}
|
||||
|
||||
pub fn addsExtendedRegister(
|
||||
rd: Register,
|
||||
rn: Register,
|
||||
rm: Register,
|
||||
extend: AddSubtractExtendedRegisterOption,
|
||||
imm3: u3,
|
||||
) Instruction {
|
||||
return addSubtractExtendedRegister(0b0, 0b1, rd, rn, rm, extend, imm3);
|
||||
}
|
||||
|
||||
pub fn subExtendedRegister(
|
||||
rd: Register,
|
||||
rn: Register,
|
||||
rm: Register,
|
||||
extend: AddSubtractExtendedRegisterOption,
|
||||
imm3: u3,
|
||||
) Instruction {
|
||||
return addSubtractExtendedRegister(0b1, 0b0, rd, rn, rm, extend, imm3);
|
||||
}
|
||||
|
||||
pub fn subsExtendedRegister(
|
||||
rd: Register,
|
||||
rn: Register,
|
||||
rm: Register,
|
||||
extend: AddSubtractExtendedRegisterOption,
|
||||
imm3: u3,
|
||||
) Instruction {
|
||||
return addSubtractExtendedRegister(0b1, 0b1, rd, rn, rm, extend, imm3);
|
||||
}
|
||||
|
||||
// Conditional branch
|
||||
|
||||
pub fn bCond(cond: Condition, offset: i21) Instruction {
|
||||
@ -1564,6 +1656,16 @@ pub const Instruction = union(enum) {
|
||||
return dataProcessing3Source(0b00, 0b000, 0b0, rd, rn, rm, ra);
|
||||
}
|
||||
|
||||
pub fn smaddl(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
|
||||
assert(rd.size() == 64 and rn.size() == 32 and rm.size() == 32 and ra.size() == 64);
|
||||
return dataProcessing3Source(0b00, 0b001, 0b0, rd, rn, rm, ra);
|
||||
}
|
||||
|
||||
pub fn umaddl(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
|
||||
assert(rd.size() == 64 and rn.size() == 32 and rm.size() == 32 and ra.size() == 64);
|
||||
return dataProcessing3Source(0b00, 0b101, 0b0, rd, rn, rm, ra);
|
||||
}
|
||||
|
||||
pub fn msub(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
|
||||
return dataProcessing3Source(0b00, 0b000, 0b1, rd, rn, rm, ra);
|
||||
}
|
||||
@ -1572,6 +1674,24 @@ pub const Instruction = union(enum) {
|
||||
return madd(rd, rn, rm, .xzr);
|
||||
}
|
||||
|
||||
pub fn smull(rd: Register, rn: Register, rm: Register) Instruction {
|
||||
return smaddl(rd, rn, rm, .xzr);
|
||||
}
|
||||
|
||||
pub fn smulh(rd: Register, rn: Register, rm: Register) Instruction {
|
||||
assert(rd.size() == 64);
|
||||
return dataProcessing3Source(0b00, 0b010, 0b0, rd, rn, rm, .xzr);
|
||||
}
|
||||
|
||||
pub fn umull(rd: Register, rn: Register, rm: Register) Instruction {
|
||||
return umaddl(rd, rn, rm, .xzr);
|
||||
}
|
||||
|
||||
pub fn umulh(rd: Register, rn: Register, rm: Register) Instruction {
|
||||
assert(rd.size() == 64);
|
||||
return dataProcessing3Source(0b00, 0b110, 0b0, rd, rn, rm, .xzr);
|
||||
}
|
||||
|
||||
pub fn mneg(rd: Register, rn: Register, rm: Register) Instruction {
|
||||
return msub(rd, rn, rm, .xzr);
|
||||
}
|
||||
@ -1790,6 +1910,30 @@ test "serialize instructions" {
|
||||
.inst = Instruction.lsrImmediate(.x4, .x2, 63),
|
||||
.expected = 0b1_10_100110_1_111111_111111_00010_00100,
|
||||
},
|
||||
.{ // umull x0, w0, w1
|
||||
.inst = Instruction.umull(.x0, .w0, .w1),
|
||||
.expected = 0b1_00_11011_1_01_00001_0_11111_00000_00000,
|
||||
},
|
||||
.{ // smull x0, w0, w1
|
||||
.inst = Instruction.smull(.x0, .w0, .w1),
|
||||
.expected = 0b1_00_11011_0_01_00001_0_11111_00000_00000,
|
||||
},
|
||||
.{ // tst x0, #0xffffffff00000000
|
||||
.inst = Instruction.andsImmediate(.xzr, .x0, 0b011111, 0b100000, 0b1),
|
||||
.expected = 0b1_11_100100_1_100000_011111_00000_11111,
|
||||
},
|
||||
.{ // umulh x0, x1, x2
|
||||
.inst = Instruction.umulh(.x0, .x1, .x2),
|
||||
.expected = 0b1_00_11011_1_10_00010_0_11111_00001_00000,
|
||||
},
|
||||
.{ // smulh x0, x1, x2
|
||||
.inst = Instruction.smulh(.x0, .x1, .x2),
|
||||
.expected = 0b1_00_11011_0_10_00010_0_11111_00001_00000,
|
||||
},
|
||||
.{ // adds x0, x1, x2, sxtx
|
||||
.inst = Instruction.addsExtendedRegister(.x0, .x1, .x2, .sxtx, 0),
|
||||
.expected = 0b1_0_1_01011_00_1_00010_111_000_00001_00000,
|
||||
},
|
||||
};
|
||||
|
||||
for (testcases) |case| {
|
||||
|
||||
@ -1989,7 +1989,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
},
|
||||
else => {
|
||||
const dest = try self.allocRegOrMem(inst, true);
|
||||
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ty, Type.usize);
|
||||
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize);
|
||||
try self.load(dest, addr, slice_ptr_field_type);
|
||||
|
||||
break :result dest;
|
||||
|
||||
@ -624,7 +624,6 @@ test "128-bit multiplication" {
|
||||
|
||||
test "@addWithOverflow" {
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
|
||||
var result: u8 = undefined;
|
||||
try expect(@addWithOverflow(u8, 250, 100, &result));
|
||||
@ -665,9 +664,8 @@ test "small int addition" {
|
||||
try expect(result == 0);
|
||||
}
|
||||
|
||||
test "@mulWithOverflow" {
|
||||
test "basic @mulWithOverflow" {
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
|
||||
var result: u8 = undefined;
|
||||
try expect(@mulWithOverflow(u8, 86, 3, &result));
|
||||
@ -685,9 +683,209 @@ test "@mulWithOverflow" {
|
||||
try expect(result == 236);
|
||||
}
|
||||
|
||||
// TODO migrate to this for all backends once they handle more cases
|
||||
test "extensive @mulWithOverflow" {
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
|
||||
{
|
||||
var a: u5 = 3;
|
||||
var b: u5 = 10;
|
||||
var res: u5 = undefined;
|
||||
try expect(!@mulWithOverflow(u5, a, b, &res));
|
||||
try expect(res == 30);
|
||||
|
||||
b = 11;
|
||||
try expect(@mulWithOverflow(u5, a, b, &res));
|
||||
try expect(res == 1);
|
||||
}
|
||||
|
||||
{
|
||||
var a: i5 = 3;
|
||||
var b: i5 = -5;
|
||||
var res: i5 = undefined;
|
||||
try expect(!@mulWithOverflow(i5, a, b, &res));
|
||||
try expect(res == -15);
|
||||
|
||||
b = -6;
|
||||
try expect(@mulWithOverflow(i5, a, b, &res));
|
||||
try expect(res == 14);
|
||||
}
|
||||
|
||||
{
|
||||
var a: u8 = 3;
|
||||
var b: u8 = 85;
|
||||
var res: u8 = undefined;
|
||||
|
||||
try expect(!@mulWithOverflow(u8, a, b, &res));
|
||||
try expect(res == 255);
|
||||
|
||||
b = 86;
|
||||
try expect(@mulWithOverflow(u8, a, b, &res));
|
||||
try expect(res == 2);
|
||||
}
|
||||
|
||||
{
|
||||
var a: i8 = 3;
|
||||
var b: i8 = -42;
|
||||
var res: i8 = undefined;
|
||||
try expect(!@mulWithOverflow(i8, a, b, &res));
|
||||
try expect(res == -126);
|
||||
|
||||
b = -43;
|
||||
try expect(@mulWithOverflow(i8, a, b, &res));
|
||||
try expect(res == 127);
|
||||
}
|
||||
|
||||
{
|
||||
var a: u14 = 3;
|
||||
var b: u14 = 0x1555;
|
||||
var res: u14 = undefined;
|
||||
try expect(!@mulWithOverflow(u14, a, b, &res));
|
||||
try expect(res == 0x3fff);
|
||||
|
||||
b = 0x1556;
|
||||
try expect(@mulWithOverflow(u14, a, b, &res));
|
||||
try expect(res == 2);
|
||||
}
|
||||
|
||||
{
|
||||
var a: i14 = 3;
|
||||
var b: i14 = -0xaaa;
|
||||
var res: i14 = undefined;
|
||||
try expect(!@mulWithOverflow(i14, a, b, &res));
|
||||
try expect(res == -0x1ffe);
|
||||
|
||||
b = -0xaab;
|
||||
try expect(@mulWithOverflow(i14, a, b, &res));
|
||||
try expect(res == 0x1fff);
|
||||
}
|
||||
|
||||
{
|
||||
var a: u16 = 3;
|
||||
var b: u16 = 0x5555;
|
||||
var res: u16 = undefined;
|
||||
try expect(!@mulWithOverflow(u16, a, b, &res));
|
||||
try expect(res == 0xffff);
|
||||
|
||||
b = 0x5556;
|
||||
try expect(@mulWithOverflow(u16, a, b, &res));
|
||||
try expect(res == 2);
|
||||
}
|
||||
|
||||
{
|
||||
var a: i16 = 3;
|
||||
var b: i16 = -0x2aaa;
|
||||
var res: i16 = undefined;
|
||||
try expect(!@mulWithOverflow(i16, a, b, &res));
|
||||
try expect(res == -0x7ffe);
|
||||
|
||||
b = -0x2aab;
|
||||
try expect(@mulWithOverflow(i16, a, b, &res));
|
||||
try expect(res == 0x7fff);
|
||||
}
|
||||
|
||||
{
|
||||
var a: u30 = 3;
|
||||
var b: u30 = 0x15555555;
|
||||
var res: u30 = undefined;
|
||||
try expect(!@mulWithOverflow(u30, a, b, &res));
|
||||
try expect(res == 0x3fffffff);
|
||||
|
||||
b = 0x15555556;
|
||||
try expect(@mulWithOverflow(u30, a, b, &res));
|
||||
try expect(res == 2);
|
||||
}
|
||||
|
||||
{
|
||||
var a: i30 = 3;
|
||||
var b: i30 = -0xaaaaaaa;
|
||||
var res: i30 = undefined;
|
||||
try expect(!@mulWithOverflow(i30, a, b, &res));
|
||||
try expect(res == -0x1ffffffe);
|
||||
|
||||
b = -0xaaaaaab;
|
||||
try expect(@mulWithOverflow(i30, a, b, &res));
|
||||
try expect(res == 0x1fffffff);
|
||||
}
|
||||
|
||||
{
|
||||
var a: u32 = 3;
|
||||
var b: u32 = 0x55555555;
|
||||
var res: u32 = undefined;
|
||||
try expect(!@mulWithOverflow(u32, a, b, &res));
|
||||
try expect(res == 0xffffffff);
|
||||
|
||||
b = 0x55555556;
|
||||
try expect(@mulWithOverflow(u32, a, b, &res));
|
||||
try expect(res == 2);
|
||||
}
|
||||
|
||||
{
|
||||
var a: i32 = 3;
|
||||
var b: i32 = -0x2aaaaaaa;
|
||||
var res: i32 = undefined;
|
||||
try expect(!@mulWithOverflow(i32, a, b, &res));
|
||||
try expect(res == -0x7ffffffe);
|
||||
|
||||
b = -0x2aaaaaab;
|
||||
try expect(@mulWithOverflow(i32, a, b, &res));
|
||||
try expect(res == 0x7fffffff);
|
||||
}
|
||||
|
||||
{
|
||||
var a: u62 = 3;
|
||||
var b: u62 = 0x1555555555555555;
|
||||
var res: u62 = undefined;
|
||||
try expect(!@mulWithOverflow(u62, a, b, &res));
|
||||
try expect(res == 0x3fffffffffffffff);
|
||||
|
||||
b = 0x1555555555555556;
|
||||
try expect(@mulWithOverflow(u62, a, b, &res));
|
||||
try expect(res == 2);
|
||||
}
|
||||
|
||||
{
|
||||
var a: i62 = 3;
|
||||
var b: i62 = -0xaaaaaaaaaaaaaaa;
|
||||
var res: i62 = undefined;
|
||||
try expect(!@mulWithOverflow(i62, a, b, &res));
|
||||
try expect(res == -0x1ffffffffffffffe);
|
||||
|
||||
b = -0xaaaaaaaaaaaaaab;
|
||||
try expect(@mulWithOverflow(i62, a, b, &res));
|
||||
try expect(res == 0x1fffffffffffffff);
|
||||
}
|
||||
|
||||
{
|
||||
var a: u64 = 3;
|
||||
var b: u64 = 0x5555555555555555;
|
||||
var res: u64 = undefined;
|
||||
try expect(!@mulWithOverflow(u64, a, b, &res));
|
||||
try expect(res == 0xffffffffffffffff);
|
||||
|
||||
b = 0x5555555555555556;
|
||||
try expect(@mulWithOverflow(u64, a, b, &res));
|
||||
try expect(res == 2);
|
||||
}
|
||||
|
||||
{
|
||||
var a: i64 = 3;
|
||||
var b: i64 = -0x2aaaaaaaaaaaaaaa;
|
||||
var res: i64 = undefined;
|
||||
try expect(!@mulWithOverflow(i64, a, b, &res));
|
||||
try expect(res == -0x7ffffffffffffffe);
|
||||
|
||||
b = -0x2aaaaaaaaaaaaaab;
|
||||
try expect(@mulWithOverflow(i64, a, b, &res));
|
||||
try expect(res == 0x7fffffffffffffff);
|
||||
}
|
||||
}
|
||||
|
||||
test "@subWithOverflow" {
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
|
||||
var result: u8 = undefined;
|
||||
try expect(@subWithOverflow(u8, 1, 2, &result));
|
||||
@ -707,7 +905,6 @@ test "@subWithOverflow" {
|
||||
test "@shlWithOverflow" {
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
|
||||
var result: u16 = undefined;
|
||||
try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result));
|
||||
|
||||