Mirror of https://github.com/ziglang/zig.git, synced 2026-02-20 00:08:56 +00:00
stage2: fix pointer arithmetic result type
Doing pointer arithmetic now creates a new pointer type for the result, with the alignment adjusted accordingly.
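As a rough illustration of the user-visible effect (the function below is a hypothetical example, not code from this commit): the element size bounds how much alignment the result of pointer arithmetic can keep, and the compiler now encodes that in the result's pointer type instead of reusing the operand's type unchanged.

    // Illustrative Zig sketch only. Advancing an align(8) many-pointer to u16
    // by a runtime offset moves in multiples of @sizeOf(u16) == 2 bytes, so
    // only align(2) can be guaranteed for the result.
    fn advance(p: [*]align(8) u16, n: usize) [*]align(2) u16 {
        return p + n;
    }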
This commit is contained in:
parent 95f5e17e49
commit b6798c26ef
@@ -345,7 +345,7 @@ const PageAllocator = struct {
// Unmap extra pages
const aligned_buffer_len = alloc_len - drop_len;
if (aligned_buffer_len > aligned_len) {
os.munmap(result_ptr[aligned_len..aligned_buffer_len]);
os.munmap(@alignCast(mem.page_size, result_ptr[aligned_len..aligned_buffer_len]));
}

const new_hint = @alignCast(mem.page_size, result_ptr + aligned_len);
@@ -113,13 +113,13 @@ pub const Inst = struct {
/// The offset is in element type units, not bytes.
/// Wrapping is undefined behavior.
/// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
/// Uses the `bin_op` field.
/// Uses the `ty_pl` field. Payload is `Bin`.
ptr_add,
/// Subtract an offset from a pointer, returning a new pointer.
/// The offset is in element type units, not bytes.
/// Wrapping is undefined behavior.
/// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
/// Uses the `bin_op` field.
/// Uses the `ty_pl` field. Payload is `Bin`.
ptr_sub,
/// Given two operands which can be floats, integers, or vectors, returns the
/// greater of the operands. For vectors it operates element-wise.
@@ -916,8 +916,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.bit_and,
.bit_or,
.xor,
.ptr_add,
.ptr_sub,
.shr,
.shr_exact,
.shl,
@@ -989,6 +987,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.ptr_add,
.ptr_sub,
=> return air.getRefType(datas[inst].ty_pl.ty),

.not,
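A minimal sketch of what the new encoding means for consumers of the AIR, mirroring the airPtrArithmetic helpers and Liveness handling added later in this commit (`air` and `inst` stand for an Air instance and an instruction index held by a backend or analysis pass):

    // ptr_add/ptr_sub now carry their own result type: the type comes from
    // ty_pl.ty and the operands from an Air.Bin record in the extra array.
    const ty_pl = air.instructions.items(.data)[inst].ty_pl;
    const bin = air.extraData(Air.Bin, ty_pl.payload).data;
    const result_ptr_ty = air.getRefType(ty_pl.ty); // adjusted pointer type
    const ptr_operand = bin.lhs; // the pointer
    const offset_operand = bin.rhs; // the offset, in element-type units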
@@ -312,8 +312,6 @@ fn analyzeInst(
.div_exact,
.rem,
.mod,
.ptr_add,
.ptr_sub,
.bit_and,
.bit_or,
.xor,
@@ -441,6 +439,21 @@ fn analyzeInst(
return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
},

.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.ptr_add,
.ptr_sub,
.ptr_elem_ptr,
.slice_elem_ptr,
.slice,
=> {
const ty_pl = inst_datas[inst].ty_pl;
const extra = a.air.extraData(Air.Bin, ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},

.dbg_var_ptr,
.dbg_var_val,
=> {
@@ -529,10 +542,6 @@ fn analyzeInst(
const extra = a.air.extraData(Air.FieldParentPtr, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.field_ptr, .none, .none });
},
.ptr_elem_ptr, .slice_elem_ptr, .slice => {
const extra = a.air.extraData(Air.Bin, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},
.cmpxchg_strong, .cmpxchg_weak => {
const extra = a.air.extraData(Air.Cmpxchg, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.ptr, extra.expected_value, extra.new_value });
@@ -558,15 +567,7 @@ fn analyzeInst(
const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs });
},
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> {
const ty_pl = inst_datas[inst].ty_pl;
const extra = a.air.extraData(Air.Bin, ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},

.br => {
const br = inst_datas[inst].br;
return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none });
70 src/Sema.zig
@@ -10610,28 +10610,55 @@ fn analyzePtrArithmetic(
// TODO if the operand is comptime-known to be negative, or is a negative int,
// coerce to isize instead of usize.
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
// TODO adjust the return type according to alignment and other factors
const target = sema.mod.getTarget();
const runtime_src = rs: {
if (try sema.resolveMaybeUndefVal(block, ptr_src, ptr)) |ptr_val| {
if (try sema.resolveMaybeUndefVal(block, offset_src, offset)) |offset_val| {
const ptr_ty = sema.typeOf(ptr);
const new_ptr_ty = ptr_ty; // TODO modify alignment
const opt_ptr_val = try sema.resolveMaybeUndefVal(block, ptr_src, ptr);
const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
const ptr_ty = sema.typeOf(ptr);
const ptr_info = ptr_ty.ptrInfo().data;
const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Array)
ptr_info.pointee_type.childType()
else
ptr_info.pointee_type;

if (ptr_val.isUndef() or offset_val.isUndef()) {
return sema.addConstUndef(new_ptr_ty);
}
const new_ptr_ty = t: {
// Calculate the new pointer alignment.
if (ptr_info.@"align" == 0) {
// ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
break :t ptr_ty;
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
const elem_size = elem_ty.abiSize(target);
const addend = if (opt_off_val) |off_val| a: {
const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(target));
break :a elem_size * off_int;
} else elem_size;

// The resulting pointer is aligned to the gcd between the offset (an
// arbitrary number) and the alignment factor (always a power of two,
// non-zero).
const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, addend | ptr_info.@"align"));

break :t try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = ptr_info.pointee_type,
.sentinel = ptr_info.sentinel,
.@"align" = new_align,
.@"addrspace" = ptr_info.@"addrspace",
.mutable = ptr_info.mutable,
.@"allowzero" = ptr_info.@"allowzero",
.@"volatile" = ptr_info.@"volatile",
.size = ptr_info.size,
});
};

const runtime_src = rs: {
if (opt_ptr_val) |ptr_val| {
if (opt_off_val) |offset_val| {
if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty);

const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target));
// TODO I tried to put this check earlier but then the LLVM backend generated invalid instructions
if (offset_int == 0) return ptr;
if (try ptr_val.getUnsignedIntAdvanced(target, sema.kit(block, ptr_src))) |addr| {
const ptr_child_ty = ptr_ty.childType();
const elem_ty = if (ptr_ty.isSinglePointer() and ptr_child_ty.zigTypeTag() == .Array)
ptr_child_ty.childType()
else
ptr_child_ty;

const elem_size = elem_ty.abiSize(target);
const new_addr = switch (air_tag) {
.ptr_add => addr + elem_size * offset_int,
@@ -10651,7 +10678,16 @@ fn analyzePtrArithmetic(
};

try sema.requireRuntimeBlock(block, runtime_src);
return block.addBinOp(air_tag, ptr, offset);
return block.addInst(.{
.tag = air_tag,
.data = .{ .ty_pl = .{
.ty = try sema.addType(new_ptr_ty),
.payload = try sema.addExtra(Air.Bin{
.lhs = ptr,
.rhs = offset,
}),
} },
});
}

fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
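A hedged worked example of the alignment rule above (the helper and test below are illustrative, not part of the commit): the result alignment is the largest power of two that divides both the byte addend and the original alignment.

    const std = @import("std");

    // Mirrors the Sema computation above: new_align = 1 << ctz(addend | old_align).
    fn newPtrAlign(addend: u64, old_align: u64) u64 {
        return @as(u64, 1) << @intCast(u6, @ctz(u64, addend | old_align));
    }

    test "ptr_add result alignment example" {
        // Advancing an align(8) pointer to u16 by 3 elements adds 6 bytes:
        // 6 | 8 == 0b1110, ctz == 1, so only align(2) can be guaranteed.
        try std.testing.expectEqual(@as(u64, 2), newPtrAlign(6, 8));
    }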
@@ -545,18 +545,30 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.addwrap => try self.airBinOp(inst),
.add => try self.airBinOp(inst, .add),
.addwrap => try self.airBinOp(inst, .addwrap),
.sub => try self.airBinOp(inst, .sub),
.subwrap => try self.airBinOp(inst, .subwrap),
.mul => try self.airBinOp(inst, .mul),
.mulwrap => try self.airBinOp(inst, .mulwrap),
.shl => try self.airBinOp(inst, .shl),
.shl_exact => try self.airBinOp(inst, .shl_exact),
.bool_and => try self.airBinOp(inst, .bool_and),
.bool_or => try self.airBinOp(inst, .bool_or),
.bit_and => try self.airBinOp(inst, .bit_and),
.bit_or => try self.airBinOp(inst, .bit_or),
.xor => try self.airBinOp(inst, .xor),
.shr => try self.airBinOp(inst, .shr),
.shr_exact => try self.airBinOp(inst, .shr_exact),

.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),

.add_sat => try self.airAddSat(inst),
.sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airBinOp(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airBinOp(inst),
.mulwrap => try self.airBinOp(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airBinOp(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
@@ -595,13 +607,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),

.bool_and => try self.airBinOp(inst),
.bool_or => try self.airBinOp(inst),
.bit_and => try self.airBinOp(inst),
.bit_or => try self.airBinOp(inst),
.xor => try self.airBinOp(inst),
.shr, .shr_exact => try self.airBinOp(inst),

.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
@@ -1260,11 +1265,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
fn binOpRegister(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
const rhs_is_register = rhs == .register;
@@ -1284,9 +1289,8 @@ fn binOpRegister(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];

const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.lhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.lhs).?;
} else null;

const raw_reg = try self.register_manager.allocReg(track_inst);
@@ -1300,9 +1304,8 @@ fn binOpRegister(
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);

const rhs_reg = if (rhs_is_register) rhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.rhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.rhs).?;
} else null;

const raw_reg = try self.register_manager.allocReg(track_inst);
@@ -1317,15 +1320,13 @@ fn binOpRegister(

const dest_reg = switch (mir_tag) {
.cmp_shifted_register => undefined, // cmp has no destination register
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;

if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
break :blk lhs_reg;
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
} else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
break :blk rhs_reg;
} else {
const raw_reg = try self.register_manager.allocReg(inst);
const raw_reg = try self.register_manager.allocReg(md.inst);
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
}
} else blk: {
@@ -1407,11 +1408,11 @@ fn binOpRegister(
fn binOpImmediate(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
lhs_and_rhs_swapped: bool,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;

@@ -1424,10 +1425,9 @@ fn binOpImmediate(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];

const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
).?;
} else null;

@@ -1443,18 +1443,16 @@ fn binOpImmediate(

const dest_reg = switch (mir_tag) {
.cmp_immediate => undefined, // cmp has no destination register
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;

else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(
inst,
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
md.inst,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
if (lhs_and_rhs_swapped) 1 else 0,
lhs,
)) {
break :blk lhs_reg;
} else {
const raw_reg = try self.register_manager.allocReg(inst);
const raw_reg = try self.register_manager.allocReg(md.inst);
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
}
} else blk: {
@@ -1498,6 +1496,12 @@ fn binOpImmediate(
return MCValue{ .register = dest_reg };
}

const BinOpMetadata = struct {
inst: Air.Inst.Index,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
};

/// For all your binary operation needs, this function will generate
/// the corresponding Mir instruction(s). Returns the location of the
/// result.
@@ -1513,11 +1517,11 @@ fn binOpImmediate(
fn binOp(
self: *Self,
tag: Air.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
switch (tag) {
@@ -1562,12 +1566,12 @@ fn binOp(
};

if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag_immediate, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag_immediate, rhs, lhs, rhs_ty, true, metadata);
} else {
return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag_register, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@@ -1586,7 +1590,7 @@ fn binOp(
// TODO add optimisations for multiplication
// with immediates, for example a * 2 can be
// lowered to a << 1
return try self.binOpRegister(.mul, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(.mul, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
@@ -1606,7 +1610,7 @@ fn binOp(
};

// Generate an add/sub/mul
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);

// Truncate if necessary
switch (lhs_ty.zigTypeTag()) {
@@ -1642,7 +1646,7 @@ fn binOp(
else => unreachable,
};

return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
@@ -1678,9 +1682,9 @@ fn binOp(
};

if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag_immediate, lhs, rhs, lhs_ty, false, metadata);
} else {
return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag_register, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@@ -1699,7 +1703,7 @@ fn binOp(
};

// Generate a shl_exact/shr_exact
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);

// Truncate if necessary
switch (tag) {
@@ -1735,7 +1739,7 @@ fn binOp(
else => unreachable,
};

return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag_register, lhs, rhs, lhs_ty, rhs_ty, metadata);
},
else => unreachable,
}
@@ -1759,12 +1763,12 @@ fn binOp(
else => unreachable,
};

return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
// convert the offset into a byte offset by
// multiplying it with elem_size
const offset = try self.binOp(.mul, null, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize);
const addr = try self.binOp(tag, null, lhs, offset, Type.initTag(.manyptr_u8), Type.usize);
const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
return addr;
}
},
@@ -1775,8 +1779,7 @@ fn binOp(
}
}

fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -1786,7 +1789,30 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.inst = inst,
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);

const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.inst = inst,
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1841,7 +1867,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
.sub_with_overflow => .sub,
else => unreachable,
};
const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@@ -1855,7 +1881,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);

// cmp dest, truncated
_ = try self.binOp(.cmp_eq, null, dest, .{ .register = truncated_reg }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);

try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
@@ -1894,12 +1920,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {

const dest = blk: {
if (rhs_immediate_ok) {
break :blk try self.binOpImmediate(mir_tag_immediate, null, lhs, rhs, lhs_ty, false);
break :blk try self.binOpImmediate(mir_tag_immediate, lhs, rhs, lhs_ty, false, null);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
break :blk try self.binOpImmediate(mir_tag_immediate, null, rhs, lhs, rhs_ty, true);
break :blk try self.binOpImmediate(mir_tag_immediate, rhs, lhs, rhs_ty, true, null);
} else {
break :blk try self.binOpRegister(mir_tag_register, null, lhs, rhs, lhs_ty, rhs_ty);
break :blk try self.binOpRegister(mir_tag_register, lhs, rhs, lhs_ty, rhs_ty, null);
}
};

@@ -1952,7 +1978,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.unsigned => .umull,
};

const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@@ -2136,11 +2162,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {

_ = try self.binOp(
.cmp_eq,
null,
.{ .register = dest_high_reg },
.{ .immediate = 0 },
Type.usize,
Type.usize,
null,
);

if (int_info.bits < 64) {
@@ -2156,11 +2182,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {

_ = try self.binOp(
.cmp_eq,
null,
.{ .register = dest_high_reg },
.{ .immediate = 0 },
Type.usize,
Type.usize,
null,
);
}
},
@@ -2218,16 +2244,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
self.compare_flags_inst = null;

// lsl dest, lhs, rhs
const dest = try self.binOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOp(.shl, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);

// asr/lsr reconstructed, dest, rhs
const reconstructed = try self.binOp(.shr, null, dest, rhs, lhs_ty, rhs_ty);
const reconstructed = try self.binOp(.shr, dest, rhs, lhs_ty, rhs_ty, null);

// cmp lhs, reconstructed
_ = try self.binOp(.cmp_eq, null, lhs, reconstructed, lhs_ty, lhs_ty);
_ = try self.binOp(.cmp_eq, lhs, reconstructed, lhs_ty, lhs_ty, null);

try self.genSetStack(lhs_ty, stack_offset, dest);
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{
@@ -2489,7 +2515,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
switch (elem_size) {
else => {
const dest = try self.allocRegOrMem(inst, true);
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize);
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
try self.load(dest, addr, slice_ptr_field_type);

break :result dest;
@@ -2933,11 +2959,11 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde

const dest = try self.binOp(
.add,
null,
.{ .register = addr_reg },
.{ .register = offset_reg },
Type.usize,
Type.usize,
null,
);

break :result dest;
@@ -3302,7 +3328,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {

const int_info = int_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
_ = try self.binOp(.cmp_eq, inst, lhs, rhs, int_ty, int_ty);
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
.inst = inst,
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
});

try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
@@ -552,21 +552,34 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.addwrap => try self.airBinOp(inst),
.add, => try self.airBinOp(inst, .add),
.addwrap => try self.airBinOp(inst, .addwrap),
.sub, => try self.airBinOp(inst, .sub),
.subwrap => try self.airBinOp(inst, .subwrap),
.mul => try self.airBinOp(inst, .mul),
.mulwrap => try self.airBinOp(inst, .mulwrap),
.shl => try self.airBinOp(inst, .shl),
.shl_exact => try self.airBinOp(inst, .shl_exact),
.bool_and => try self.airBinOp(inst, .bool_and),
.bool_or => try self.airBinOp(inst, .bool_or),
.bit_and => try self.airBinOp(inst, .bit_and),
.bit_or => try self.airBinOp(inst, .bit_or),
.xor => try self.airBinOp(inst, .xor),
.shr => try self.airBinOp(inst, .shr),
.shr_exact => try self.airBinOp(inst, .shr_exact),

.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),

.min => try self.airMinMax(inst),
.max => try self.airMinMax(inst),

.add_sat => try self.airAddSat(inst),
.sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airBinOp(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airBinOp(inst),
.mulwrap => try self.airBinOp(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airBinOp(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMinMax(inst),
.max => try self.airMinMax(inst),
.slice => try self.airSlice(inst),

.sqrt,
@@ -602,13 +615,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),

.bool_and => try self.airBinOp(inst),
.bool_or => try self.airBinOp(inst),
.bit_and => try self.airBinOp(inst),
.bit_or => try self.airBinOp(inst),
.xor => try self.airBinOp(inst),
.shr, .shr_exact => try self.airBinOp(inst),

.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
@@ -1260,7 +1266,7 @@ fn minMax(
// register.
assert(lhs_reg != rhs_reg); // see note above

_ = try self.binOpRegister(.cmp, null, .{ .register = lhs_reg }, .{ .register = rhs_reg }, lhs_ty, rhs_ty);
_ = try self.binOpRegister(.cmp, .{ .register = lhs_reg }, .{ .register = rhs_reg }, lhs_ty, rhs_ty, null);

const cond_choose_lhs: Condition = switch (tag) {
.max => switch (int_info.signedness) {
@@ -1340,15 +1346,40 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);

const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);

const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1402,7 +1433,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
.sub_with_overflow => .sub,
else => unreachable,
};
const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@@ -1415,7 +1446,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);

// cmp dest, truncated
_ = try self.binOp(.cmp_eq, null, dest, .{ .register = truncated_reg }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);

try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
@@ -1448,12 +1479,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {

const dest = blk: {
if (rhs_immediate_ok) {
break :blk try self.binOpImmediate(mir_tag, null, lhs, rhs, lhs_ty, false);
break :blk try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, null);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
break :blk try self.binOpImmediate(mir_tag, null, rhs, lhs, rhs_ty, true);
break :blk try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, null);
} else {
break :blk try self.binOpRegister(mir_tag, null, lhs, rhs, lhs_ty, rhs_ty);
break :blk try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, null);
}
};

@@ -1507,7 +1538,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.unsigned => .mul,
};

const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@@ -1520,7 +1551,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);

// cmp dest, truncated
_ = try self.binOp(.cmp_eq, null, dest, .{ .register = truncated_reg }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);

try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
@@ -1594,7 +1625,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });

// cmp truncated, rdlo
_ = try self.binOp(.cmp_eq, null, .{ .register = truncated_reg }, .{ .register = rdlo }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, .{ .register = truncated_reg }, .{ .register = rdlo }, Type.usize, Type.usize, null);

// mov rdlo, #0
_ = try self.addInst(.{
@@ -1618,7 +1649,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
});

// cmp rdhi, #0
_ = try self.binOp(.cmp_eq, null, .{ .register = rdhi }, .{ .immediate = 0 }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, .{ .register = rdhi }, .{ .immediate = 0 }, Type.usize, Type.usize, null);

// movne rdlo, #1
_ = try self.addInst(.{
@@ -1677,16 +1708,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
self.compare_flags_inst = null;

// lsl dest, lhs, rhs
const dest = try self.binOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOp(.shl, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_lock);

// asr/lsr reconstructed, dest, rhs
const reconstructed = try self.binOp(.shr, null, dest, rhs, lhs_ty, rhs_ty);
const reconstructed = try self.binOp(.shr, dest, rhs, lhs_ty, rhs_ty, null);

// cmp lhs, reconstructed
_ = try self.binOp(.cmp_eq, null, lhs, reconstructed, lhs_ty, lhs_ty);
_ = try self.binOp(.cmp_eq, lhs, reconstructed, lhs_ty, lhs_ty, null);

try self.genSetStack(lhs_ty, stack_offset, dest);
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
@@ -2031,7 +2062,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
},
else => {
const dest = try self.allocRegOrMem(inst, true);
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize);
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
try self.load(dest, addr, slice_ptr_field_type);

break :result dest;
@@ -2051,7 +2082,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {

const slice_ty = self.air.typeOf(extra.lhs);

const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ty, Type.usize);
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ty, Type.usize, null);
break :result addr;
};
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
@@ -2079,7 +2110,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {

const ptr_ty = self.air.typeOf(extra.lhs);

const addr = try self.binOp(.ptr_add, null, ptr_mcv, index_mcv, ptr_ty, Type.usize);
const addr = try self.binOp(.ptr_add, ptr_mcv, index_mcv, ptr_ty, Type.usize, null);
break :result addr;
};
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
@@ -2411,11 +2442,11 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde

const dest = try self.binOp(
.add,
null,
.{ .register = addr_reg },
.{ .register = offset_reg },
Type.usize,
Type.usize,
null,
);

break :result dest;
@@ -2514,11 +2545,11 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
fn binOpRegister(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
const rhs_is_register = rhs == .register;
@@ -2532,9 +2563,8 @@ fn binOpRegister(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];

const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.lhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.lhs).?;
} else null;

const reg = try self.register_manager.allocReg(track_inst);
@@ -2547,9 +2577,8 @@ fn binOpRegister(
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);

const rhs_reg = if (rhs_is_register) rhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.rhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.rhs).?;
} else null;

const reg = try self.register_manager.allocReg(track_inst);
@@ -2563,15 +2592,13 @@ fn binOpRegister(

const dest_reg = switch (mir_tag) {
.cmp => .r0, // cmp has no destination regardless
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;

if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
break :blk lhs_reg;
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
} else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
break :blk rhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
break :blk try self.register_manager.allocReg(md.inst);
}
} else try self.register_manager.allocReg(null),
};
@@ -2634,11 +2661,11 @@ fn binOpRegister(
fn binOpImmediate(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
lhs_and_rhs_swapped: bool,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;

@@ -2651,10 +2678,9 @@ fn binOpImmediate(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];

const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
).?;
} else null;

@@ -2669,18 +2695,16 @@ fn binOpImmediate(

const dest_reg = switch (mir_tag) {
.cmp => .r0, // cmp has no destination reg
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;

else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(
inst,
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
md.inst,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
if (lhs_and_rhs_swapped) 1 else 0,
lhs,
)) {
break :blk lhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
break :blk try self.register_manager.allocReg(md.inst);
}
} else try self.register_manager.allocReg(null),
};
@@ -2720,6 +2744,12 @@ fn binOpImmediate(
return MCValue{ .register = dest_reg };
}

const BinOpMetadata = struct {
inst: Air.Inst.Index,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
};
/// For all your binary operation needs, this function will generate
/// the corresponding Mir instruction(s). Returns the location of the
/// result.
@@ -2735,11 +2765,11 @@ fn binOpImmediate(
fn binOp(
self: *Self,
tag: Air.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) InnerError!MCValue {
switch (tag) {
.add,
@@ -2780,12 +2810,12 @@ fn binOp(
};

if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
@@ -2806,7 +2836,7 @@ fn binOp(
// TODO add optimisations for multiplication
// with immediates, for example a * 2 can be
// lowered to a << 1
return try self.binOpRegister(.mul, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(.mul, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
}
@@ -2826,7 +2856,7 @@ fn binOp(
};

// Generate an add/sub/mul
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);

// Truncate if necessary
switch (lhs_ty.zigTypeTag()) {
@@ -2869,12 +2899,12 @@ fn binOp(
};

if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
@@ -2903,9 +2933,9 @@ fn binOp(
};

if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
@@ -2924,7 +2954,7 @@ fn binOp(
};

// Generate a shl_exact/shr_exact
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);

// Truncate if necessary
switch (tag) {
@@ -2964,12 +2994,12 @@ fn binOp(
};

if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
},
else => unreachable,
@@ -2994,12 +3024,12 @@ fn binOp(
else => unreachable,
};

return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
// convert the offset into a byte offset by
// multiplying it with elem_size
const offset = try self.binOp(.mul, null, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize);
const addr = try self.binOp(tag, null, lhs, offset, Type.initTag(.manyptr_u8), Type.usize);
const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
return addr;
}
},
@@ -3575,7 +3605,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;

_ = try self.binOp(.cmp_eq, inst, lhs, rhs, int_ty, int_ty);
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});

break :result switch (int_info.signedness) {
.signed => MCValue{ .compare_flags_signed = op },
@@ -3865,7 +3899,7 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
}

const error_mcv = try self.errUnionErr(operand, ty);
_ = try self.binOp(.cmp_eq, null, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type);
_ = try self.binOp(.cmp_eq, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type, null);
return MCValue{ .compare_flags_unsigned = .gt };
}
@@ -481,10 +481,14 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),

.add => try self.airBinOp(inst, .add),
.sub => try self.airBinOp(inst, .sub),

.addwrap => try self.airAddWrap(inst),
.add_sat => try self.airAddSat(inst),
.sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airSubWrap(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airMul(inst),
@@ -1091,8 +1095,7 @@ fn binOp(
}
}

fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -1103,6 +1106,18 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);

const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch});
@@ -483,10 +483,13 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),

.add => try self.airBinOp(inst, .add),
.addwrap => @panic("TODO try self.airAddWrap(inst)"),
.add_sat => @panic("TODO try self.airAddSat(inst)"),
.sub, .ptr_sub => @panic("TODO try self.airBinOp(inst)"),
.sub => @panic("TODO try self.airBinOp(inst)"),
.subwrap => @panic("TODO try self.airSubWrap(inst)"),
.sub_sat => @panic("TODO try self.airSubSat(inst)"),
.mul => @panic("TODO try self.airMul(inst)"),
@@ -827,18 +830,38 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, mcv, .{ .none, .none, .none });
}

fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);

const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -1030,7 +1053,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
|
||||
|
||||
var int_buffer: Type.Payload.Bits = undefined;
|
||||
const int_ty = switch (lhs_ty.zigTypeTag()) {
|
||||
.Vector => unreachable, // Should be handled by cmp_vector?
|
||||
.Vector => unreachable, // Handled by cmp_vector.
|
||||
.Enum => lhs_ty.intTagType(&int_buffer),
|
||||
.Int => lhs_ty,
|
||||
.Bool => Type.initTag(.u1),
|
||||
@ -1053,7 +1076,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
|
||||
|
||||
const int_info = int_ty.intInfo(self.target.*);
|
||||
if (int_info.bits <= 64) {
|
||||
_ = try self.binOp(.cmp_eq, inst, lhs, rhs, int_ty, int_ty);
|
||||
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
|
||||
.lhs = bin_op.lhs,
|
||||
.rhs = bin_op.rhs,
|
||||
.inst = inst,
|
||||
});
|
||||
|
||||
try self.spillCompareFlagsIfOccupied();
|
||||
self.compare_flags_inst = inst;
|
||||
@ -1426,7 +1453,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
// TODO skip the ptr_add emission entirely and use native addressing modes
|
||||
// i.e sllx/mulx then R+R or scale immediate then R+I
|
||||
const dest = try self.allocRegOrMem(inst, true);
|
||||
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize);
|
||||
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
|
||||
try self.load(dest, addr, slice_ptr_field_type);
|
||||
|
||||
break :result dest;
|
||||
@ -1595,6 +1622,12 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
return MCValue{ .stack_offset = stack_offset };
}

const BinOpMetadata = struct {
inst: Air.Inst.Index,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
};

/// For all your binary operation needs, this function will generate
/// the corresponding Mir instruction(s). Returns the location of the
/// result.
@ -1610,11 +1643,11 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
fn binOp(
self: *Self,
tag: Air.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
switch (tag) {
@ -1649,13 +1682,13 @@ fn binOp(
};

if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
} else {
// TODO convert large immediates to register before adding
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@ -1683,10 +1716,10 @@ fn binOp(
// If it's a power of two immediate then we emit an shl instead
// TODO add similar checks for LHS
if (new_rhs == .immediate and math.isPowerOfTwo(new_rhs.immediate)) {
return try self.binOp(.shl, maybe_inst, new_lhs, .{ .immediate = math.log2(new_rhs.immediate) }, new_lhs_ty, Type.usize);
return try self.binOp(.shl, new_lhs, .{ .immediate = math.log2(new_rhs.immediate) }, new_lhs_ty, Type.usize, metadata);
}

return try self.binOpRegister(.mulx, maybe_inst, new_lhs, new_rhs, new_lhs_ty, new_rhs_ty);
return try self.binOpRegister(.mulx, new_lhs, new_rhs, new_lhs_ty, new_rhs_ty, metadata);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
@ -1711,13 +1744,13 @@ fn binOp(
else => unreachable,
};

return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
// convert the offset into a byte offset by
// multiplying it with elem_size

const offset = try self.binOp(.mul, null, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize);
const addr = try self.binOp(tag, null, lhs, offset, Type.initTag(.manyptr_u8), Type.usize);
const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
return addr;
}
},
@ -1732,7 +1765,7 @@ fn binOp(
};

// Generate a shl_exact/shr_exact
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);

// Truncate if necessary
switch (tag) {
@ -1768,9 +1801,9 @@ fn binOp(
};

if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@ -1792,18 +1825,17 @@ fn binOp(
/// op dest, lhs, #rhs_imm
///
/// Set lhs_and_rhs_swapped to true iff inst.bin_op.lhs corresponds to
/// rhs and vice versa. This parameter is only used when maybe_inst !=
/// null.
/// rhs and vice versa. This parameter is only used when metadata != null.
///
/// Asserts that generating an instruction of that form is possible.
fn binOpImmediate(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
lhs_and_rhs_swapped: bool,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;

@ -1816,10 +1848,9 @@ fn binOpImmediate(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];

const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
).?;
} else null;

@ -1833,18 +1864,16 @@ fn binOpImmediate(
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);

const dest_reg = switch (mir_tag) {
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;

else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(
inst,
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
md.inst,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
if (lhs_and_rhs_swapped) 1 else 0,
lhs,
)) {
break :blk lhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
break :blk try self.register_manager.allocReg(md.inst);
}
} else blk: {
break :blk try self.register_manager.allocReg(null);
@ -1896,11 +1925,11 @@ fn binOpImmediate(
fn binOpRegister(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
const rhs_is_register = rhs == .register;
@ -1920,9 +1949,8 @@ fn binOpRegister(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];

const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.lhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.lhs).?;
} else null;

const reg = try self.register_manager.allocReg(track_inst);
@ -1934,9 +1962,8 @@ fn binOpRegister(
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);

const rhs_reg = if (rhs_is_register) rhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.rhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.rhs).?;
} else null;

const reg = try self.register_manager.allocReg(track_inst);
@ -1948,15 +1975,13 @@ fn binOpRegister(
defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);

const dest_reg = switch (mir_tag) {
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;

if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
break :blk lhs_reg;
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
} else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
break :blk rhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
break :blk try self.register_manager.allocReg(md.inst);
}
} else blk: {
break :blk try self.register_manager.allocReg(null);
@ -3069,11 +3094,11 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde

const dest = try self.binOp(
.add,
null,
.{ .register = addr_reg },
.{ .register = offset_reg },
Type.usize,
Type.usize,
null,
);

break :result dest;

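As an aside on the lowering visible in binOp above: an element-unit offset is scaled to a byte offset by multiplying with the element size, and a power-of-two immediate lets the multiply be strength-reduced to a shift. A minimal, self-contained sketch of that arithmetic follows; byteOffset is an illustrative name, not compiler code:

const std = @import("std");

// Sketch of the offset scaling used above: multiply by the element size,
// or shift left by log2(elem_size) when the size is a power of two.
// elem_size is assumed to be nonzero.
fn byteOffset(elem_offset: usize, elem_size: usize) usize {
    if (std.math.isPowerOfTwo(elem_size)) {
        return elem_offset << std.math.log2_int(usize, elem_size);
    }
    return elem_offset * elem_size;
}

test "byteOffset sketch" {
    try std.testing.expectEqual(@as(usize, 12), byteOffset(3, 4));
    try std.testing.expectEqual(@as(usize, 9), byteOffset(3, 3));
}
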
@ -3397,7 +3397,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {

fn airPtrBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);

@ -574,23 +574,33 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airBinOp(inst),
.addwrap => try self.airBinOp(inst),
.add_sat => try self.airAddSat(inst),
.sub => try self.airBinOp(inst),
.subwrap => try self.airBinOp(inst),
.sub_sat => try self.airSubSat(inst),
.add => try self.airBinOp(inst, .add),
.addwrap => try self.airBinOp(inst, .addwrap),
.sub => try self.airBinOp(inst, .sub),
.subwrap => try self.airBinOp(inst, .subwrap),
.bool_and => try self.airBinOp(inst, .bool_and),
.bool_or => try self.airBinOp(inst, .bool_or),
.bit_and => try self.airBinOp(inst, .bit_and),
.bit_or => try self.airBinOp(inst, .bit_or),
.xor => try self.airBinOp(inst, .xor),

.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),

.shr, .shr_exact => try self.airShlShrBinOp(inst),
.shl, .shl_exact => try self.airShlShrBinOp(inst),

.mul => try self.airMulDivBinOp(inst),
.mulwrap => try self.airMulDivBinOp(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airMulDivBinOp(inst),
.mod => try self.airMulDivBinOp(inst),
.shl, .shl_exact => try self.airShlShrBinOp(inst),

.add_sat => try self.airAddSat(inst),
.sub_sat => try self.airSubSat(inst),
.mul_sat => try self.airMulSat(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
.ptr_add => try self.airBinOp(inst),
.ptr_sub => try self.airBinOp(inst),
.slice => try self.airSlice(inst),

.sqrt,
@ -626,13 +636,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),

.bool_and => try self.airBinOp(inst),
.bool_or => try self.airBinOp(inst),
.bit_and => try self.airBinOp(inst),
.bit_or => try self.airBinOp(inst),
.xor => try self.airBinOp(inst),
.shr, .shr_exact => try self.airShlShrBinOp(inst),

.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
@ -1231,21 +1234,26 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;

if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}

const tag = self.air.instructions.items(.tag)[inst];
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

const result = try self.genBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;

if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}

const result = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

@ -1316,13 +1324,12 @@ fn airAddSubShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.spillRegisters(1, .{.rcx});
}

const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);

const partial: MCValue = switch (tag) {
.add_with_overflow => try self.genBinOp(.add, null, lhs, rhs, ty, ty),
.sub_with_overflow => try self.genBinOp(.sub, null, lhs, rhs, ty, ty),
.add_with_overflow => try self.genBinOp(null, .add, bin_op.lhs, bin_op.rhs),
.sub_with_overflow => try self.genBinOp(null, .sub, bin_op.lhs, bin_op.rhs),
.shl_with_overflow => blk: {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const shift_ty = self.air.typeOf(bin_op.rhs);
break :blk try self.genShiftBinOp(.shl, null, lhs, rhs, ty, shift_ty);
},
@ -3310,13 +3317,15 @@ fn genMulDivBinOp(
/// Result is always a register.
fn genBinOp(
self: *Self,
tag: Air.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
tag: Air.Inst.Tag,
lhs_air: Air.Inst.Ref,
rhs_air: Air.Inst.Ref,
) !MCValue {
const lhs = try self.resolveInst(lhs_air);
const rhs = try self.resolveInst(rhs_air);
const lhs_ty = self.air.typeOf(lhs_air);
const rhs_ty = self.air.typeOf(rhs_air);
if (lhs_ty.zigTypeTag() == .Vector or lhs_ty.zigTypeTag() == .Float) {
return self.fail("TODO implement genBinOp for {}", .{lhs_ty.fmtDebug()});
}
@ -3352,11 +3361,10 @@ fn genBinOp(
var flipped: bool = false;
const dst_mcv: MCValue = blk: {
if (maybe_inst) |inst| {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
if (self.reuseOperand(inst, bin_op.lhs, 0, lhs) and lhs.isRegister()) {
if (self.reuseOperand(inst, lhs_air, 0, lhs) and lhs.isRegister()) {
break :blk lhs;
}
if (is_commutative and self.reuseOperand(inst, bin_op.rhs, 1, rhs) and rhs.isRegister()) {
if (is_commutative and self.reuseOperand(inst, rhs_air, 1, rhs) and rhs.isRegister()) {
flipped = true;
break :blk rhs;
}

@ -1711,21 +1711,18 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.unreach => try airUnreach(f),
.fence => try airFence(f, inst),

// TODO use a different strategy for add that communicates to the optimizer
// that wrapping is UB.
.add => try airBinOp (f, inst, " + "),
.ptr_add => try airPtrAddSub (f, inst, " + "),
// TODO use a different strategy for sub that communicates to the optimizer
// that wrapping is UB.
.sub => try airBinOp (f, inst, " - "),
.ptr_sub => try airPtrAddSub (f, inst, " - "),
// TODO use a different strategy for mul that communicates to the optimizer
// that wrapping is UB.
.mul => try airBinOp (f, inst, " * "),
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
.ptr_add => try airPtrAddSub(f, inst, " + "),
.ptr_sub => try airPtrAddSub(f, inst, " - "),

// TODO use a different strategy for add, sub, mul, div
// that communicates to the optimizer that wrapping is UB.
.add => try airBinOp (f, inst, " + "),
.sub => try airBinOp (f, inst, " - "),
.mul => try airBinOp (f, inst, " * "),
.div_float, .div_exact => try airBinOp( f, inst, " / "),
.div_trunc => blk: {
.rem => try airBinOp( f, inst, " % "),

.div_trunc => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const lhs_ty = f.air.typeOf(bin_op.lhs);
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
@ -1735,9 +1732,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
else
try airBinOpBuiltinCall(f, inst, "div_trunc");
},
.div_floor => try airBinOpBuiltinCall(f, inst, "div_floor"),
.rem => try airBinOp( f, inst, " % "),
.mod => try airBinOpBuiltinCall(f, inst, "mod"),
.div_floor => try airBinOpBuiltinCall(f, inst, "div_floor"),
.mod => try airBinOpBuiltinCall(f, inst, "mod"),

.addwrap => try airWrapOp(f, inst, " + ", "addw_"),
.subwrap => try airWrapOp(f, inst, " - ", "subw_"),
@ -2617,10 +2613,10 @@ fn airEquality(
}

fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue {
if (f.liveness.isUnused(inst))
return CValue.none;
if (f.liveness.isUnused(inst)) return CValue.none;

const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);

@ -5679,7 +5679,8 @@ pub const FuncGen = struct {
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;

const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
@ -5698,7 +5699,8 @@ pub const FuncGen = struct {
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;

const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const negative_offset = self.builder.buildNeg(offset, "");

@ -114,8 +114,6 @@ const Writer = struct {
.div_exact,
.rem,
.mod,
.ptr_add,
.ptr_sub,
.bit_and,
.bit_or,
.xor,
@ -231,6 +229,12 @@ const Writer = struct {
.slice,
.slice_elem_ptr,
.ptr_elem_ptr,
.ptr_add,
.ptr_sub,
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> try w.writeTyPlBin(s, inst),

.call,
@ -275,12 +279,6 @@ const Writer = struct {
.reduce => try w.writeReduce(s, inst),
.cmp_vector => try w.writeCmpVector(s, inst),

.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> try w.writeOverflow(s, inst),

.dbg_block_begin, .dbg_block_end => {},
}
}
@ -478,15 +476,6 @@ const Writer = struct {
try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) });
}

fn writeOverflow(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const extra = w.air.extraData(Air.Bin, ty_pl.payload).data;

try w.writeOperand(s, inst, 0, extra.lhs);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, extra.rhs);
}

fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;

@ -1813,27 +1813,6 @@ pub const Value = extern union {
};
}

/// Asserts the value is numeric
pub fn isZero(self: Value) bool {
return switch (self.tag()) {
.zero, .the_only_possible_value => true,
.one => false,

.int_u64 => self.castTag(.int_u64).?.data == 0,
.int_i64 => self.castTag(.int_i64).?.data == 0,

.float_16 => self.castTag(.float_16).?.data == 0,
.float_32 => self.castTag(.float_32).?.data == 0,
.float_64 => self.castTag(.float_64).?.data == 0,
.float_80 => self.castTag(.float_80).?.data == 0,
.float_128 => self.castTag(.float_128).?.data == 0,

.int_big_positive => self.castTag(.int_big_positive).?.asBigInt().eqZero(),
.int_big_negative => self.castTag(.int_big_negative).?.asBigInt().eqZero(),
else => unreachable,
};
}

pub fn orderAgainstZero(lhs: Value) std.math.Order {
return orderAgainstZeroAdvanced(lhs, null) catch unreachable;
}
@ -3442,7 +3421,6 @@ pub const Value = extern union {
const info = ty.intInfo(target);

if (info.bits == 0) {
assert(val.isZero()); // Sema should guarantee
return val;
}

@ -16,11 +16,22 @@ test "global variable alignment" {
const slice = @as(*align(4) [1]u8, &foo)[0..];
comptime try expect(@TypeOf(slice) == *align(4) [1]u8);
}
{
var runtime_zero: usize = 0;
const slice = @as(*align(4) [1]u8, &foo)[runtime_zero..];
comptime try expect(@TypeOf(slice) == []align(4) u8);
}
}

test "slicing array of length 1 can assume runtime index is always zero" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO

// TODO reevaluate this test case, because notice that you can
// change `runtime_zero` to be `1` and the test still passes for stage1.
// Reconsider also this code:
// var array: [4]u8 = undefined;
// var runtime: usize = 4;
// var ptr = array[runtime..];
// _ = ptr;

var runtime_zero: usize = 0;
const slice = @as(*align(4) [1]u8, &foo)[runtime_zero..];
comptime try expect(@TypeOf(slice) == []align(4) u8);
}

test "default alignment allows unspecified in type syntax" {

@ -377,8 +377,6 @@ test "pointer to array at fixed address" {
}

test "pointer arithmetic affects the alignment" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO

{
var ptr: [*]align(8) u32 = undefined;
var x: usize = 1;

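The behavior test above exercises the new result-type rule for pointer arithmetic. Below is a minimal sketch of the expected semantics, not the verbatim test body, assuming the adjusted alignment is the largest power of two dividing both the original alignment and the byte offset being added (here a runtime multiple of @sizeOf(u32) == 4):

const std = @import("std");
const expect = std.testing.expect;

test "pointer arithmetic adjusts the result alignment (sketch)" {
    var ptr: [*]align(8) u32 = undefined;
    var x: usize = 1;

    // The base pointer keeps its declared alignment.
    try expect(@typeInfo(@TypeOf(ptr)).Pointer.alignment == 8);
    // Adding a runtime multiple of @sizeOf(u32) can only guarantee 4-byte
    // alignment, so the result type is expected to be [*]align(4) u32.
    try expect(@typeInfo(@TypeOf(ptr + x)).Pointer.alignment == 4);
}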