Merge pull request #11660 from ziglang/stage2-behavior

stage2: bug fixes towards more behavior tests passing
Andrew Kelley 2022-05-18 15:29:19 -04:00 committed by GitHub
commit 8660661af4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 624 additions and 389 deletions

View File

@ -893,6 +893,7 @@ const LinuxThreadImpl = struct {
};
fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl {
const page_size = std.mem.page_size;
const Args = @TypeOf(args);
const Instance = struct {
fn_args: Args,
@ -915,11 +916,11 @@ const LinuxThreadImpl = struct {
var instance_offset: usize = undefined;
const map_bytes = blk: {
var bytes: usize = std.mem.page_size;
var bytes: usize = page_size;
guard_offset = bytes;
bytes += std.math.max(std.mem.page_size, config.stack_size);
bytes = std.mem.alignForward(bytes, std.mem.page_size);
bytes += std.math.max(page_size, config.stack_size);
bytes = std.mem.alignForward(bytes, page_size);
stack_offset = bytes;
bytes = std.mem.alignForward(bytes, linux.tls.tls_image.alloc_align);
@ -930,7 +931,7 @@ const LinuxThreadImpl = struct {
instance_offset = bytes;
bytes += @sizeOf(Instance);
bytes = std.mem.alignForward(bytes, std.mem.page_size);
bytes = std.mem.alignForward(bytes, page_size);
break :blk bytes;
};
@ -954,7 +955,7 @@ const LinuxThreadImpl = struct {
// map everything but the guard page as read/write
os.mprotect(
mapped[guard_offset..],
@alignCast(page_size, mapped[guard_offset..]),
os.PROT.READ | os.PROT.WRITE,
) catch |err| switch (err) {
error.AccessDenied => unreachable,
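A note on the refactor above: hoisting std.mem.page_size into a local page_size and adding the @alignCast gives os.mprotect the page-aligned slice type it requires. A minimal sketch of the same layout arithmetic as a hypothetical standalone helper (the TLS area from the real code is omitted; era std.math.max/std.mem.alignForward signatures assumed):

const std = @import("std");

// Hypothetical helper mirroring the mapping layout above: a guard page at
// offset 0, then the stack, then instance data, rounded up to whole pages.
fn mapBytes(page_size: usize, stack_size: usize, instance_size: usize, instance_align: usize) usize {
    var bytes: usize = page_size; // guard page
    bytes += std.math.max(page_size, stack_size); // stack region
    bytes = std.mem.alignForward(bytes, page_size); // stack ends on a page boundary
    bytes = std.mem.alignForward(bytes, instance_align); // align instance storage
    bytes += instance_size;
    return std.mem.alignForward(bytes, page_size); // total mmap length
}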

View File

@ -345,7 +345,7 @@ const PageAllocator = struct {
// Unmap extra pages
const aligned_buffer_len = alloc_len - drop_len;
if (aligned_buffer_len > aligned_len) {
os.munmap(result_ptr[aligned_len..aligned_buffer_len]);
os.munmap(@alignCast(mem.page_size, result_ptr[aligned_len..aligned_buffer_len]));
}
const new_hint = @alignCast(mem.page_size, result_ptr + aligned_len);
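The fix here is purely type-level: os.munmap takes a slice aligned to mem.page_size, and slicing a mapping at a page-multiple offset loses that alignment information from the type, so it must be re-asserted with @alignCast. A sketch of the pattern (era two-argument @alignCast):

const std = @import("std");
const mem = std.mem;
const os = std.os;

// Sketch: unmap everything past `keep_len` bytes of a page-aligned
// mapping. `keep_len` is assumed to be a multiple of mem.page_size;
// @alignCast re-asserts the alignment the slice type dropped (checked
// at runtime in safe builds).
fn unmapTail(mapping: []align(mem.page_size) u8, keep_len: usize) void {
    os.munmap(@alignCast(mem.page_size, mapping[keep_len..]));
}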

View File

@ -113,13 +113,13 @@ pub const Inst = struct {
/// The offset is in element type units, not bytes.
/// Wrapping is undefined behavior.
/// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
/// Uses the `bin_op` field.
/// Uses the `ty_pl` field. Payload is `Bin`.
ptr_add,
/// Subtract an offset from a pointer, returning a new pointer.
/// The offset is in element type units, not bytes.
/// Wrapping is undefined behavior.
/// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
/// Uses the `bin_op` field.
/// Uses the `ty_pl` field. Payload is `Bin`.
ptr_sub,
/// Given two operands which can be floats, integers, or vectors, returns the
/// greater of the operands. For vectors it operates element-wise.
@ -916,8 +916,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.bit_and,
.bit_or,
.xor,
.ptr_add,
.ptr_sub,
.shr,
.shr_exact,
.shl,
@ -989,6 +987,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.ptr_add,
.ptr_sub,
=> return air.getRefType(datas[inst].ty_pl.ty),
.not,
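With this change, ptr_add/ptr_sub carry an explicit result type: the instruction's ty_pl.ty holds the (possibly realigned) pointer type, and the payload points at an Air.Bin with the two operands. A sketch of how a consumer decodes the new encoding (`air` and `inst` assumed in scope, as in the liveness pass and backends below):

// Decoding a ty_pl-encoded ptr_add/ptr_sub (sketch):
const ty_pl = air.instructions.items(.data)[inst].ty_pl;
const bin = air.extraData(Air.Bin, ty_pl.payload).data;
const result_ty = air.getRefType(ty_pl.ty); // result pointer type
const base = bin.lhs; // pointer operand
const offset = bin.rhs; // offset, in element-type units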

View File

@ -312,8 +312,6 @@ fn analyzeInst(
.div_exact,
.rem,
.mod,
.ptr_add,
.ptr_sub,
.bit_and,
.bit_or,
.xor,
@ -441,6 +439,21 @@ fn analyzeInst(
return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });
},
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.ptr_add,
.ptr_sub,
.ptr_elem_ptr,
.slice_elem_ptr,
.slice,
=> {
const ty_pl = inst_datas[inst].ty_pl;
const extra = a.air.extraData(Air.Bin, ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},
.dbg_var_ptr,
.dbg_var_val,
=> {
@ -529,10 +542,6 @@ fn analyzeInst(
const extra = a.air.extraData(Air.FieldParentPtr, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.field_ptr, .none, .none });
},
.ptr_elem_ptr, .slice_elem_ptr, .slice => {
const extra = a.air.extraData(Air.Bin, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},
.cmpxchg_strong, .cmpxchg_weak => {
const extra = a.air.extraData(Air.Cmpxchg, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.ptr, extra.expected_value, extra.new_value });
@ -558,15 +567,7 @@ fn analyzeInst(
const extra = a.air.extraData(Air.Bin, pl_op.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs });
},
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> {
const ty_pl = inst_datas[inst].ty_pl;
const extra = a.air.extraData(Air.Bin, ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},
.br => {
const br = inst_datas[inst].br;
return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none });

View File

@ -7020,22 +7020,25 @@ fn intCast(
operand_src: LazySrcLoc,
runtime_safety: bool,
) CompileError!Air.Inst.Ref {
// TODO: Add support for vectors
const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty);
_ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
const operand_ty = sema.typeOf(operand);
const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
if (try sema.isComptimeKnown(block, operand_src, operand)) {
return sema.coerce(block, dest_ty, operand, operand_src);
} else if (dest_is_comptime_int) {
} else if (dest_scalar_ty.zigTypeTag() == .ComptimeInt) {
return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{});
}
try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src);
const is_vector = dest_ty.zigTypeTag() == .Vector;
if ((try sema.typeHasOnePossibleValue(block, dest_ty_src, dest_ty))) |opv| {
// requirement: intCast(u0, input) iff input == 0
if (runtime_safety and block.wantSafety()) {
try sema.requireRuntimeBlock(block, operand_src);
const target = sema.mod.getTarget();
const wanted_info = dest_ty.intInfo(target);
const wanted_info = dest_scalar_ty.intInfo(target);
const wanted_bits = wanted_info.bits;
if (wanted_bits == 0) {
@ -7051,9 +7054,8 @@ fn intCast(
try sema.requireRuntimeBlock(block, operand_src);
if (runtime_safety and block.wantSafety()) {
const target = sema.mod.getTarget();
const operand_ty = sema.typeOf(operand);
const actual_info = operand_ty.intInfo(target);
const wanted_info = dest_ty.intInfo(target);
const actual_info = operand_scalar_ty.intInfo(target);
const wanted_info = dest_scalar_ty.intInfo(target);
const actual_bits = actual_info.bits;
const wanted_bits = wanted_info.bits;
const actual_value_bits = actual_bits - @boolToInt(actual_info.signedness == .signed);
@ -7062,7 +7064,11 @@ fn intCast(
// range shrinkage
// requirement: int value fits into target type
if (wanted_value_bits < actual_value_bits) {
const dest_max_val = try dest_ty.maxInt(sema.arena, target);
const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, target);
const dest_max_val = if (is_vector)
try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar)
else
dest_max_val_scalar;
const dest_max = try sema.addConstant(operand_ty, dest_max_val);
const diff = try block.addBinOp(.subwrap, dest_max, operand);
@ -7080,19 +7086,59 @@ fn intCast(
} else dest_max_val;
const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val);
const is_in_range = try block.addBinOp(.cmp_lte, diff_unsigned, dest_range);
try sema.addSafetyCheck(block, is_in_range, .cast_truncated_data);
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte, try sema.addType(operand_ty));
const all_in_range = try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
} },
});
break :ok all_in_range;
} else ok: {
const is_in_range = try block.addBinOp(.cmp_lte, diff_unsigned, dest_range);
break :ok is_in_range;
};
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
} else {
const is_in_range = try block.addBinOp(.cmp_lte, diff, dest_max);
try sema.addSafetyCheck(block, is_in_range, .cast_truncated_data);
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff, dest_max, .lte, try sema.addType(operand_ty));
const all_in_range = try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
} },
});
break :ok all_in_range;
} else ok: {
const is_in_range = try block.addBinOp(.cmp_lte, diff, dest_max);
break :ok is_in_range;
};
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
}
}
// no shrinkage, yes sign loss
// requirement: signed to unsigned >= 0
else if (actual_info.signedness == .signed and wanted_info.signedness == .unsigned) {
const zero_inst = try sema.addConstant(operand_ty, Value.zero);
const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst);
try sema.addSafetyCheck(block, is_in_range, .cast_truncated_data);
} else if (actual_info.signedness == .signed and wanted_info.signedness == .unsigned) {
// no shrinkage, yes sign loss
// requirement: signed to unsigned >= 0
const ok = if (is_vector) ok: {
const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
const zero_inst = try sema.addConstant(operand_ty, zero_val);
const is_in_range = try block.addCmpVector(operand, zero_inst, .gte, try sema.addType(operand_ty));
const all_in_range = try block.addInst(.{
.tag = .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
} },
});
break :ok all_in_range;
} else ok: {
const zero_inst = try sema.addConstant(operand_ty, Value.zero);
const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst);
break :ok is_in_range;
};
try sema.addSafetyCheck(block, ok, .cast_truncated_data);
}
}
return block.addTyOp(.intcast, dest_ty, operand);
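For vectors, the safety check above compares all lanes at once: an elementwise cmp_vector produces a vector of bools, and a reduce with .And collapses it into the single ok operand that addSafetyCheck expects. The user-visible semantics, sketched in era-syntax Zig (hypothetical helper, two-argument @splat):

// Sketch of the semantics the emitted AIR implements: a vector int cast
// is safe only if every lane is in the destination range.
fn allLanesInRange(v: @Vector(4, u16), max: u16) bool {
    const in_range = v <= @splat(4, max); // elementwise, like cmp_vector
    return @reduce(.And, in_range); // like the reduce/.And instruction
}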
@ -10610,28 +10656,55 @@ fn analyzePtrArithmetic(
// TODO if the operand is comptime-known to be negative, or is a negative int,
// coerce to isize instead of usize.
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
// TODO adjust the return type according to alignment and other factors
const target = sema.mod.getTarget();
const runtime_src = rs: {
if (try sema.resolveMaybeUndefVal(block, ptr_src, ptr)) |ptr_val| {
if (try sema.resolveMaybeUndefVal(block, offset_src, offset)) |offset_val| {
const ptr_ty = sema.typeOf(ptr);
const new_ptr_ty = ptr_ty; // TODO modify alignment
const opt_ptr_val = try sema.resolveMaybeUndefVal(block, ptr_src, ptr);
const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
const ptr_ty = sema.typeOf(ptr);
const ptr_info = ptr_ty.ptrInfo().data;
const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Array)
ptr_info.pointee_type.childType()
else
ptr_info.pointee_type;
if (ptr_val.isUndef() or offset_val.isUndef()) {
return sema.addConstUndef(new_ptr_ty);
}
const new_ptr_ty = t: {
// Calculate the new pointer alignment.
if (ptr_info.@"align" == 0) {
// ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
break :t ptr_ty;
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
const elem_size = elem_ty.abiSize(target);
const addend = if (opt_off_val) |off_val| a: {
const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(target));
break :a elem_size * off_int;
} else elem_size;
// The resulting pointer is aligned to the gcd of the addend (an
// arbitrary number) and the alignment factor (always a power of two,
// non-zero).
const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, addend | ptr_info.@"align"));
break :t try Type.ptr(sema.arena, sema.mod, .{
.pointee_type = ptr_info.pointee_type,
.sentinel = ptr_info.sentinel,
.@"align" = new_align,
.@"addrspace" = ptr_info.@"addrspace",
.mutable = ptr_info.mutable,
.@"allowzero" = ptr_info.@"allowzero",
.@"volatile" = ptr_info.@"volatile",
.size = ptr_info.size,
});
};
const runtime_src = rs: {
if (opt_ptr_val) |ptr_val| {
if (opt_off_val) |offset_val| {
if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty);
const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target));
// TODO I tried to put this check earlier, but it made the LLVM backend generate invalid instructions
if (offset_int == 0) return ptr;
if (try ptr_val.getUnsignedIntAdvanced(target, sema.kit(block, ptr_src))) |addr| {
const ptr_child_ty = ptr_ty.childType();
const elem_ty = if (ptr_ty.isSinglePointer() and ptr_child_ty.zigTypeTag() == .Array)
ptr_child_ty.childType()
else
ptr_child_ty;
const elem_size = elem_ty.abiSize(target);
const new_addr = switch (air_tag) {
.ptr_add => addr + elem_size * offset_int,
@ -10651,7 +10724,16 @@ fn analyzePtrArithmetic(
};
try sema.requireRuntimeBlock(block, runtime_src);
return block.addBinOp(air_tag, ptr, offset);
return block.addInst(.{
.tag = air_tag,
.data = .{ .ty_pl = .{
.ty = try sema.addType(new_ptr_ty),
.payload = try sema.addExtra(Air.Bin{
.lhs = ptr,
.rhs = offset,
}),
} },
});
}
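The alignment computation deserves a worked example: @ctz(u64, addend | align) finds the largest power of two dividing both the byte addend and the original alignment, which is exactly the strongest alignment the result pointer can still guarantee. A hedged sketch with a couple of spot checks (hypothetical helper; era two-argument @ctz/@intCast):

const std = @import("std");

// Sketch of the alignment rule above: the new pointer alignment is the
// largest power of two dividing both the old alignment and the byte
// addend, i.e. 1 << @ctz(addend | old_align).
fn newAlign(old_align: u32, addend: u64) u32 {
    return @as(u32, 1) << @intCast(u5, @ctz(u64, addend | old_align));
}

test "pointer arithmetic alignment" {
    // align(8) pointer advanced by 4 bytes -> align(4)
    try std.testing.expectEqual(@as(u32, 4), newAlign(8, 4));
    // align(8) pointer advanced by 24 bytes stays align(8)
    try std.testing.expectEqual(@as(u32, 8), newAlign(8, 24));
}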
fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -14481,8 +14563,8 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const dest_scalar_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = sema.resolveInst(extra.rhs);
const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand, operand_src);
const operand_ty = sema.typeOf(operand);
const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
const is_vector = operand_ty.zigTypeTag() == .Vector;
const dest_ty = if (is_vector)
try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty)
@ -14650,7 +14732,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand = sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand, operand_src);
const scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
const target = sema.mod.getTarget();
const bits = scalar_ty.intInfo(target).bits;
if (bits % 8 != 0) {
@ -14707,7 +14789,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const operand = sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
_ = try sema.checkIntOrVectorAllowComptime(block, operand, operand_src);
_ = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
if (try sema.typeHasOnePossibleValue(block, operand_src, operand_ty)) |val| {
return sema.addConstant(operand_ty, val);
@ -15059,10 +15141,9 @@ fn checkIntOrVector(
fn checkIntOrVectorAllowComptime(
sema: *Sema,
block: *Block,
operand: Air.Inst.Ref,
operand_ty: Type,
operand_src: LazySrcLoc,
) CompileError!Type {
const operand_ty = sema.typeOf(operand);
switch (try operand_ty.zigTypeTagOrPoison()) {
.Int, .ComptimeInt => return operand_ty,
.Vector => {
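checkIntOrVectorAllowComptime now takes the operand's Type instead of the Air.Inst.Ref; besides fixing the call sites above that were passing an instruction where a type was expected, this lets zirTruncate validate the operand and then build a matching vector destination type. A sketch of what the change enables at the language level (era two-argument @truncate; the actual coverage lives in the behavior test suite, not shown here):

const expectEqual = @import("std").testing.expectEqual;

test "truncate on a vector operand" {
    const v: @Vector(4, u16) = .{ 0x1234, 0x5678, 0x9abc, 0xdef0 };
    const t: @Vector(4, u8) = @truncate(u8, v); // elementwise truncate
    try expectEqual(@as(u8, 0x34), t[0]);
    try expectEqual(@as(u8, 0xf0), t[3]);
}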

View File

@ -545,18 +545,30 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.addwrap => try self.airBinOp(inst),
.add => try self.airBinOp(inst, .add),
.addwrap => try self.airBinOp(inst, .addwrap),
.sub => try self.airBinOp(inst, .sub),
.subwrap => try self.airBinOp(inst, .subwrap),
.mul => try self.airBinOp(inst, .mul),
.mulwrap => try self.airBinOp(inst, .mulwrap),
.shl => try self.airBinOp(inst, .shl),
.shl_exact => try self.airBinOp(inst, .shl_exact),
.bool_and => try self.airBinOp(inst, .bool_and),
.bool_or => try self.airBinOp(inst, .bool_or),
.bit_and => try self.airBinOp(inst, .bit_and),
.bit_or => try self.airBinOp(inst, .bit_or),
.xor => try self.airBinOp(inst, .xor),
.shr => try self.airBinOp(inst, .shr),
.shr_exact => try self.airBinOp(inst, .shr_exact),
.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
.add_sat => try self.airAddSat(inst),
.sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airBinOp(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airBinOp(inst),
.mulwrap => try self.airBinOp(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airBinOp(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
@ -595,13 +607,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
.bool_and => try self.airBinOp(inst),
.bool_or => try self.airBinOp(inst),
.bit_and => try self.airBinOp(inst),
.bit_or => try self.airBinOp(inst),
.xor => try self.airBinOp(inst),
.shr, .shr_exact => try self.airBinOp(inst),
.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
@ -1260,11 +1265,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
fn binOpRegister(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
const rhs_is_register = rhs == .register;
@ -1284,9 +1289,8 @@ fn binOpRegister(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.lhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.lhs).?;
} else null;
const raw_reg = try self.register_manager.allocReg(track_inst);
@ -1300,9 +1304,8 @@ fn binOpRegister(
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
const rhs_reg = if (rhs_is_register) rhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.rhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.rhs).?;
} else null;
const raw_reg = try self.register_manager.allocReg(track_inst);
@ -1317,15 +1320,13 @@ fn binOpRegister(
const dest_reg = switch (mir_tag) {
.cmp_shifted_register => undefined, // cmp has no destination register
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
break :blk lhs_reg;
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
} else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
break :blk rhs_reg;
} else {
const raw_reg = try self.register_manager.allocReg(inst);
const raw_reg = try self.register_manager.allocReg(md.inst);
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
}
} else blk: {
@ -1407,11 +1408,11 @@ fn binOpRegister(
fn binOpImmediate(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
lhs_and_rhs_swapped: bool,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
@ -1424,10 +1425,9 @@ fn binOpImmediate(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
).?;
} else null;
@ -1443,18 +1443,16 @@ fn binOpImmediate(
const dest_reg = switch (mir_tag) {
.cmp_immediate => undefined, // cmp has no destination register
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(
inst,
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
md.inst,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
if (lhs_and_rhs_swapped) 1 else 0,
lhs,
)) {
break :blk lhs_reg;
} else {
const raw_reg = try self.register_manager.allocReg(inst);
const raw_reg = try self.register_manager.allocReg(md.inst);
break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
}
} else blk: {
@ -1498,6 +1496,12 @@ fn binOpImmediate(
return MCValue{ .register = dest_reg };
}
const BinOpMetadata = struct {
inst: Air.Inst.Index,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
};
/// For all your binary operation needs, this function will generate
/// the corresponding Mir instruction(s). Returns the location of the
/// result.
@ -1513,11 +1517,11 @@ fn binOpImmediate(
fn binOp(
self: *Self,
tag: Air.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
switch (tag) {
@ -1562,12 +1566,12 @@ fn binOp(
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag_immediate, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag_immediate, rhs, lhs, rhs_ty, true, metadata);
} else {
return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag_register, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@ -1586,7 +1590,7 @@ fn binOp(
// TODO add optimisations for multiplication
// with immediates, for example a * 2 can be
// lowered to a << 1
return try self.binOpRegister(.mul, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(.mul, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
@ -1606,7 +1610,7 @@ fn binOp(
};
// Generate an add/sub/mul
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary
switch (lhs_ty.zigTypeTag()) {
@ -1642,7 +1646,7 @@ fn binOp(
else => unreachable,
};
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
@ -1678,9 +1682,9 @@ fn binOp(
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag_immediate, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag_immediate, lhs, rhs, lhs_ty, false, metadata);
} else {
return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag_register, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@ -1699,7 +1703,7 @@ fn binOp(
};
// Generate a shl_exact/shr_exact
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary
switch (tag) {
@ -1735,7 +1739,7 @@ fn binOp(
else => unreachable,
};
return try self.binOpRegister(mir_tag_register, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag_register, lhs, rhs, lhs_ty, rhs_ty, metadata);
},
else => unreachable,
}
@ -1759,12 +1763,12 @@ fn binOp(
else => unreachable,
};
return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
// convert the offset into a byte offset by
// multiplying it with elem_size
const offset = try self.binOp(.mul, null, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize);
const addr = try self.binOp(tag, null, lhs, offset, Type.initTag(.manyptr_u8), Type.usize);
const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
return addr;
}
},
@ -1775,8 +1779,7 @@ fn binOp(
}
}
fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -1786,7 +1789,30 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.inst = inst,
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.inst = inst,
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -1841,7 +1867,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
.sub_with_overflow => .sub,
else => unreachable,
};
const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@ -1855,7 +1881,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
// cmp dest, truncated
_ = try self.binOp(.cmp_eq, null, dest, .{ .register = truncated_reg }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
@ -1894,12 +1920,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const dest = blk: {
if (rhs_immediate_ok) {
break :blk try self.binOpImmediate(mir_tag_immediate, null, lhs, rhs, lhs_ty, false);
break :blk try self.binOpImmediate(mir_tag_immediate, lhs, rhs, lhs_ty, false, null);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
break :blk try self.binOpImmediate(mir_tag_immediate, null, rhs, lhs, rhs_ty, true);
break :blk try self.binOpImmediate(mir_tag_immediate, rhs, lhs, rhs_ty, true, null);
} else {
break :blk try self.binOpRegister(mir_tag_register, null, lhs, rhs, lhs_ty, rhs_ty);
break :blk try self.binOpRegister(mir_tag_register, lhs, rhs, lhs_ty, rhs_ty, null);
}
};
@ -1952,7 +1978,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.unsigned => .umull,
};
const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@ -2136,11 +2162,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = try self.binOp(
.cmp_eq,
null,
.{ .register = dest_high_reg },
.{ .immediate = 0 },
Type.usize,
Type.usize,
null,
);
if (int_info.bits < 64) {
@ -2156,11 +2182,11 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
_ = try self.binOp(
.cmp_eq,
null,
.{ .register = dest_high_reg },
.{ .immediate = 0 },
Type.usize,
Type.usize,
null,
);
}
},
@ -2218,16 +2244,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
self.compare_flags_inst = null;
// lsl dest, lhs, rhs
const dest = try self.binOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOp(.shl, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
// asr/lsr reconstructed, dest, rhs
const reconstructed = try self.binOp(.shr, null, dest, rhs, lhs_ty, rhs_ty);
const reconstructed = try self.binOp(.shr, dest, rhs, lhs_ty, rhs_ty, null);
// cmp lhs, reconstructed
_ = try self.binOp(.cmp_eq, null, lhs, reconstructed, lhs_ty, lhs_ty);
_ = try self.binOp(.cmp_eq, lhs, reconstructed, lhs_ty, lhs_ty, null);
try self.genSetStack(lhs_ty, stack_offset, dest);
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{
@ -2489,7 +2515,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
switch (elem_size) {
else => {
const dest = try self.allocRegOrMem(inst, true);
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize);
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
try self.load(dest, addr, slice_ptr_field_type);
break :result dest;
@ -2933,11 +2959,11 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const dest = try self.binOp(
.add,
null,
.{ .register = addr_reg },
.{ .register = offset_reg },
Type.usize,
Type.usize,
null,
);
break :result dest;
@ -3302,7 +3328,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const int_info = int_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
_ = try self.binOp(.cmp_eq, inst, lhs, rhs, int_ty, int_ty);
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
.inst = inst,
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
});
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
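The thread running through this backend change: binOpRegister/binOpImmediate used to fetch their operands from self.air.instructions.items(.data)[inst].bin_op, which no longer works now that ptr_add/ptr_sub are ty_pl-encoded. Passing an optional BinOpMetadata decouples operand bookkeeping (register reuse, liveness tracking) from the AIR encoding; internal helper calls that have no AIR instruction simply pass null. The two call shapes, sketched with names from the diff above:

// From an AIR handler: operands are known, so reuse/tracking is enabled.
const result = try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
    .inst = inst,
    .lhs = bin_op.lhs,
    .rhs = bin_op.rhs,
});
// From an internal helper (e.g. an overflow check): nothing to track.
_ = try self.binOp(.cmp_eq, dest, rhs, Type.usize, Type.usize, null);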

View File

@ -552,21 +552,34 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.addwrap => try self.airBinOp(inst),
.add, => try self.airBinOp(inst, .add),
.addwrap => try self.airBinOp(inst, .addwrap),
.sub, => try self.airBinOp(inst, .sub),
.subwrap => try self.airBinOp(inst, .subwrap),
.mul => try self.airBinOp(inst, .mul),
.mulwrap => try self.airBinOp(inst, .mulwrap),
.shl => try self.airBinOp(inst, .shl),
.shl_exact => try self.airBinOp(inst, .shl_exact),
.bool_and => try self.airBinOp(inst, .bool_and),
.bool_or => try self.airBinOp(inst, .bool_or),
.bit_and => try self.airBinOp(inst, .bit_and),
.bit_or => try self.airBinOp(inst, .bit_or),
.xor => try self.airBinOp(inst, .xor),
.shr => try self.airBinOp(inst, .shr),
.shr_exact => try self.airBinOp(inst, .shr_exact),
.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
.min => try self.airMinMax(inst),
.max => try self.airMinMax(inst),
.add_sat => try self.airAddSat(inst),
.sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airBinOp(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airBinOp(inst),
.mulwrap => try self.airBinOp(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airBinOp(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMinMax(inst),
.max => try self.airMinMax(inst),
.slice => try self.airSlice(inst),
.sqrt,
@ -602,13 +615,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
.bool_and => try self.airBinOp(inst),
.bool_or => try self.airBinOp(inst),
.bit_and => try self.airBinOp(inst),
.bit_or => try self.airBinOp(inst),
.xor => try self.airBinOp(inst),
.shr, .shr_exact => try self.airBinOp(inst),
.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
@ -1260,7 +1266,7 @@ fn minMax(
// register.
assert(lhs_reg != rhs_reg); // see note above
_ = try self.binOpRegister(.cmp, null, .{ .register = lhs_reg }, .{ .register = rhs_reg }, lhs_ty, rhs_ty);
_ = try self.binOpRegister(.cmp, .{ .register = lhs_reg }, .{ .register = rhs_reg }, lhs_ty, rhs_ty, null);
const cond_choose_lhs: Condition = switch (tag) {
.max => switch (int_info.signedness) {
@ -1340,15 +1346,40 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -1402,7 +1433,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
.sub_with_overflow => .sub,
else => unreachable,
};
const dest = try self.binOp(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@ -1415,7 +1446,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
// cmp dest, truncated
_ = try self.binOp(.cmp_eq, null, dest, .{ .register = truncated_reg }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
@ -1448,12 +1479,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
const dest = blk: {
if (rhs_immediate_ok) {
break :blk try self.binOpImmediate(mir_tag, null, lhs, rhs, lhs_ty, false);
break :blk try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, null);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
break :blk try self.binOpImmediate(mir_tag, null, rhs, lhs, rhs_ty, true);
break :blk try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, null);
} else {
break :blk try self.binOpRegister(mir_tag, null, lhs, rhs, lhs_ty, rhs_ty);
break :blk try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, null);
}
};
@ -1507,7 +1538,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
.unsigned => .mul,
};
const dest = try self.binOpRegister(base_tag, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_reg_lock);
@ -1520,7 +1551,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);
// cmp dest, truncated
_ = try self.binOp(.cmp_eq, null, dest, .{ .register = truncated_reg }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null);
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
@ -1594,7 +1625,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
// cmp truncated, rdlo
_ = try self.binOp(.cmp_eq, null, .{ .register = truncated_reg }, .{ .register = rdlo }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, .{ .register = truncated_reg }, .{ .register = rdlo }, Type.usize, Type.usize, null);
// mov rdlo, #0
_ = try self.addInst(.{
@ -1618,7 +1649,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
});
// cmp rdhi, #0
_ = try self.binOp(.cmp_eq, null, .{ .register = rdhi }, .{ .immediate = 0 }, Type.usize, Type.usize);
_ = try self.binOp(.cmp_eq, .{ .register = rdhi }, .{ .immediate = 0 }, Type.usize, Type.usize, null);
// movne rdlo, #1
_ = try self.addInst(.{
@ -1677,16 +1708,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
self.compare_flags_inst = null;
// lsl dest, lhs, rhs
const dest = try self.binOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty);
const dest = try self.binOp(.shl, lhs, rhs, lhs_ty, rhs_ty, null);
const dest_reg = dest.register;
const dest_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
defer self.register_manager.unlockReg(dest_lock);
// asr/lsr reconstructed, dest, rhs
const reconstructed = try self.binOp(.shr, null, dest, rhs, lhs_ty, rhs_ty);
const reconstructed = try self.binOp(.shr, dest, rhs, lhs_ty, rhs_ty, null);
// cmp lhs, reconstructed
_ = try self.binOp(.cmp_eq, null, lhs, reconstructed, lhs_ty, lhs_ty);
_ = try self.binOp(.cmp_eq, lhs, reconstructed, lhs_ty, lhs_ty, null);
try self.genSetStack(lhs_ty, stack_offset, dest);
try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags_unsigned = .neq });
@ -2031,7 +2062,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
},
else => {
const dest = try self.allocRegOrMem(inst, true);
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize);
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
try self.load(dest, addr, slice_ptr_field_type);
break :result dest;
@ -2051,7 +2082,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const slice_ty = self.air.typeOf(extra.lhs);
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ty, Type.usize);
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ty, Type.usize, null);
break :result addr;
};
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
@ -2079,7 +2110,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.air.typeOf(extra.lhs);
const addr = try self.binOp(.ptr_add, null, ptr_mcv, index_mcv, ptr_ty, Type.usize);
const addr = try self.binOp(.ptr_add, ptr_mcv, index_mcv, ptr_ty, Type.usize, null);
break :result addr;
};
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
@ -2411,11 +2442,11 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const dest = try self.binOp(
.add,
null,
.{ .register = addr_reg },
.{ .register = offset_reg },
Type.usize,
Type.usize,
null,
);
break :result dest;
@ -2514,11 +2545,11 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
fn binOpRegister(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
const rhs_is_register = rhs == .register;
@ -2532,9 +2563,8 @@ fn binOpRegister(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.lhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.lhs).?;
} else null;
const reg = try self.register_manager.allocReg(track_inst);
@ -2547,9 +2577,8 @@ fn binOpRegister(
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
const rhs_reg = if (rhs_is_register) rhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.rhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.rhs).?;
} else null;
const reg = try self.register_manager.allocReg(track_inst);
@ -2563,15 +2592,13 @@ fn binOpRegister(
const dest_reg = switch (mir_tag) {
.cmp => .r0, // cmp has no destination regardless
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
break :blk lhs_reg;
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
} else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
break :blk rhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
break :blk try self.register_manager.allocReg(md.inst);
}
} else try self.register_manager.allocReg(null),
};
@ -2634,11 +2661,11 @@ fn binOpRegister(
fn binOpImmediate(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
lhs_and_rhs_swapped: bool,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
@ -2651,10 +2678,9 @@ fn binOpImmediate(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
).?;
} else null;
@ -2669,18 +2695,16 @@ fn binOpImmediate(
const dest_reg = switch (mir_tag) {
.cmp => .r0, // cmp has no destination reg
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(
inst,
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
md.inst,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
if (lhs_and_rhs_swapped) 1 else 0,
lhs,
)) {
break :blk lhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
break :blk try self.register_manager.allocReg(md.inst);
}
} else try self.register_manager.allocReg(null),
};
@ -2720,6 +2744,12 @@ fn binOpImmediate(
return MCValue{ .register = dest_reg };
}
const BinOpMetadata = struct {
inst: Air.Inst.Index,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
};
/// For all your binary operation needs, this function will generate
/// the corresponding Mir instruction(s). Returns the location of the
/// result.
@ -2735,11 +2765,11 @@ fn binOpImmediate(
fn binOp(
self: *Self,
tag: Air.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) InnerError!MCValue {
switch (tag) {
.add,
@ -2780,12 +2810,12 @@ fn binOp(
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
@ -2806,7 +2836,7 @@ fn binOp(
// TODO add optimisations for multiplication
// with immediates, for example a * 2 can be
// lowered to a << 1
return try self.binOpRegister(.mul, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(.mul, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
}
@ -2826,7 +2856,7 @@ fn binOp(
};
// Generate an add/sub/mul
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary
switch (lhs_ty.zigTypeTag()) {
@ -2869,12 +2899,12 @@ fn binOp(
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
@ -2903,9 +2933,9 @@ fn binOp(
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
@ -2924,7 +2954,7 @@ fn binOp(
};
// Generate a shl_exact/shr_exact
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary
switch (tag) {
@ -2964,12 +2994,12 @@ fn binOp(
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
},
else => unreachable,
@ -2994,12 +3024,12 @@ fn binOp(
else => unreachable,
};
return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
// convert the offset into a byte offset by
// multiplying it with elem_size
const offset = try self.binOp(.mul, null, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize);
const addr = try self.binOp(tag, null, lhs, offset, Type.initTag(.manyptr_u8), Type.usize);
const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
return addr;
}
},
@ -3575,7 +3605,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
_ = try self.binOp(.cmp_eq, inst, lhs, rhs, int_ty, int_ty);
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
break :result switch (int_info.signedness) {
.signed => MCValue{ .compare_flags_signed = op },
@ -3865,7 +3899,7 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
}
const error_mcv = try self.errUnionErr(operand, ty);
_ = try self.binOp(.cmp_eq, null, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type);
_ = try self.binOp(.cmp_eq, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type, null);
return MCValue{ .compare_flags_unsigned = .gt };
}

View File

@ -481,10 +481,14 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
.add => try self.airBinOp(inst, .add),
.sub => try self.airBinOp(inst, .sub),
.addwrap => try self.airAddWrap(inst),
.add_sat => try self.airAddSat(inst),
.sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airSubWrap(inst),
.sub_sat => try self.airSubSat(inst),
.mul => try self.airMul(inst),
@ -1091,8 +1095,7 @@ fn binOp(
}
}
fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -1103,6 +1106,18 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch});

View File

@ -483,10 +483,13 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
.add, .ptr_add => try self.airBinOp(inst),
.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
.add => try self.airBinOp(inst, .add),
.addwrap => @panic("TODO try self.airAddWrap(inst)"),
.add_sat => @panic("TODO try self.airAddSat(inst)"),
.sub, .ptr_sub => @panic("TODO try self.airBinOp(inst)"),
.sub => @panic("TODO try self.airBinOp(inst)"),
.subwrap => @panic("TODO try self.airSubWrap(inst)"),
.sub_sat => @panic("TODO try self.airSubSat(inst)"),
.mul => @panic("TODO try self.airMul(inst)"),
@ -827,18 +830,38 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, mcv, .{ .none, .none, .none });
}
fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
const tag = self.air.instructions.items(.tag)[inst];
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
else
try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -1030,7 +1053,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
var int_buffer: Type.Payload.Bits = undefined;
const int_ty = switch (lhs_ty.zigTypeTag()) {
.Vector => unreachable, // Should be handled by cmp_vector?
.Vector => unreachable, // Handled by cmp_vector.
.Enum => lhs_ty.intTagType(&int_buffer),
.Int => lhs_ty,
.Bool => Type.initTag(.u1),
@ -1053,7 +1076,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const int_info = int_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
_ = try self.binOp(.cmp_eq, inst, lhs, rhs, int_ty, int_ty);
_ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{
.lhs = bin_op.lhs,
.rhs = bin_op.rhs,
.inst = inst,
});
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
@ -1426,7 +1453,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
// TODO skip the ptr_add emission entirely and use native addressing modes
// i.e. sllx/mulx then R+R or scale immediate then R+I
const dest = try self.allocRegOrMem(inst, true);
const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ptr_field_type, Type.usize);
const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null);
try self.load(dest, addr, slice_ptr_field_type);
break :result dest;
@ -1595,6 +1622,12 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
return MCValue{ .stack_offset = stack_offset };
}
const BinOpMetadata = struct {
inst: Air.Inst.Index,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
};
/// For all your binary operation needs, this function will generate
/// the corresponding Mir instruction(s). Returns the location of the
/// result.
@ -1610,11 +1643,11 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
fn binOp(
self: *Self,
tag: Air.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
switch (tag) {
@ -1649,13 +1682,13 @@ fn binOp(
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else if (lhs_immediate_ok) {
// swap lhs and rhs
return try self.binOpImmediate(mir_tag, maybe_inst, rhs, lhs, rhs_ty, true);
return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata);
} else {
// TODO convert large immediates to register before adding
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@ -1683,10 +1716,10 @@ fn binOp(
// If it's a power of two immediate then we emit an shl instead
// TODO add similar checks for LHS
if (new_rhs == .immediate and math.isPowerOfTwo(new_rhs.immediate)) {
return try self.binOp(.shl, maybe_inst, new_lhs, .{ .immediate = math.log2(new_rhs.immediate) }, new_lhs_ty, Type.usize);
return try self.binOp(.shl, new_lhs, .{ .immediate = math.log2(new_rhs.immediate) }, new_lhs_ty, Type.usize, metadata);
}
return try self.binOpRegister(.mulx, maybe_inst, new_lhs, new_rhs, new_lhs_ty, new_rhs_ty);
return try self.binOpRegister(.mulx, new_lhs, new_rhs, new_lhs_ty, new_rhs_ty, metadata);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
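// Editor's note: a self-contained check of the strength reduction above
// (x * 2^n == x << n, with n = log2 of the immediate), runnable as a
// plain Zig test outside the compiler:
//
//     const std = @import("std");
//
//     test "multiply by a power-of-two immediate is a left shift" {
//         var x: u64 = 13;
//         // 8 is a power of two and log2(8) == 3.
//         try std.testing.expect(std.math.isPowerOfTwo(@as(u64, 8)));
//         try std.testing.expect(x * 8 == x << 3);
//     }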
@ -1711,13 +1744,13 @@ fn binOp(
else => unreachable,
};
return try self.binOpRegister(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
} else {
// convert the offset into a byte offset by
// multiplying it by elem_size
const offset = try self.binOp(.mul, null, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize);
const addr = try self.binOp(tag, null, lhs, offset, Type.initTag(.manyptr_u8), Type.usize);
const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
return addr;
}
},
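// Editor's note: a user-level illustration of the element-unit offset
// semantics lowered above, runnable as a plain Zig test:
//
//     const std = @import("std");
//
//     test "pointer offsets are scaled by the element size" {
//         var array = [_]u32{ 10, 20, 30, 40 };
//         var ptr: [*]u32 = &array;
//         var index: usize = 2;
//         // `ptr + index` advances by index * @sizeOf(u32) bytes,
//         // the same mul-then-add sequence emitted above.
//         try std.testing.expect((ptr + index)[0] == 30);
//     }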
@ -1732,7 +1765,7 @@ fn binOp(
};
// Generate a shl_exact/shr_exact
const result = try self.binOp(base_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
// Truncate if necessary
switch (tag) {
@ -1768,9 +1801,9 @@ fn binOp(
};
if (rhs_immediate_ok) {
return try self.binOpImmediate(mir_tag, maybe_inst, lhs, rhs, lhs_ty, false);
return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata);
} else {
return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
@ -1792,18 +1825,17 @@ fn binOp(
/// op dest, lhs, #rhs_imm
///
/// Set lhs_and_rhs_swapped to true iff inst.bin_op.lhs corresponds to
/// rhs and vice versa. This parameter is only used when maybe_inst !=
/// null.
/// rhs and vice versa. This parameter is only used when metadata != null.
///
/// Asserts that generating an instruction of that form is possible.
fn binOpImmediate(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
lhs_and_rhs_swapped: bool,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
@ -1816,10 +1848,9 @@ fn binOpImmediate(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
).?;
} else null;
@ -1833,18 +1864,16 @@ fn binOpImmediate(
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
const dest_reg = switch (mir_tag) {
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(
inst,
if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs,
md.inst,
if (lhs_and_rhs_swapped) md.rhs else md.lhs,
if (lhs_and_rhs_swapped) 1 else 0,
lhs,
)) {
break :blk lhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
break :blk try self.register_manager.allocReg(md.inst);
}
} else blk: {
break :blk try self.register_manager.allocReg(null);
@ -1896,11 +1925,11 @@ fn binOpImmediate(
fn binOpRegister(
self: *Self,
mir_tag: Mir.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
metadata: ?BinOpMetadata,
) !MCValue {
const lhs_is_register = lhs == .register;
const rhs_is_register = rhs == .register;
@ -1920,9 +1949,8 @@ fn binOpRegister(
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
const lhs_reg = if (lhs_is_register) lhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.lhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.lhs).?;
} else null;
const reg = try self.register_manager.allocReg(track_inst);
@ -1934,9 +1962,8 @@ fn binOpRegister(
defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
const rhs_reg = if (rhs_is_register) rhs.register else blk: {
const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
break :inst Air.refToIndex(bin_op.rhs).?;
const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
break :inst Air.refToIndex(md.rhs).?;
} else null;
const reg = try self.register_manager.allocReg(track_inst);
@ -1948,15 +1975,13 @@ fn binOpRegister(
defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
const dest_reg = switch (mir_tag) {
else => if (maybe_inst) |inst| blk: {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
else => if (metadata) |md| blk: {
if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) {
break :blk lhs_reg;
} else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
} else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
break :blk rhs_reg;
} else {
break :blk try self.register_manager.allocReg(inst);
break :blk try self.register_manager.allocReg(md.inst);
}
} else blk: {
break :blk try self.register_manager.allocReg(null);
@ -3069,11 +3094,11 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const dest = try self.binOp(
.add,
null,
.{ .register = addr_reg },
.{ .register = offset_reg },
Type.usize,
Type.usize,
null,
);
break :result dest;

View File

@ -3397,7 +3397,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
fn airPtrBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!WValue {
if (self.liveness.isUnused(inst)) return WValue{ .none = {} };
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
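// Editor's note: this decode is the recurring pattern of the commit.
// ptr_add/ptr_sub now carry their operands out-of-line:
//
//     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
//     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
//
// The result pointer type travels explicitly as `ty_pl.ty`, so each
// backend (and the AIR printer, changed later in this diff) stops
// reading a `bin_op` field for these tags.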

View File

@ -574,23 +574,33 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airBinOp(inst),
.addwrap => try self.airBinOp(inst),
.add_sat => try self.airAddSat(inst),
.sub => try self.airBinOp(inst),
.subwrap => try self.airBinOp(inst),
.sub_sat => try self.airSubSat(inst),
.add => try self.airBinOp(inst, .add),
.addwrap => try self.airBinOp(inst, .addwrap),
.sub => try self.airBinOp(inst, .sub),
.subwrap => try self.airBinOp(inst, .subwrap),
.bool_and => try self.airBinOp(inst, .bool_and),
.bool_or => try self.airBinOp(inst, .bool_or),
.bit_and => try self.airBinOp(inst, .bit_and),
.bit_or => try self.airBinOp(inst, .bit_or),
.xor => try self.airBinOp(inst, .xor),
.ptr_add => try self.airPtrArithmetic(inst, .ptr_add),
.ptr_sub => try self.airPtrArithmetic(inst, .ptr_sub),
.shr, .shr_exact => try self.airShlShrBinOp(inst),
.shl, .shl_exact => try self.airShlShrBinOp(inst),
.mul => try self.airMulDivBinOp(inst),
.mulwrap => try self.airMulDivBinOp(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airMulDivBinOp(inst),
.mod => try self.airMulDivBinOp(inst),
.shl, .shl_exact => try self.airShlShrBinOp(inst),
.add_sat => try self.airAddSat(inst),
.sub_sat => try self.airSubSat(inst),
.mul_sat => try self.airMulSat(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
.ptr_add => try self.airBinOp(inst),
.ptr_sub => try self.airBinOp(inst),
.slice => try self.airSlice(inst),
.sqrt,
@ -626,13 +636,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
.bool_and => try self.airBinOp(inst),
.bool_or => try self.airBinOp(inst),
.bit_and => try self.airBinOp(inst),
.bit_or => try self.airBinOp(inst),
.xor => try self.airBinOp(inst),
.shr, .shr_exact => try self.airShlShrBinOp(inst),
.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
@ -1231,21 +1234,26 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airBinOp(self: *Self, inst: Air.Inst.Index) !void {
fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}
const tag = self.air.instructions.items(.tag)[inst];
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const result = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
const result = try self.genBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
}
const result = try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@ -1316,13 +1324,12 @@ fn airAddSubShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
try self.spillRegisters(1, .{.rcx});
}
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const partial: MCValue = switch (tag) {
.add_with_overflow => try self.genBinOp(.add, null, lhs, rhs, ty, ty),
.sub_with_overflow => try self.genBinOp(.sub, null, lhs, rhs, ty, ty),
.add_with_overflow => try self.genBinOp(null, .add, bin_op.lhs, bin_op.rhs),
.sub_with_overflow => try self.genBinOp(null, .sub, bin_op.lhs, bin_op.rhs),
.shl_with_overflow => blk: {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const shift_ty = self.air.typeOf(bin_op.rhs);
break :blk try self.genShiftBinOp(.shl, null, lhs, rhs, ty, shift_ty);
},
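// Editor's note: the user-level semantics these lowerings implement, as
// a runnable test (using the @addWithOverflow signature of this era):
//
//     const std = @import("std");
//
//     test "add_with_overflow yields a wrapped result plus an overflow bit" {
//         var a: u8 = 200;
//         var b: u8 = 100;
//         var result: u8 = undefined;
//         const overflow = @addWithOverflow(u8, a, b, &result);
//         try std.testing.expect(overflow);
//         try std.testing.expect(result == 44); // 300 wraps to 300 - 256
//     }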
@ -3310,13 +3317,15 @@ fn genMulDivBinOp(
/// Result is always a register.
fn genBinOp(
self: *Self,
tag: Air.Inst.Tag,
maybe_inst: ?Air.Inst.Index,
lhs: MCValue,
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
tag: Air.Inst.Tag,
lhs_air: Air.Inst.Ref,
rhs_air: Air.Inst.Ref,
) !MCValue {
const lhs = try self.resolveInst(lhs_air);
const rhs = try self.resolveInst(rhs_air);
const lhs_ty = self.air.typeOf(lhs_air);
const rhs_ty = self.air.typeOf(rhs_air);
if (lhs_ty.zigTypeTag() == .Vector or lhs_ty.zigTypeTag() == .Float) {
return self.fail("TODO implement genBinOp for {}", .{lhs_ty.fmtDebug()});
}
@ -3352,11 +3361,10 @@ fn genBinOp(
var flipped: bool = false;
const dst_mcv: MCValue = blk: {
if (maybe_inst) |inst| {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
if (self.reuseOperand(inst, bin_op.lhs, 0, lhs) and lhs.isRegister()) {
if (self.reuseOperand(inst, lhs_air, 0, lhs) and lhs.isRegister()) {
break :blk lhs;
}
if (is_commutative and self.reuseOperand(inst, bin_op.rhs, 1, rhs) and rhs.isRegister()) {
if (is_commutative and self.reuseOperand(inst, rhs_air, 1, rhs) and rhs.isRegister()) {
flipped = true;
break :blk rhs;
}

View File

@ -1711,21 +1711,18 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.unreach => try airUnreach(f),
.fence => try airFence(f, inst),
// TODO use a different strategy for add that communicates to the optimizer
// that wrapping is UB.
.add => try airBinOp (f, inst, " + "),
.ptr_add => try airPtrAddSub (f, inst, " + "),
// TODO use a different strategy for sub that communicates to the optimizer
// that wrapping is UB.
.sub => try airBinOp (f, inst, " - "),
.ptr_sub => try airPtrAddSub (f, inst, " - "),
// TODO use a different strategy for mul that communicates to the optimizer
// that wrapping is UB.
.mul => try airBinOp (f, inst, " * "),
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
.ptr_add => try airPtrAddSub(f, inst, " + "),
.ptr_sub => try airPtrAddSub(f, inst, " - "),
// TODO use a different strategy for add, sub, mul, div
// that communicates to the optimizer that wrapping is UB.
.add => try airBinOp (f, inst, " + "),
.sub => try airBinOp (f, inst, " - "),
.mul => try airBinOp (f, inst, " * "),
.div_float, .div_exact => try airBinOp( f, inst, " / "),
.div_trunc => blk: {
.rem => try airBinOp( f, inst, " % "),
.div_trunc => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const lhs_ty = f.air.typeOf(bin_op.lhs);
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
@ -1735,9 +1732,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
else
try airBinOpBuiltinCall(f, inst, "div_trunc");
},
.div_floor => try airBinOpBuiltinCall(f, inst, "div_floor"),
.rem => try airBinOp( f, inst, " % "),
.mod => try airBinOpBuiltinCall(f, inst, "mod"),
.div_floor => try airBinOpBuiltinCall(f, inst, "div_floor"),
.mod => try airBinOpBuiltinCall(f, inst, "mod"),
.addwrap => try airWrapOp(f, inst, " + ", "addw_"),
.subwrap => try airWrapOp(f, inst, " - ", "subw_"),
@ -2617,10 +2613,10 @@ fn airEquality(
}
fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue {
if (f.liveness.isUnused(inst))
return CValue.none;
if (f.liveness.isUnused(inst)) return CValue.none;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);

View File

@ -395,11 +395,11 @@ pub const Object = struct {
return slice.ptr;
}
fn genErrorNameTable(self: *Object, comp: *Compilation) !void {
fn genErrorNameTable(self: *Object) !void {
// If self.error_name_table is null, there was no instruction that actually referenced the error table.
const error_name_table_ptr_global = self.error_name_table orelse return;
const mod = comp.bin_file.options.module.?;
const mod = self.module;
const target = mod.getTarget();
const llvm_ptr_ty = self.context.intType(8).pointerType(0); // TODO: Address space
@ -413,8 +413,8 @@ pub const Object = struct {
const slice_alignment = slice_ty.abiAlignment(target);
const error_name_list = mod.error_name_list.items;
const llvm_errors = try comp.gpa.alloc(*const llvm.Value, error_name_list.len);
defer comp.gpa.free(llvm_errors);
const llvm_errors = try mod.gpa.alloc(*const llvm.Value, error_name_list.len);
defer mod.gpa.free(llvm_errors);
llvm_errors[0] = llvm_slice_ty.getUndef();
for (llvm_errors[1..]) |*llvm_error, i| {
@ -447,10 +447,10 @@ pub const Object = struct {
error_name_table_ptr_global.setInitializer(error_name_table_ptr);
}
fn genCmpLtErrorsLenFunction(object: *Object, comp: *Compilation) !void {
fn genCmpLtErrorsLenFunction(object: *Object) !void {
// If there is no such function in the module, it means the source code does not need it.
const llvm_fn = object.llvm_module.getNamedFunction(lt_errors_fn_name) orelse return;
const mod = comp.bin_file.options.module.?;
const mod = object.module;
const errors_len = mod.global_error_set.count();
// Delete previous implementation. We replace it with every flush() because the
@ -476,10 +476,10 @@ pub const Object = struct {
_ = builder.buildRet(is_lt);
}
fn genModuleLevelAssembly(object: *Object, comp: *Compilation) !void {
const mod = comp.bin_file.options.module.?;
fn genModuleLevelAssembly(object: *Object) !void {
const mod = object.module;
if (mod.global_assembly.count() == 0) return;
var buffer = std.ArrayList(u8).init(comp.gpa);
var buffer = std.ArrayList(u8).init(mod.gpa);
defer buffer.deinit();
var it = mod.global_assembly.iterator();
while (it.next()) |kv| {
@ -489,15 +489,53 @@ pub const Object = struct {
object.llvm_module.setModuleInlineAsm2(buffer.items.ptr, buffer.items.len - 1);
}
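// Editor's note (assumption, since the loop body is elided above): each
// global_assembly chunk appears to be appended with a trailing '\n', so
// `buffer.items.len - 1` hands LLVM the string without the final newline.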
fn resolveExportExternCollisions(object: *Object) !void {
const mod = object.module;
const export_keys = mod.decl_exports.keys();
for (mod.decl_exports.values()) |export_list, i| {
const decl_index = export_keys[i];
const llvm_global = object.decl_map.get(decl_index) orelse continue;
for (export_list) |exp| {
// Detect if the LLVM global has already been created as an extern. In
// such a case, we need to replace all uses of it with this exported global.
// TODO update std.builtin.ExportOptions to have the name be a
// null-terminated slice.
const exp_name_z = try mod.gpa.dupeZ(u8, exp.options.name);
defer mod.gpa.free(exp_name_z);
const other_global = object.getLlvmGlobal(exp_name_z.ptr) orelse continue;
if (other_global == llvm_global) continue;
// replaceAllUsesWith requires the type to be unchanged. So we bitcast
// the new global to the old type and use that as the thing to replace
// old uses.
const new_global_ptr = llvm_global.constBitCast(other_global.typeOf());
other_global.replaceAllUsesWith(new_global_ptr);
llvm_global.takeName(other_global);
other_global.deleteGlobal();
// Problem: we now need to update the decl_map so that the extern
// decl index points to this new global. However, we don't know
// the decl index.
// Even if we did, a future incremental update to the extern would then
// treat the LLVM global as an extern rather than an export, so it would
// need a way to check that.
// This is a TODO that needs to be solved when making
// the LLVM backend support incremental compilation.
}
}
}
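// Editor's note: a hypothetical Zig-source scenario this pass resolves,
// an extern declaration and an @export sharing one symbol name:
//
//     extern fn identity(x: u32) u32; // referenced somewhere; lowers to an extern global
//     fn identityImpl(x: u32) u32 {
//         return x;
//     }
//     comptime {
//         @export(identityImpl, .{ .name = "identity" });
//     }
//
// Without this pass the LLVM module would hold both the extern `identity`
// and the exported definition; the code above redirects the extern's uses
// to the exported global and transfers the name to it.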
pub fn flushModule(self: *Object, comp: *Compilation, prog_node: *std.Progress.Node) !void {
var sub_prog_node = prog_node.start("LLVM Emit Object", 0);
sub_prog_node.activate();
sub_prog_node.context.refresh();
defer sub_prog_node.end();
try self.genErrorNameTable(comp);
try self.genCmpLtErrorsLenFunction(comp);
try self.genModuleLevelAssembly(comp);
try self.resolveExportExternCollisions();
try self.genErrorNameTable();
try self.genCmpLtErrorsLenFunction();
try self.genModuleLevelAssembly();
if (self.di_builder) |dib| {
// When lowering debug info for pointers, we emitted the element types as
@ -761,6 +799,14 @@ pub const Object = struct {
try self.updateDeclExports(module, decl_index, decl_exports);
}
/// TODO replace this with a call to `Module::getNamedValue`. This will require adding
/// a new wrapper in zig_llvm.h/zig_llvm.cpp.
fn getLlvmGlobal(o: Object, name: [*:0]const u8) ?*const llvm.Value {
if (o.llvm_module.getNamedFunction(name)) |x| return x;
if (o.llvm_module.getNamedGlobal(name)) |x| return x;
return null;
}
pub fn updateDeclExports(
self: *Object,
module: *Module,
@ -827,6 +873,7 @@ pub const Object = struct {
llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
}
}
// If a Decl is exported more than one time (which is rare),
// we add aliases for all but the first export.
// TODO LLVM C API does not support deleting aliases. We need to
@ -5632,7 +5679,8 @@ pub const FuncGen = struct {
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
@ -5651,7 +5699,8 @@ pub const FuncGen = struct {
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const negative_offset = self.builder.buildNeg(offset, "");

View File

@ -114,8 +114,6 @@ const Writer = struct {
.div_exact,
.rem,
.mod,
.ptr_add,
.ptr_sub,
.bit_and,
.bit_or,
.xor,
@ -231,6 +229,12 @@ const Writer = struct {
.slice,
.slice_elem_ptr,
.ptr_elem_ptr,
.ptr_add,
.ptr_sub,
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> try w.writeTyPlBin(s, inst),
.call,
@ -275,12 +279,6 @@ const Writer = struct {
.reduce => try w.writeReduce(s, inst),
.cmp_vector => try w.writeCmpVector(s, inst),
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
=> try w.writeOverflow(s, inst),
.dbg_block_begin, .dbg_block_end => {},
}
}
@ -478,15 +476,6 @@ const Writer = struct {
try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) });
}
fn writeOverflow(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const extra = w.air.extraData(Air.Bin, ty_pl.payload).data;
try w.writeOperand(s, inst, 0, extra.lhs);
try s.writeAll(", ");
try w.writeOperand(s, inst, 1, extra.rhs);
}
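// Editor's note: `writeTyPlBin` itself is outside the shown hunks. A
// sketch of what it plausibly does, reconstructed from the removed
// `writeOverflow` above plus the ty_pl decode used by the backends
// (hypothetical body, not verbatim compiler source):
//
//     fn writeTyPlBin(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
//         const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
//         const extra = w.air.extraData(Air.Bin, ty_pl.payload).data;
//         try w.writeOperand(s, inst, 0, extra.lhs);
//         try s.writeAll(", ");
//         try w.writeOperand(s, inst, 1, extra.rhs);
//     }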
fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;

View File

@ -1813,27 +1813,6 @@ pub const Value = extern union {
};
}
/// Asserts the value is numeric
pub fn isZero(self: Value) bool {
return switch (self.tag()) {
.zero, .the_only_possible_value => true,
.one => false,
.int_u64 => self.castTag(.int_u64).?.data == 0,
.int_i64 => self.castTag(.int_i64).?.data == 0,
.float_16 => self.castTag(.float_16).?.data == 0,
.float_32 => self.castTag(.float_32).?.data == 0,
.float_64 => self.castTag(.float_64).?.data == 0,
.float_80 => self.castTag(.float_80).?.data == 0,
.float_128 => self.castTag(.float_128).?.data == 0,
.int_big_positive => self.castTag(.int_big_positive).?.asBigInt().eqZero(),
.int_big_negative => self.castTag(.int_big_negative).?.asBigInt().eqZero(),
else => unreachable,
};
}
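// Editor's note (assumption): with `isZero` removed, the surviving
// callers would be expressed through the ordering API kept below, e.g.:
//
//     if (val.orderAgainstZero() == .eq) {
//         // value is zero
//     }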
pub fn orderAgainstZero(lhs: Value) std.math.Order {
return orderAgainstZeroAdvanced(lhs, null) catch unreachable;
}
@ -3442,7 +3421,6 @@ pub const Value = extern union {
const info = ty.intInfo(target);
if (info.bits == 0) {
assert(val.isZero()); // Sema should guarantee
return val;
}

View File

@ -16,11 +16,22 @@ test "global variable alignment" {
const slice = @as(*align(4) [1]u8, &foo)[0..];
comptime try expect(@TypeOf(slice) == *align(4) [1]u8);
}
{
var runtime_zero: usize = 0;
const slice = @as(*align(4) [1]u8, &foo)[runtime_zero..];
comptime try expect(@TypeOf(slice) == []align(4) u8);
}
}
test "slicing array of length 1 can assume runtime index is always zero" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
// TODO reevaluate this test case: note that you can change
// `runtime_zero` to `1` and the test still passes for stage1.
// Also reconsider this code:
// var array: [4]u8 = undefined;
// var runtime: usize = 4;
// var ptr = array[runtime..];
// _ = ptr;
var runtime_zero: usize = 0;
const slice = @as(*align(4) [1]u8, &foo)[runtime_zero..];
comptime try expect(@TypeOf(slice) == []align(4) u8);
}
test "default alignment allows unspecified in type syntax" {

View File

@ -797,7 +797,11 @@ test "auto created variables have correct alignment" {
}
test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@export(var_to_export, .{ .name = "opaque_extern_var" });
try expect(@ptrCast(*align(1) u32, &opaque_extern_var).* == 42);

View File

@ -326,8 +326,7 @@ test "array coersion to undefined at runtime" {
@setRuntimeSafety(true);
// TODO implement @setRuntimeSafety in stage2
if (builtin.zig_backend != .stage1 and builtin.mode != .Debug and builtin.mode != .ReleaseSafe) {
if (builtin.mode != .Debug and builtin.mode != .ReleaseSafe) {
return error.SkipZigTest;
}
@ -588,7 +587,11 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
}
test "vector casts" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {

View File

@ -1110,3 +1110,10 @@ test "no dependency loop for alignment of self tagged union" {
};
try S.doTheTest();
}
test "equality of pointers to comptime const" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
const a: i32 = undefined;
comptime assert(&a == &a);
}

View File

@ -214,7 +214,10 @@ test "allowzero pointer and slice" {
}
test "assign null directly to C pointer and test null equality" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var x: [*c]i32 = null;
try expect(x == null);
@ -238,7 +241,8 @@ test "assign null directly to C pointer and test null equality" {
@panic("fail");
}
const othery: i32 = undefined;
comptime try expect((y orelse &othery) == &othery);
const ptr_othery = &othery;
comptime try expect((y orelse ptr_othery) == ptr_othery);
var n: i32 = 1234;
var x1: [*c]i32 = &n;
@ -373,8 +377,6 @@ test "pointer to array at fixed address" {
}
test "pointer arithmetic affects the alignment" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
{
var ptr: [*]align(8) u32 = undefined;
var x: usize = 1;