Merge pull request #10996 from joachimschmidt557/stage2-arm

stage2 ARM: implement truncate to integers with <= 32 bits
Commit 058e482247, authored by Joachim Schmidt on 2022-02-26 22:50:31 +01:00 and committed via GitHub.
6 changed files with 298 additions and 138 deletions
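
The change in one sentence: the stage2 ARM backend now lowers @truncate to destination integer types of 32 bits or fewer, emitting a plain register move for exactly 32 bits and a single sbfx/ubfx bit-field extract for narrower widths. A minimal Zig illustration of code this unblocks (editor's example, not part of the diff):

    fn truncExample(x: u32) u7 {
        // unsigned 7-bit destination: lowered to `ubfx rd, rn, #0, #7`
        // (a signed destination such as i7 would use `sbfx` instead)
        return @truncate(u7, x);
    }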


@ -439,7 +439,7 @@ fn gen(self: *Self) !void {
// the code. Therefore, we can just delete
// the space initially reserved for the
// jump
self.mir_instructions.len -= 1;
self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.items[0]);
} else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.set(jmp_reloc, .{
.tag = .b,
@ -749,6 +749,17 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
if (!elem_ty.hasRuntimeBits()) {
// As this stack item will never be dereferenced at runtime,
// return the current stack offset
try self.stack.putNoClobber(self.gpa, self.next_stack_offset, .{
.inst = inst,
.size = 0,
});
return self.next_stack_offset;
}
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty});
};
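
The allocMemPtr change above covers zero-bit element types: such a stack item is never dereferenced at runtime, so it is recorded with size 0 at the current offset instead of growing the frame. A tiny test that exercises this path (editor's sketch, not from the patch):

    const expect = @import("std").testing.expect;

    test "alloc of zero-bit type" {
        var x: u0 = 0; // u0 has no runtime bits; its stack slot has size 0
        try expect(@truncate(u8, x) == 0);
    }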
@ -872,11 +883,61 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
_ = operand;
const info_a = operand_ty.intInfo(self.target.*);
const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*);
return self.fail("TODO implement trunc for {}", .{self.target.cpu.arch});
// return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
const result: MCValue = blk: {
if (info_b.bits <= 32) {
const operand_reg = switch (operand) {
.register => |r| r,
else => operand_reg: {
if (info_a.bits <= 32) {
break :operand_reg try self.copyToTmpRegister(operand_ty, operand);
} else {
return self.fail("TODO load least significant word into register", .{});
}
},
};
self.register_manager.freezeRegs(&.{operand_reg});
defer self.register_manager.unfreezeRegs(&.{operand_reg});
const dest_reg = dest_reg: {
if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :dest_reg operand_reg;
}
break :dest_reg try self.register_manager.allocReg(null);
};
switch (info_b.bits) {
32 => {
try self.genSetReg(operand_ty, dest_reg, .{ .register = operand_reg });
break :blk MCValue{ .register = dest_reg };
},
else => {
_ = try self.addInst(.{
.tag = switch (info_b.signedness) {
.signed => .sbfx,
.unsigned => .ubfx,
},
.data = .{ .rr_lsb_width = .{
.rd = dest_reg,
.rn = operand_reg,
.lsb = 0,
.width = @intCast(u6, info_b.bits),
} },
});
break :blk MCValue{ .register = dest_reg };
},
}
} else {
return self.fail("TODO: truncate to ints > 32 bits", .{});
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
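
With airTrunc handling all destination widths up to 32 bits (register move at exactly 32, ubfx/sbfx below that), the previously skipped truncate behavior tests can be enabled for stage2 ARM, as the test-file changes at the bottom of this diff do. A sketch in the same style (editor's example; values chosen arbitrarily):

    const expect = @import("std").testing.expect;

    test "truncate to sub-word and word-sized integers" {
        // editor's sketch, not part of this commit
        var x: u32 = 0x1234_5678;
        try expect(@truncate(u7, x) == 0x78); // < 32 bits, unsigned -> ubfx
        var y: i32 = -1;
        try expect(@truncate(i8, y) == -1); // < 32 bits, signed -> sbfx (sign-extends)
        try expect(@truncate(u32, x) == x); // exactly 32 bits -> plain register move
    }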
fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
@ -1514,7 +1575,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.compare_flags_signed, .compare_flags_unsigned => unreachable,
.embedded_in_code => unreachable,
.register => |dst_reg| {
try self.genLdrRegister(dst_reg, reg, elem_size);
try self.genLdrRegister(dst_reg, reg, elem_ty);
},
.stack_offset => |off| {
if (elem_size <= 4) {
@ -1615,7 +1676,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
switch (value) {
.register => |value_reg| {
try self.genStrRegister(value_reg, addr_reg, @intCast(u32, value_ty.abiSize(self.target.*)));
try self.genStrRegister(value_reg, addr_reg, value_ty);
},
else => {
if (value_ty.abiSize(self.target.*) <= 4) {
@ -2180,68 +2241,71 @@ fn binOp(
}
}
fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, abi_size: u32) !void {
switch (abi_size) {
1, 3, 4 => {
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .ldrb,
3, 4 => .ldr,
else => unreachable,
};
fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void {
const abi_size = ty.abiSize(self.target.*);
_ = try self.addInst(.{
.tag = tag,
.data = .{ .rr_offset = .{
.rt = dest_reg,
.rn = addr_reg,
.offset = .{ .offset = Instruction.Offset.none },
} },
});
},
2 => {
_ = try self.addInst(.{
.tag = .ldrh,
.data = .{ .rr_extra_offset = .{
.rt = dest_reg,
.rn = addr_reg,
.offset = .{ .offset = Instruction.ExtraLoadStoreOffset.none },
} },
});
},
else => unreachable, // invalid abi_size for a register
}
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb,
2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh,
3, 4 => .ldr,
else => unreachable,
};
const rr_offset: Mir.Inst.Data = .{ .rr_offset = .{
.rt = dest_reg,
.rn = addr_reg,
.offset = .{ .offset = Instruction.Offset.none },
} };
const rr_extra_offset: Mir.Inst.Data = .{ .rr_extra_offset = .{
.rt = dest_reg,
.rn = addr_reg,
.offset = .{ .offset = Instruction.ExtraLoadStoreOffset.none },
} };
const data: Mir.Inst.Data = switch (abi_size) {
1 => if (ty.isSignedInt()) rr_extra_offset else rr_offset,
2 => rr_extra_offset,
3, 4 => rr_offset,
else => unreachable,
};
_ = try self.addInst(.{
.tag = tag,
.data = data,
});
}
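
genLdrRegister now derives both the opcode and the MIR data format from the element type: signed byte loads become ldrsb and all halfword loads ldrh/ldrsh, which use ARM's "extra load/store" encoding (rr_extra_offset), while ldrb and ldr keep the ordinary single-data-transfer form (rr_offset). A hedged call-site sketch (hypothetical registers and types, not from the patch):

    // editor's sketch: load an i8 sign-extended, and a u16 zero-extended
    try self.genLdrRegister(dst_reg, addr_reg, Type.initTag(.i8));  // emits ldrsb dst, [addr]
    try self.genLdrRegister(dst_reg, addr_reg, Type.initTag(.u16)); // emits ldrh  dst, [addr]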
fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, abi_size: u32) !void {
switch (abi_size) {
1, 3, 4 => {
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
3, 4 => .str,
else => unreachable,
};
fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void {
const abi_size = ty.abiSize(self.target.*);
_ = try self.addInst(.{
.tag = tag,
.data = .{ .rr_offset = .{
.rt = source_reg,
.rn = addr_reg,
.offset = .{ .offset = Instruction.Offset.none },
} },
});
},
2 => {
_ = try self.addInst(.{
.tag = .strh,
.data = .{ .rr_extra_offset = .{
.rt = source_reg,
.rn = addr_reg,
.offset = .{ .offset = Instruction.ExtraLoadStoreOffset.none },
} },
});
},
else => unreachable, // invalid abi_size for a register
}
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .strb,
2 => .strh,
3, 4 => .str,
else => unreachable,
};
const rr_offset: Mir.Inst.Data = .{ .rr_offset = .{
.rt = source_reg,
.rn = addr_reg,
.offset = .{ .offset = Instruction.Offset.none },
} };
const rr_extra_offset: Mir.Inst.Data = .{ .rr_extra_offset = .{
.rt = source_reg,
.rn = addr_reg,
.offset = .{ .offset = Instruction.ExtraLoadStoreOffset.none },
} };
const data: Mir.Inst.Data = switch (abi_size) {
1, 3, 4 => rr_offset,
2 => rr_extra_offset,
else => unreachable,
};
_ = try self.addInst(.{
.tag = tag,
.data = data,
});
}
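
genStrRegister gets the same type-based signature, but since ARM has no signed store variants it only distinguishes strb, strh, and str; only the halfword case needs the extra load/store data format. Hypothetical call-site sketch (editor's example):

    try self.genStrRegister(value_reg, addr_reg, Type.initTag(.u16)); // emits strh value, [addr]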
fn genInlineMemcpy(
@ -2834,8 +2898,6 @@ fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
}
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
_ = operand;
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
@ -3569,55 +3631,59 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = @intCast(u32, addr) });
try self.genLdrRegister(reg, reg, @intCast(u32, ty.abiSize(self.target.*)));
try self.genLdrRegister(reg, reg, ty);
},
.stack_offset => |unadjusted_off| {
// TODO: maybe addressing from sp instead of fp
const abi_size = @intCast(u32, ty.abiSize(self.target.*));
const adj_off = unadjusted_off + abi_size;
switch (abi_size) {
1, 4 => {
const offset = if (adj_off <= math.maxInt(u12)) blk: {
break :blk Instruction.Offset.imm(@intCast(u12, adj_off));
} else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), .none);
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb,
2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh,
3, 4 => .ldr,
else => unreachable,
};
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .ldrb,
4 => .ldr,
else => unreachable,
};
const extra_offset = switch (abi_size) {
1 => ty.isSignedInt(),
2 => true,
3, 4 => false,
else => unreachable,
};
_ = try self.addInst(.{
.tag = tag,
.data = .{ .rr_offset = .{
.rt = reg,
.rn = .fp,
.offset = .{
.offset = offset,
.positive = false,
},
} },
});
},
2 => {
const offset = if (adj_off <= math.maxInt(u8)) blk: {
break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off));
} else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }));
if (extra_offset) {
const offset = if (adj_off <= math.maxInt(u8)) blk: {
break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off));
} else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }));
_ = try self.addInst(.{
.tag = .ldrh,
.data = .{ .rr_extra_offset = .{
.rt = reg,
.rn = .fp,
.offset = .{
.offset = offset,
.positive = false,
},
} },
});
},
else => return self.fail("TODO a type of size {} is not allowed in a register", .{abi_size}),
_ = try self.addInst(.{
.tag = tag,
.data = .{ .rr_extra_offset = .{
.rt = reg,
.rn = .fp,
.offset = .{
.offset = offset,
.positive = false,
},
} },
});
} else {
const offset = if (adj_off <= math.maxInt(u12)) blk: {
break :blk Instruction.Offset.imm(@intCast(u12, adj_off));
} else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), .none);
_ = try self.addInst(.{
.tag = tag,
.data = .{ .rr_offset = .{
.rt = reg,
.rn = .fp,
.offset = .{
.offset = offset,
.positive = false,
},
} },
});
}
},
.stack_argument_offset => |unadjusted_off| {
@ -3625,9 +3691,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
const adj_off = unadjusted_off + abi_size;
const tag: Mir.Inst.Tag = switch (abi_size) {
1 => .ldrb_stack_argument,
2 => .ldrh_stack_argument,
4 => .ldr_stack_argument,
1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
3, 4 => .ldr_stack_argument,
else => unreachable,
};


@ -115,8 +115,12 @@ pub fn emitMir(
.ldr_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrb_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrh_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrsb_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrsh_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrh => try emit.mirLoadStoreExtra(inst),
.ldrsb => try emit.mirLoadStore(inst),
.ldrsh => try emit.mirLoadStoreExtra(inst),
.strh => try emit.mirLoadStoreExtra(inst),
.movw => try emit.mirSpecialMove(inst),
@ -130,6 +134,9 @@ pub fn emitMir(
.push => try emit.mirBlockDataTransfer(inst),
.svc => try emit.mirSupervisorCall(inst),
.sbfx => try emit.mirBitFieldExtract(inst),
.ubfx => try emit.mirBitFieldExtract(inst),
}
}
}
@ -590,36 +597,42 @@ fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
const raw_offset = emit.prologue_stack_space - r_stack_offset.stack_offset;
switch (tag) {
.ldr_stack_argument => {
.ldr_stack_argument,
.ldrb_stack_argument,
=> {
const offset = if (raw_offset <= math.maxInt(u12)) blk: {
break :blk Instruction.Offset.imm(@intCast(u12, raw_offset));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
try emit.writeInstruction(Instruction.ldr(
const ldr = switch (tag) {
.ldr_stack_argument => Instruction.ldr,
.ldrb_stack_argument => Instruction.ldrb,
else => unreachable,
};
try emit.writeInstruction(ldr(
cond,
r_stack_offset.rt,
.fp,
.{ .offset = offset },
));
},
.ldrb_stack_argument => {
const offset = if (raw_offset <= math.maxInt(u12)) blk: {
break :blk Instruction.Offset.imm(@intCast(u12, raw_offset));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
try emit.writeInstruction(Instruction.ldrb(
cond,
r_stack_offset.rt,
.fp,
.{ .offset = offset },
));
},
.ldrh_stack_argument => {
.ldrh_stack_argument,
.ldrsb_stack_argument,
.ldrsh_stack_argument,
=> {
const offset = if (raw_offset <= math.maxInt(u8)) blk: {
break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, raw_offset));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
try emit.writeInstruction(Instruction.ldrh(
const ldr = switch (tag) {
.ldrh_stack_argument => Instruction.ldrh,
.ldrsb_stack_argument => Instruction.ldrsb,
.ldrsh_stack_argument => Instruction.ldrsh,
else => unreachable,
};
try emit.writeInstruction(ldr(
cond,
r_stack_offset.rt,
.fp,
@ -637,6 +650,8 @@ fn mirLoadStoreExtra(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.ldrh => try emit.writeInstruction(Instruction.ldrh(cond, rr_extra_offset.rt, rr_extra_offset.rn, rr_extra_offset.offset)),
.ldrsb => try emit.writeInstruction(Instruction.ldrsb(cond, rr_extra_offset.rt, rr_extra_offset.rn, rr_extra_offset.offset)),
.ldrsh => try emit.writeInstruction(Instruction.ldrsh(cond, rr_extra_offset.rt, rr_extra_offset.rn, rr_extra_offset.offset)),
.strh => try emit.writeInstruction(Instruction.strh(cond, rr_extra_offset.rt, rr_extra_offset.rn, rr_extra_offset.offset)),
else => unreachable,
}
@ -691,3 +706,19 @@ fn mirSupervisorCall(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
}
}
fn mirBitFieldExtract(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const rr_lsb_width = emit.mir.instructions.items(.data)[inst].rr_lsb_width;
const rd = rr_lsb_width.rd;
const rn = rr_lsb_width.rn;
const lsb = rr_lsb_width.lsb;
const width = rr_lsb_width.width;
switch (tag) {
.sbfx => try emit.writeInstruction(Instruction.sbfx(cond, rd, rn, lsb, width)),
.ubfx => try emit.writeInstruction(Instruction.ubfx(cond, rd, rn, lsb, width)),
else => unreachable,
}
}


@ -64,6 +64,14 @@ pub const Inst = struct {
ldrh,
/// Load Register Halfword
ldrh_stack_argument,
/// Load Register Signed Byte
ldrsb,
/// Load Register Signed Byte
ldrsb_stack_argument,
/// Load Register Signed Halfword
ldrsh,
/// Load Register Signed Halfword
ldrsh_stack_argument,
/// Logical Shift Left
lsl,
/// Logical Shift Right
@ -88,6 +96,8 @@ pub const Inst = struct {
push,
/// Reverse Subtract
rsb,
/// Signed Bit Field Extract
sbfx,
/// Store Register
str,
/// Store Register Byte
@ -98,6 +108,8 @@ pub const Inst = struct {
sub,
/// Supervisor Call
svc,
/// Unsigned Bit Field Extract
ubfx,
};
/// The position of an MIR instruction within the `Mir` instructions array.
@ -179,6 +191,16 @@ pub const Inst = struct {
rn: Register,
offset: bits.Instruction.ExtraLoadStoreOffsetArgs,
},
/// Two registers, an lsb (range 0-31), and a width (range 1-32)
///
/// Used by e.g. sbfx
rr_lsb_width: struct {
rd: Register,
rn: Register,
lsb: u5,
width: u6,
},
/// Three registers
///
/// Used by e.g. mul


@ -1,5 +1,6 @@
const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
/// The condition field specifies the flags necessary for an
@ -237,6 +238,17 @@ pub const Instruction = union(enum) {
fixed_3: u5 = 0b00010,
cond: u4,
},
bit_field_extract: packed struct {
rn: u4,
fixed_1: u3 = 0b101,
lsb: u5,
rd: u4,
widthm1: u5,
fixed_2: u1 = 0b1,
unsigned: u1,
fixed_3: u5 = 0b01111,
cond: u4,
},
single_data_transfer: packed struct {
offset: u12,
rd: u4,
@ -576,6 +588,7 @@ pub const Instruction = union(enum) {
.multiply => |v| @bitCast(u32, v),
.multiply_long => |v| @bitCast(u32, v),
.integer_saturating_arithmetic => |v| @bitCast(u32, v),
.bit_field_extract => |v| @bitCast(u32, v),
.single_data_transfer => |v| @bitCast(u32, v),
.extra_load_store => |v| @bitCast(u32, v),
.block_data_transfer => |v| @bitCast(u32, v),
@ -691,6 +704,27 @@ pub const Instruction = union(enum) {
};
}
fn bitFieldExtract(
unsigned: u1,
cond: Condition,
rd: Register,
rn: Register,
lsb: u5,
width: u6,
) Instruction {
assert(width > 0 and width <= 32);
return Instruction{
.bit_field_extract = .{
.rn = rn.id(),
.lsb = lsb,
.rd = rd.id(),
.widthm1 = @intCast(u5, width - 1),
.unsigned = unsigned,
.cond = @enumToInt(cond),
},
};
}
fn singleDataTransfer(
cond: Condition,
rd: Register,
@ -1044,6 +1078,16 @@ pub const Instruction = union(enum) {
return multiplyLong(cond, 1, 1, 1, rdhi, rdlo, rm, rn);
}
// Bit field extract
pub fn ubfx(cond: Condition, rd: Register, rn: Register, lsb: u5, width: u6) Instruction {
return bitFieldExtract(0b1, cond, rd, rn, lsb, width);
}
pub fn sbfx(cond: Condition, rd: Register, rn: Register, lsb: u5, width: u6) Instruction {
return bitFieldExtract(0b0, cond, rd, rn, lsb, width);
}
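
bits.zig already imports std.testing, so an encoding sanity check in the file's usual style could accompany the new helpers. The expected words below are the editor's hand-computed encodings from the bit_field_extract layout above, not values taken from the patch:

    test "serialize bit field extract" {
        // editor's sketch: ubfx/sbfx r0, r1, #0, #8 (cond = AL, widthm1 = 7)
        try testing.expectEqual(@as(u32, 0xe7e70051), Instruction.ubfx(.al, .r0, .r1, 0, 8).toU32());
        try testing.expectEqual(@as(u32, 0xe7a70051), Instruction.sbfx(.al, .r0, .r1, 0, 8).toU32());
    }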
// Single data transfer
pub const OffsetArgs = struct {
@ -1079,11 +1123,19 @@ pub const Instruction = union(enum) {
};
pub fn strh(cond: Condition, rt: Register, rn: Register, args: ExtraLoadStoreOffsetArgs) Instruction {
return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 0, 0b01, rn, rt, args.offset);
return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 0b0, 0b01, rn, rt, args.offset);
}
pub fn ldrh(cond: Condition, rt: Register, rn: Register, args: ExtraLoadStoreOffsetArgs) Instruction {
return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 1, 0b01, rn, rt, args.offset);
return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 0b1, 0b01, rn, rt, args.offset);
}
pub fn ldrsh(cond: Condition, rt: Register, rn: Register, args: ExtraLoadStoreOffsetArgs) Instruction {
return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 0b1, 0b11, rn, rt, args.offset);
}
pub fn ldrsb(cond: Condition, rt: Register, rn: Register, args: ExtraLoadStoreOffsetArgs) Instruction {
return extraLoadStore(cond, args.pre_index, args.positive, args.write_back, 0b1, 0b10, rn, rt, args.offset);
}
// Block data transfer


@ -16,7 +16,6 @@ test "empty function with comments" {
test "truncate" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(testTruncate(0x10fd) == 0xfd);
comptime try expect(testTruncate(0x10fd) == 0xfd);
@ -27,7 +26,6 @@ fn testTruncate(x: u32) u8 {
test "truncate to non-power-of-two integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testTrunc(u32, u1, 0b10101, 0b1);
try testTrunc(u32, u1, 0b10110, 0b0);


@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "truncate u0 to larger integer allowed and has comptime known result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: u0 = 0;
const y = @truncate(u8, x);
@ -13,7 +12,6 @@ test "truncate u0 to larger integer allowed and has comptime known result" {
test "truncate.u0.literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var z = @truncate(u0, 0);
try expect(z == 0);
@ -21,7 +19,6 @@ test "truncate.u0.literal" {
test "truncate.u0.const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const c0: usize = 0;
var z = @truncate(u0, c0);
@ -30,7 +27,6 @@ test "truncate.u0.const" {
test "truncate.u0.var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var d: u8 = 2;
var z = @truncate(u0, d);
@ -39,7 +35,6 @@ test "truncate.u0.var" {
test "truncate i0 to larger integer allowed and has comptime known result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: i0 = 0;
const y = @truncate(i8, x);
@ -48,7 +43,6 @@ test "truncate i0 to larger integer allowed and has comptime known result" {
test "truncate.i0.literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var z = @truncate(i0, 0);
try expect(z == 0);
@ -56,7 +50,6 @@ test "truncate.i0.literal" {
test "truncate.i0.const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const c0: isize = 0;
var z = @truncate(i0, c0);
@ -65,7 +58,6 @@ test "truncate.i0.const" {
test "truncate.i0.var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var d: i8 = 2;
var z = @truncate(i0, d);
@ -74,7 +66,6 @@ test "truncate.i0.var" {
test "truncate on comptime integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x = @truncate(u16, 9999);
try expect(x == 9999);