diff --git a/ci/zinc/linux_test.sh b/ci/zinc/linux_test.sh index 8f3eaacc7e..453269029d 100755 --- a/ci/zinc/linux_test.sh +++ b/ci/zinc/linux_test.sh @@ -7,8 +7,9 @@ ZIG=$DEBUG_STAGING/bin/zig $ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM $ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM -target aarch64-linux --test-cmd qemu-aarch64 --test-cmd-bin $ZIG test test/behavior.zig -fno-stage1 -I test -ofmt=c -$ZIG test test/behavior.zig -fno-stage1 -I test -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin -$ZIG test test/behavior.zig -fno-stage1 -I test -target arm-linux --test-cmd qemu-arm --test-cmd-bin +$ZIG test test/behavior.zig -fno-stage1 -I test -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin +$ZIG test test/behavior.zig -fno-stage1 -I test -target arm-linux --test-cmd qemu-arm --test-cmd-bin +$ZIG test test/behavior.zig -fno-stage1 -I test -target aarch64-linux --test-cmd qemu-aarch64 --test-cmd-bin $ZIG test test/behavior.zig -fno-stage1 -I test $ZIG build test-behavior -fqemu -fwasmtime diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 79fa38e275..b9d5a29f18 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -511,10 +511,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { switch (air_tags[inst]) { // zig fmt: off - .add, .ptr_add => try self.airAdd(inst), + .add, .ptr_add => try self.airBinOp(inst), .addwrap => try self.airAddWrap(inst), .add_sat => try self.airAddSat(inst), - .sub, .ptr_sub => try self.airSub(inst), + .sub, .ptr_sub => try self.airBinOp(inst), .subwrap => try self.airSubWrap(inst), .sub_sat => try self.airSubSat(inst), .mul => try self.airMul(inst), @@ -894,6 +894,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); + const operand_ty = self.air.typeOf(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, @@ -923,12 +924,19 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }; break :result r; }, - else => {}, + else => { + switch (operand_ty.zigTypeTag()) { + .Bool => { + // TODO convert this to mvn + and + const dest = try self.binOp(.xor, null, operand, .{ .immediate = 1 }, operand_ty, Type.bool); + break :result dest; + }, + else => return self.fail("TODO bitwise not", .{}), + } + }, } - - return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}); }; - _ = result; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airMin(self: *Self, inst: Air.Inst.Index) !void { @@ -950,9 +958,306 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airAdd(self: *Self, inst: Air.Inst.Index) !void { +/// Don't call this function directly. Use binOp instead. +/// +/// Calling this function signals an intention to generate a Mir +/// instruction of the form +/// +/// op dest, lhs, rhs +/// +/// Asserts that generating an instruction of that form is possible. 
+fn binOpRegister( + self: *Self, + tag: Air.Inst.Tag, + maybe_inst: ?Air.Inst.Index, + lhs: MCValue, + rhs: MCValue, + lhs_ty: Type, + rhs_ty: Type, +) !MCValue { + const lhs_is_register = lhs == .register; + const rhs_is_register = rhs == .register; + + if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register}); + if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register}); + + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + + const lhs_reg = if (lhs_is_register) lhs.register else blk: { + const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + break :inst Air.refToIndex(bin_op.lhs).?; + } else null; + + const reg = try self.register_manager.allocReg(track_inst); + self.register_manager.freezeRegs(&.{reg}); + + if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg }); + + break :blk reg; + }; + defer self.register_manager.unfreezeRegs(&.{lhs_reg}); + + const rhs_reg = if (rhs_is_register) rhs.register else blk: { + const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + break :inst Air.refToIndex(bin_op.rhs).?; + } else null; + + const reg = try self.register_manager.allocReg(track_inst); + self.register_manager.freezeRegs(&.{reg}); + + if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg }); + + break :blk reg; + }; + defer self.register_manager.unfreezeRegs(&.{rhs_reg}); + + const dest_reg = if (maybe_inst) |inst| blk: { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + + if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) { + break :blk lhs_reg; + } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) { + break :blk rhs_reg; + } else { + break :blk try self.register_manager.allocReg(inst); + } + } else try self.register_manager.allocReg(null); + + if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); + if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add, .ptr_add => .add_shifted_register, + .sub, .ptr_sub => .sub_shifted_register, + .xor => .eor_shifted_register, + else => unreachable, + }; + const mir_data: Mir.Inst.Data = switch (tag) { + .add, + .sub, + .ptr_add, + .ptr_sub, + => .{ .rrr_imm6_shift = .{ + .rd = dest_reg, + .rn = lhs_reg, + .rm = rhs_reg, + .imm6 = 0, + .shift = .lsl, + } }, + .xor => .{ .rrr_imm6_logical_shift = .{ + .rd = dest_reg, + .rn = lhs_reg, + .rm = rhs_reg, + .imm6 = 0, + .shift = .lsl, + } }, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = mir_tag, + .data = mir_data, + }); + + return MCValue{ .register = dest_reg }; +} + +/// Don't call this function directly. Use binOp instead. +/// +/// Calling this function signals an intention to generate a Mir +/// instruction of the form +/// +/// op dest, lhs, #rhs_imm +/// +/// Set lhs_and_rhs_swapped to true iff inst.bin_op.lhs corresponds to +/// rhs and vice versa. This parameter is only used when maybe_inst != +/// null. +/// +/// Asserts that generating an instruction of that form is possible. 
+fn binOpImmediate( + self: *Self, + tag: Air.Inst.Tag, + maybe_inst: ?Air.Inst.Index, + lhs: MCValue, + rhs: MCValue, + lhs_ty: Type, + lhs_and_rhs_swapped: bool, +) !MCValue { + const lhs_is_register = lhs == .register; + + if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register}); + + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + + const lhs_reg = if (lhs_is_register) lhs.register else blk: { + const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + break :inst Air.refToIndex( + if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs, + ).?; + } else null; + + const reg = try self.register_manager.allocReg(track_inst); + self.register_manager.freezeRegs(&.{reg}); + + if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg }); + + break :blk reg; + }; + defer self.register_manager.unfreezeRegs(&.{lhs_reg}); + + const dest_reg = if (maybe_inst) |inst| blk: { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + + if (lhs_is_register and self.reuseOperand( + inst, + if (lhs_and_rhs_swapped) bin_op.rhs else bin_op.lhs, + if (lhs_and_rhs_swapped) 1 else 0, + lhs, + )) { + break :blk lhs_reg; + } else { + break :blk try self.register_manager.allocReg(inst); + } + } else try self.register_manager.allocReg(null); + + if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add => .add_immediate, + .sub => .sub_immediate, + else => unreachable, + }; + const mir_data: Mir.Inst.Data = switch (tag) { + .add, + .sub, + => .{ .rr_imm12_sh = .{ + .rd = dest_reg, + .rn = lhs_reg, + .imm12 = @intCast(u12, rhs.immediate), + } }, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = mir_tag, + .data = mir_data, + }); + + return MCValue{ .register = dest_reg }; +} + +/// For all your binary operation needs, this function will generate +/// the corresponding Mir instruction(s). Returns the location of the +/// result. +/// +/// If the binary operation itself happens to be an Air instruction, +/// pass the corresponding index in the inst parameter. That helps +/// this function do stuff like reusing operands. +/// +/// This function does not do any lowering to Mir itself, but instead +/// looks at the lhs and rhs and determines which kind of lowering +/// would be best suited and then delegates the lowering to other +/// functions. +fn binOp( + self: *Self, + tag: Air.Inst.Tag, + maybe_inst: ?Air.Inst.Index, + lhs: MCValue, + rhs: MCValue, + lhs_ty: Type, + rhs_ty: Type, +) !MCValue { + switch (tag) { + // Arithmetic operations on integers and floats + .add, + .sub, + => { + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO binary operations on floats", .{}), + .Vector => return self.fail("TODO binary operations on vectors", .{}), + .Int => { + assert(lhs_ty.eql(rhs_ty)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 64) { + // Only say yes if the operation is + // commutative, i.e. 
we can swap both of the + // operands + const lhs_immediate_ok = switch (tag) { + .add => lhs == .immediate and lhs.immediate <= std.math.maxInt(u12), + .sub => false, + else => unreachable, + }; + const rhs_immediate_ok = switch (tag) { + .add, + .sub, + => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), + else => unreachable, + }; + + if (rhs_immediate_ok) { + return try self.binOpImmediate(tag, maybe_inst, lhs, rhs, lhs_ty, false); + } else if (lhs_immediate_ok) { + // swap lhs and rhs + return try self.binOpImmediate(tag, maybe_inst, rhs, lhs, rhs_ty, true); + } else { + return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + } + } else { + return self.fail("TODO binary operations on int with bits > 64", .{}); + } + }, + else => unreachable, + } + }, + // Bitwise operations on integers + .xor => { + switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO binary operations on vectors", .{}), + .Int => return self.fail("TODO binary operations on integers", .{}), + .Bool => { + assert(lhs_ty.eql(rhs_ty)); + // TODO boolean operations with immediates + return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + }, + else => unreachable, + } + }, + .ptr_add, + .ptr_sub, + => { + switch (lhs_ty.zigTypeTag()) { + .Pointer => { + const ptr_ty = lhs_ty; + const pointee_ty = switch (ptr_ty.ptrSize()) { + .One => ptr_ty.childType().childType(), // ptr to array, so get array element type + else => ptr_ty.childType(), + }; + + if (pointee_ty.abiSize(self.target.*) > 1) { + return self.fail("TODO ptr_add, ptr_sub with more element sizes", .{}); + } + + return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); + }, + else => unreachable, + } + }, + else => unreachable, + } +} + +fn airBinOp(self: *Self, inst: Air.Inst.Index) !void { + const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add for {}", .{self.target.cpu.arch}); + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_ty = self.air.typeOf(bin_op.lhs); + const rhs_ty = self.air.typeOf(bin_op.rhs); + + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -968,12 +1273,6 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airSub(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); -} - fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1098,13 +1397,26 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO 
implement unwrap error union error for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const error_union_ty = self.air.typeOf(ty_op.operand); + const payload_ty = error_union_ty.errorUnionPayload(); + const mcv = try self.resolveInst(ty_op.operand); + if (!payload_ty.hasRuntimeBits()) break :result mcv; + + return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const error_union_ty = self.air.typeOf(ty_op.operand); + const payload_ty = error_union_ty.errorUnionPayload(); + if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; + + return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -1146,7 +1458,14 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const error_union_ty = self.air.getRefType(ty_op.ty); + const payload_ty = error_union_ty.errorUnionPayload(); + const mcv = try self.resolveInst(ty_op.operand); + if (!payload_ty.hasRuntimeBits()) break :result mcv; + + return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -1158,7 +1477,20 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_len for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mcv = try self.resolveInst(ty_op.operand); + switch (mcv) { + .dead, .unreach => unreachable, + .register => unreachable, // a slice doesn't fit in one register + .stack_offset => |off| { + break :result MCValue{ .stack_offset = off }; + }, + .memory => |addr| { + break :result MCValue{ .memory = addr + 8 }; + }, + else => return self.fail("TODO implement slice_len for {}", .{mcv}), + } + }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -1177,10 +1509,114 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_elem_val for {}", .{self.target.cpu.arch}); + + if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ 
bin_op.lhs, bin_op.rhs, .none }); + const result: MCValue = result: { + const slice_mcv = try self.resolveInst(bin_op.lhs); + + // TODO optimize for the case where the index is a constant, + // i.e. index_mcv == .immediate + const index_mcv = try self.resolveInst(bin_op.rhs); + const index_is_register = index_mcv == .register; + + const slice_ty = self.air.typeOf(bin_op.lhs); + const elem_ty = slice_ty.childType(); + const elem_size = elem_ty.abiSize(self.target.*); + + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + + if (index_is_register) self.register_manager.freezeRegs(&.{index_mcv.register}); + defer if (index_is_register) self.register_manager.unfreezeRegs(&.{index_mcv.register}); + + const base_mcv: MCValue = switch (slice_mcv) { + .stack_offset => |off| .{ .register = try self.copyToTmpRegister(slice_ptr_field_type, .{ .stack_offset = off + 8 }) }, + else => return self.fail("TODO slice_elem_val when slice is {}", .{slice_mcv}), + }; + self.register_manager.freezeRegs(&.{base_mcv.register}); + + // TODO implement optimized ldr for airSliceElemVal + const dst_mcv = try self.allocRegOrMem(inst, true); + + const offset_mcv = try self.genMulConstant(bin_op.rhs, @intCast(u32, elem_size)); + assert(offset_mcv == .register); // result of multiplication should always be register + self.register_manager.freezeRegs(&.{offset_mcv.register}); + + const addr_reg = try self.register_manager.allocReg(null); + self.register_manager.freezeRegs(&.{addr_reg}); + defer self.register_manager.unfreezeRegs(&.{addr_reg}); + + _ = try self.addInst(.{ + .tag = .add_shifted_register, + .data = .{ .rrr_imm6_shift = .{ + .rd = addr_reg, + .rn = base_mcv.register, + .rm = offset_mcv.register, + .imm6 = 0, + .shift = .lsl, + } }, + }); + + // At this point in time, neither the base register + // nor the offset register contains any valuable data + // anymore. 
+ self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register }); + + try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type); + + break :result dst_mcv; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn genMulConstant(self: *Self, op: Air.Inst.Ref, imm: u32) !MCValue { + const lhs = try self.resolveInst(op); + const rhs = MCValue{ .immediate = imm }; + + const lhs_is_register = lhs == .register; + + if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register}); + defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register}); + + // Destination must be a register + // LHS must be a register + // RHS must be a register + var dst_mcv: MCValue = undefined; + var lhs_mcv: MCValue = lhs; + var rhs_mcv: MCValue = rhs; + + // Allocate registers for operands and/or destination + // Allocate 1 or 2 registers + if (lhs_is_register) { + // Move RHS to register + dst_mcv = MCValue{ .register = try self.register_manager.allocReg(null) }; + rhs_mcv = dst_mcv; + } else { + // Move LHS and RHS to register + const regs = try self.register_manager.allocRegs(2, .{ null, null }); + lhs_mcv = MCValue{ .register = regs[0] }; + rhs_mcv = MCValue{ .register = regs[1] }; + dst_mcv = lhs_mcv; + } + + // Move the operands to the newly allocated registers + if (!lhs_is_register) { + try self.genSetReg(self.air.typeOf(op), lhs_mcv.register, lhs); + } + try self.genSetReg(Type.initTag(.usize), rhs_mcv.register, rhs); + + _ = try self.addInst(.{ + .tag = .mul, + .data = .{ .rrr = .{ + .rd = dst_mcv.register, + .rn = lhs_mcv.register, + .rm = rhs_mcv.register, + } }, + }); + + return dst_mcv; +} + fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -1295,8 +1731,74 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo .embedded_in_code => { return self.fail("TODO implement loading from MCValue.embedded_in_code", .{}); }, - .register => { - return self.fail("TODO implement loading from MCValue.register for {}", .{self.target.cpu.arch}); + .register => |addr_reg| { + self.register_manager.freezeRegs(&.{addr_reg}); + defer self.register_manager.unfreezeRegs(&.{addr_reg}); + + switch (dst_mcv) { + .dead => unreachable, + .undef => unreachable, + .compare_flags_signed, .compare_flags_unsigned => unreachable, + .embedded_in_code => unreachable, + .register => |dst_reg| { + _ = try self.addInst(.{ + .tag = .ldr_immediate, + .data = .{ .load_store_register_immediate = .{ + .rt = dst_reg, + .rn = addr_reg, + .offset = Instruction.LoadStoreOffset.none.immediate, + } }, + }); + }, + .stack_offset => |off| { + if (elem_ty.abiSize(self.target.*) <= 8) { + const tmp_reg = try self.register_manager.allocReg(null); + self.register_manager.freezeRegs(&.{tmp_reg}); + defer self.register_manager.unfreezeRegs(&.{tmp_reg}); + + try self.load(.{ .register = tmp_reg }, ptr, ptr_ty); + try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg }); + } else { + // TODO optimize the register allocation + const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }); + self.register_manager.freezeRegs(®s); + defer self.register_manager.unfreezeRegs(®s); + + const src_reg = addr_reg; + const dst_reg = regs[0]; + const len_reg = regs[1]; + const count_reg = regs[2]; + const tmp_reg = regs[3]; + + // sub dst_reg, fp, #off + const elem_size = @intCast(u32, 
elem_ty.abiSize(self.target.*)); + const adj_off = off + elem_size; + const offset = math.cast(u12, adj_off) catch return self.fail("TODO load: larger stack offsets", .{}); + _ = try self.addInst(.{ + .tag = .sub_immediate, + .data = .{ .rr_imm12_sh = .{ + .rd = dst_reg, + .rn = .x29, + .imm12 = offset, + } }, + }); + + // mov len, #elem_size + const len_imm = math.cast(u16, elem_size) catch return self.fail("TODO load: larger stack offsets", .{}); + _ = try self.addInst(.{ + .tag = .movk, + .data = .{ .r_imm16_sh = .{ + .rd = len_reg, + .imm16 = len_imm, + } }, + }); + + // memcpy(src, dst, len) + try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg); + } + }, + else => return self.fail("TODO load from register into {}", .{dst_mcv}), + } }, .memory, .stack_offset, @@ -1311,6 +1813,84 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo } } +fn genInlineMemcpy( + self: *Self, + src: Register, + dst: Register, + len: Register, + count: Register, + tmp: Register, +) !void { + // movk count, #0 + _ = try self.addInst(.{ + .tag = .movk, + .data = .{ .r_imm16_sh = .{ + .rd = count, + .imm16 = 0, + } }, + }); + + // loop: + // cmp count, len + _ = try self.addInst(.{ + .tag = .cmp_shifted_register, + .data = .{ .rrr_imm6_shift = .{ + .rd = .xzr, + .rn = count, + .rm = len, + .imm6 = 0, + .shift = .lsl, + } }, + }); + + // bge end + _ = try self.addInst(.{ + .tag = .b_cond, + .data = .{ .inst_cond = .{ + .inst = @intCast(u32, self.mir_instructions.len + 5), + .cond = .ge, + } }, + }); + + // ldrb tmp, [src, count] + _ = try self.addInst(.{ + .tag = .ldrb_register, + .data = .{ .load_store_register_register = .{ + .rt = tmp, + .rn = src, + .offset = Instruction.LoadStoreOffset.reg(count).register, + } }, + }); + + // strb tmp, [dest, count] + _ = try self.addInst(.{ + .tag = .strb_register, + .data = .{ .load_store_register_register = .{ + .rt = tmp, + .rn = dst, + .offset = Instruction.LoadStoreOffset.reg(count).register, + } }, + }); + + // add count, count, #1 + _ = try self.addInst(.{ + .tag = .add_immediate, + .data = .{ .rr_imm12_sh = .{ + .rd = count, + .rn = count, + .imm12 = 1, + } }, + }); + + // b loop + _ = try self.addInst(.{ + .tag = .b, + .data = .{ .inst = @intCast(u32, self.mir_instructions.len - 5) }, + }); + + // end: +} + fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); @@ -1337,11 +1917,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn airStore(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr = try self.resolveInst(bin_op.lhs); - const value = try self.resolveInst(bin_op.rhs); - const elem_ty = self.air.typeOf(bin_op.rhs); +fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1350,13 +1926,13 @@ fn airStore(self: *Self, inst: Air.Inst.Index) !void { .compare_flags_unsigned => unreachable, .compare_flags_signed => unreachable, .immediate => |imm| { - try self.setRegOrMem(elem_ty, .{ .memory = imm }, value); + try self.setRegOrMem(value_ty, .{ .memory = imm }, value); }, .ptr_stack_offset => |off| { - try self.genSetStack(elem_ty, off, value); + try self.genSetStack(value_ty, off, value); }, .ptr_embedded_in_code => |off| { - try self.setRegOrMem(elem_ty, .{ 
.embedded_in_code = off }, value); + try self.setRegOrMem(value_ty, .{ .embedded_in_code = off }, value); }, .embedded_in_code => { return self.fail("TODO implement storing to MCValue.embedded_in_code", .{}); @@ -1364,33 +1940,55 @@ fn airStore(self: *Self, inst: Air.Inst.Index) !void { .register => { return self.fail("TODO implement storing to MCValue.register", .{}); }, - .memory => { - return self.fail("TODO implement storing to MCValue.memory", .{}); - }, - .stack_offset => { - return self.fail("TODO implement storing to MCValue.stack_offset", .{}); + .memory, + .stack_offset, + => { + const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr); + try self.store(.{ .register = addr_reg }, value, ptr_ty, value_ty); }, } +} + +fn airStore(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const ptr = try self.resolveInst(bin_op.lhs); + const value = try self.resolveInst(bin_op.rhs); + const ptr_ty = self.air.typeOf(bin_op.lhs); + const value_ty = self.air.typeOf(bin_op.rhs); + + try self.store(ptr, value, ptr_ty, value_ty); + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - return self.structFieldPtr(extra.struct_operand, ty_pl.ty, extra.field_index); + const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index); + return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - return self.structFieldPtr(ty_op.operand, ty_op.ty, index); + const result = try self.structFieldPtr(inst, ty_op.operand, index); + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn structFieldPtr(self: *Self, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !void { - _ = self; - _ = operand; - _ = ty; - _ = index; - return self.fail("TODO implement codegen struct_field_ptr", .{}); - //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none }); + +fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { + return if (self.liveness.isUnused(inst)) .dead else result: { + const mcv = try self.resolveInst(operand); + const struct_ty = self.air.typeOf(operand).childType(); + const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_ty = struct_ty.structFieldType(index); + const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)); + switch (mcv) { + .ptr_stack_offset => |off| { + break :result MCValue{ .ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size }; + }, + else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}), + } + }; } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { @@ -1487,49 +2085,55 @@ fn airFence(self: *Self) !void { fn airCall(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; - const fn_ty = self.air.typeOf(pl_op.operand); const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @bitCast([]const Air.Inst.Ref, 
self.air.extra[extra.end..][0..extra.data.args_len]); + const ty = self.air.typeOf(callee); + + const fn_ty = switch (ty.zigTypeTag()) { + .Fn => ty, + .Pointer => ty.childType(), + else => unreachable, + }; var info = try self.resolveCallingConventionValues(fn_ty); defer info.deinit(self); + for (info.args) |mc_arg, arg_i| { + const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); + const arg_mcv = try self.resolveInst(args[arg_i]); + + switch (mc_arg) { + .none => continue, + .undef => unreachable, + .immediate => unreachable, + .unreach => unreachable, + .dead => unreachable, + .embedded_in_code => unreachable, + .memory => unreachable, + .compare_flags_signed => unreachable, + .compare_flags_unsigned => unreachable, + .register => |reg| { + try self.register_manager.getReg(reg, null); + try self.genSetReg(arg_ty, reg, arg_mcv); + }, + .stack_offset => { + return self.fail("TODO implement calling with parameters in memory", .{}); + }, + .ptr_stack_offset => { + return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + }, + .ptr_embedded_in_code => { + return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); + }, + } + } + // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) { - for (info.args) |mc_arg, arg_i| { - const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); - const arg_mcv = try self.resolveInst(args[arg_i]); - - switch (mc_arg) { - .none => continue, - .undef => unreachable, - .immediate => unreachable, - .unreach => unreachable, - .dead => unreachable, - .embedded_in_code => unreachable, - .memory => unreachable, - .compare_flags_signed => unreachable, - .compare_flags_unsigned => unreachable, - .register => |reg| { - try self.register_manager.getReg(reg, null); - try self.genSetReg(arg_ty, reg, arg_mcv); - }, - .stack_offset => { - return self.fail("TODO implement calling with parameters in memory", .{}); - }, - .ptr_stack_offset => { - return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); - }, - .ptr_embedded_in_code => { - return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); - }, - } - } - - if (self.air.value(callee)) |func_value| { + if (self.air.value(callee)) |func_value| { + if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); @@ -1553,45 +2157,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void { } else { return self.fail("TODO implement calling bitcasted functions", .{}); } - } else { - return self.fail("TODO implement calling runtime known function pointer", .{}); - } - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - for (info.args) |mc_arg, arg_i| { - const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); - const arg_mcv = try self.resolveInst(args[arg_i]); - // Here we do not use setRegOrMem even though the logic is similar, because - // the function call will move the stack pointer, so the offsets are different. 
- switch (mc_arg) { - .none => continue, - .register => |reg| { - try self.register_manager.getReg(reg, null); - try self.genSetReg(arg_ty, reg, arg_mcv); - }, - .stack_offset => { - // Here we need to emit instructions like this: - // mov qword ptr [rsp + stack_offset], x - return self.fail("TODO implement calling with parameters in memory", .{}); - }, - .ptr_stack_offset => { - return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); - }, - .ptr_embedded_in_code => { - return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); - }, - .undef => unreachable, - .immediate => unreachable, - .unreach => unreachable, - .dead => unreachable, - .embedded_in_code => unreachable, - .memory => unreachable, - .compare_flags_signed => unreachable, - .compare_flags_unsigned => unreachable, - } - } - - if (self.air.value(callee)) |func_value| { + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; // TODO I'm hacking my way through here by repurposing .memory for storing @@ -1627,41 +2193,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void { } else { return self.fail("TODO implement calling bitcasted functions", .{}); } - } else { - return self.fail("TODO implement calling runtime known function pointer", .{}); - } - } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - for (info.args) |mc_arg, arg_i| { - const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); - const arg_mcv = try self.resolveInst(args[arg_i]); - - switch (mc_arg) { - .none => continue, - .undef => unreachable, - .immediate => unreachable, - .unreach => unreachable, - .dead => unreachable, - .embedded_in_code => unreachable, - .memory => unreachable, - .compare_flags_signed => unreachable, - .compare_flags_unsigned => unreachable, - .register => |reg| { - try self.register_manager.getReg(reg, null); - try self.genSetReg(arg_ty, reg, arg_mcv); - }, - .stack_offset => { - return self.fail("TODO implement calling with parameters in memory", .{}); - }, - .ptr_stack_offset => { - return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); - }, - .ptr_embedded_in_code => { - return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); - }, - } - } - if (self.air.value(callee)) |func_value| { + } else if (self.bin_file.cast(link.File.Plan9)) |p9| { if (func_value.castTag(.function)) |func_payload| { try p9.seeDecl(func_payload.data.owner_decl); const ptr_bits = self.target.cpu.arch.ptrBitWidth(); @@ -1681,10 +2213,17 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void { } else { return self.fail("TODO implement calling bitcasted functions", .{}); } - } else { - return self.fail("TODO implement calling runtime known function pointer", .{}); - } - } else unreachable; + } else unreachable; + } else { + assert(ty.zigTypeTag() == .Pointer); + const mcv = try self.resolveInst(callee); + try self.genSetReg(ty, .x30, mcv); + + _ = try self.addInst(.{ + .tag = .blr, + .data = .{ .reg = .x30 }, + }); + } const result: MCValue = result: { switch (info.return_value) { @@ -1741,12 +2280,23 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; + if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + const ty = self.air.typeOf(bin_op.lhs); - 
assert(ty.eql(self.air.typeOf(bin_op.rhs))); - if (ty.zigTypeTag() == .ErrorSet) - return self.fail("TODO implement cmp for errors", .{}); + + if (ty.abiSize(self.target.*) > 8) { + return self.fail("TODO cmp for types with size > 8", .{}); + } + + const signedness: std.builtin.Signedness = blk: { + // by default we assume the operand type is unsigned (i.e. bools and enum values) + if (ty.zigTypeTag() != .Int) break :blk .unsigned; + + // in case of an actual integer, we emit the correct signedness + break :blk ty.intInfo(self.target.*).signedness; + }; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -1812,8 +2362,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { .immediate => |imm| { _ = try self.addInst(.{ .tag = .cmp_immediate, - .data = .{ .rr_imm12_sh = .{ - .rd = .xzr, + .data = .{ .r_imm12_sh = .{ .rn = lhs_mcv.register, .imm12 = @intCast(u12, imm), } }, @@ -1822,9 +2371,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else => unreachable, } - break :result switch (ty.isSignedInt()) { - true => MCValue{ .compare_flags_signed = op }, - false => MCValue{ .compare_flags_unsigned = op }, + break :result switch (signedness) { + .signed => MCValue{ .compare_flags_signed = op }, + .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1876,7 +2425,22 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { }, }, }), - else => return self.fail("TODO implement condbr when condition is {s}", .{@tagName(cond)}), + else => blk: { + const reg = switch (cond) { + .register => |r| r, + else => try self.copyToTmpRegister(Type.bool, cond), + }; + + break :blk try self.addInst(.{ + .tag = .cbz, + .data = .{ + .r_inst = .{ + .rt = reg, + .inst = undefined, // populated later through performReloc + }, + }, + }); + }, }; // Capture the state of register and stack allocation state so that we can revert to it. @@ -2008,18 +2572,51 @@ fn isNonNull(self: *Self, operand: MCValue) !MCValue { return self.fail("TODO call isNull and invert the result", .{}); } -fn isErr(self: *Self, operand: MCValue) !MCValue { +fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNonNull and invert the result. - return self.fail("TODO call isNonErr and invert the result", .{}); + + const error_type = ty.errorUnionSet(); + const payload_type = ty.errorUnionPayload(); + + if (!error_type.hasRuntimeBits()) { + return MCValue{ .immediate = 0 }; // always false + } else if (!payload_type.hasRuntimeBits()) { + if (error_type.abiSize(self.target.*) <= 8) { + const reg_mcv: MCValue = switch (operand) { + .register => operand, + else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, + }; + + _ = try self.addInst(.{ + .tag = .cmp_immediate, + .data = .{ .r_imm12_sh = .{ + .rn = reg_mcv.register, + .imm12 = 0, + } }, + }); + + return MCValue{ .compare_flags_unsigned = .gt }; + } else { + return self.fail("TODO isErr for errors with size > 8", .{}); + } + } else { + return self.fail("TODO isErr for non-empty payloads", .{}); + } } -fn isNonErr(self: *Self, operand: MCValue) !MCValue { - _ = operand; - // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call isNull and invert the result. 
- return self.fail("TODO call isErr and invert the result", .{}); +fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue { + const is_err_result = try self.isErr(ty, operand); + switch (is_err_result) { + .compare_flags_unsigned => |op| { + assert(op == .gt); + return MCValue{ .compare_flags_unsigned = .lte }; + }, + .immediate => |imm| { + assert(imm == 0); + return MCValue{ .immediate = 1 }; + }, + else => unreachable, + } } fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { @@ -2080,7 +2677,8 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - break :result try self.isErr(operand); + const ty = self.air.typeOf(un_op); + break :result try self.isErr(ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -2089,6 +2687,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); + const ptr_ty = self.air.typeOf(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -2098,7 +2697,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { } }; try self.load(operand, operand_ptr, self.air.typeOf(un_op)); - break :result try self.isErr(operand); + break :result try self.isErr(ptr_ty.elemType(), operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -2107,7 +2706,8 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - break :result try self.isNonErr(operand); + const ty = self.air.typeOf(un_op); + break :result try self.isNonErr(ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -2116,6 +2716,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); + const ptr_ty = self.air.typeOf(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
@@ -2125,7 +2726,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { } }; try self.load(operand, operand_ptr, self.air.typeOf(un_op)); - break :result try self.isNonErr(operand); + break :result try self.isNonErr(ptr_ty.elemType(), operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -2184,8 +2785,9 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { - .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Air.Inst.Index, self.mir_instructions.len), - .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Air.Inst.Index, self.mir_instructions.len), + .cbz => self.mir_instructions.items(.data)[inst].r_inst.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), + .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), + .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), else => unreachable, } } @@ -2212,7 +2814,16 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { - block_data.mcv = operand_mcv; + block_data.mcv = switch (operand_mcv) { + .none, .dead, .unreach => unreachable, + .register, .stack_offset, .memory => operand_mcv, + .immediate => blk: { + const new_mcv = try self.allocRegOrMem(block, true); + try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + break :blk new_mcv; + }, + else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), + }; } else { try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); } @@ -2412,8 +3023,61 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. 
- const reg = try self.copyToTmpRegister(ty, mcv); - return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes: u64 = @divExact(ptr_bits, 8); + if (ty.abiSize(self.target.*) <= ptr_bytes) { + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); + } else { + // TODO optimize the register allocation + const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }); + self.register_manager.freezeRegs(®s); + defer self.register_manager.unfreezeRegs(®s); + + const src_reg = regs[0]; + const dst_reg = regs[1]; + const len_reg = regs[2]; + const count_reg = regs[3]; + const tmp_reg = regs[4]; + + // sub src_reg, fp, #off + const adj_src_offset = off + @intCast(u32, ty.abiSize(self.target.*)); + const src_offset = math.cast(u12, adj_src_offset) catch return self.fail("TODO load: larger stack offsets", .{}); + _ = try self.addInst(.{ + .tag = .sub_immediate, + .data = .{ .rr_imm12_sh = .{ + .rd = src_reg, + .rn = .x29, + .imm12 = src_offset, + } }, + }); + + // sub dst_reg, fp, #stack_offset + const adj_dst_off = stack_offset + @intCast(u32, ty.abiSize(self.target.*)); + const dst_offset = math.cast(u12, adj_dst_off) catch return self.fail("TODO load: larger stack offsets", .{}); + _ = try self.addInst(.{ + .tag = .sub_immediate, + .data = .{ .rr_imm12_sh = .{ + .rd = dst_reg, + .rn = .x29, + .imm12 = dst_offset, + } }, + }); + + // mov len, #elem_size + const elem_size = @intCast(u32, ty.abiSize(self.target.*)); + const len_imm = math.cast(u16, elem_size) catch return self.fail("TODO load: larger stack offsets", .{}); + _ = try self.addInst(.{ + .tag = .movk, + .data = .{ .r_imm16_sh = .{ + .rd = len_reg, + .imm16 = len_imm, + } }, + }); + + // memcpy(src, dst, len) + try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg); + } }, } } @@ -2445,11 +3109,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void _ = try self.addInst(.{ .tag = .cset, - .data = .{ .rrr_cond = .{ + .data = .{ .r_cond = .{ .rd = reg, - .rn = .xzr, - .rm = .xzr, - .cond = condition, + .cond = condition.negate(), } }, }); }, @@ -2533,7 +3195,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, }); }, - else => return self.fail("TODO implement genSetReg other types abi_size={}", .{abi_size}), + 3, 5, 6, 7 => return self.fail("TODO implement genSetReg types size {}", .{abi_size}), + else => unreachable, } }, else => return self.fail("TODO implement genSetReg for aarch64 {}", .{mcv}), @@ -2713,27 +3376,6 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } } -/// If the MCValue is an immediate, and it does not fit within this type, -/// we put it in a register. -/// A potential opportunity for future optimization here would be keeping track -/// of the fact that the instruction is available both as an immediate -/// and as a register. -fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCValue { - const mcv = try self.resolveInst(operand); - const ti = @typeInfo(T).Int; - switch (mcv) { - .immediate => |imm| { - // This immediate is unsigned. 
- const U = std.meta.Int(.unsigned, ti.bits - @boolToInt(ti.signedness == .signed)); - if (imm >= math.maxInt(U)) { - return MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.usize), mcv) }; - } - }, - else => {}, - } - return mcv; -} - fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -2847,31 +3489,32 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { } }, .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = self.bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return MCValue{ .immediate = error_index }; - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. - return MCValue{ .immediate = 0 }; - }, - } + const err_name = typed_value.val.castTag(.@"error").?.data.name; + const module = self.bin_file.options.module.?; + const global_error_set = module.global_error_set; + const error_index = global_error_set.get(err_name).?; + return MCValue{ .immediate = error_index }; }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(); const payload_type = typed_value.ty.errorUnionPayload(); - const sub_val = typed_value.val.castTag(.eu_payload).?.data; - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return self.genTypedValue(.{ .ty = error_type, .val = sub_val }); + if (typed_value.val.castTag(.eu_payload)) |pl| { + if (!payload_type.hasRuntimeBits()) { + // We use the error type directly as the type. + return MCValue{ .immediate = 0 }; + } + + _ = pl; + return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty}); + } else { + if (!payload_type.hasRuntimeBits()) { + // We use the error type directly as the type. + return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); + } + + return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty}); } - - return self.fail("TODO implement error union const of type '{}'", .{typed_value.ty}); }, else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}), } @@ -3015,13 +3658,18 @@ fn parseRegName(name: []const u8) ?Register { } fn registerAlias(reg: Register, size_bytes: u32) Register { - _ = size_bytes; - - return reg; + if (size_bytes == 0) { + unreachable; // should be comptime known + } else if (size_bytes <= 4) { + return reg.to32(); + } else if (size_bytes <= 8) { + return reg.to64(); + } else { + unreachable; // TODO handle floating-point registers + } } -/// For most architectures this does nothing. For x86_64 it resolves any aliased registers -/// to the 64-bit wide ones. +/// Resolves any aliased registers to the 64-bit wide ones. 
fn toCanonicalReg(reg: Register) Register { - return reg; + return reg.to64(); } diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index 5b2610f508..bc37cb56c6 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -50,11 +50,13 @@ const InnerError = error{ }; const BranchType = enum { + cbz, b_cond, unconditional_branch_immediate, fn default(tag: Mir.Inst.Tag) BranchType { return switch (tag) { + .cbz => .cbz, .b, .bl => .unconditional_branch_immediate, .b_cond => .b_cond, else => unreachable, @@ -83,6 +85,8 @@ pub fn emitMir( .b => try emit.mirBranch(inst), .bl => try emit.mirBranch(inst), + .cbz => try emit.mirCompareAndBranch(inst), + .blr => try emit.mirUnconditionalBranchRegister(inst), .ret => try emit.mirUnconditionalBranchRegister(inst), @@ -91,7 +95,9 @@ pub fn emitMir( .call_extern => try emit.mirCallExtern(inst), + .add_shifted_register => try emit.mirAddSubtractShiftedRegister(inst), .cmp_shifted_register => try emit.mirAddSubtractShiftedRegister(inst), + .sub_shifted_register => try emit.mirAddSubtractShiftedRegister(inst), .cset => try emit.mirConditionalSelect(inst), @@ -100,6 +106,8 @@ pub fn emitMir( .dbg_prologue_end => try emit.mirDebugPrologueEnd(), .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), + .eor_shifted_register => try emit.mirLogicalShiftedRegister(inst), + .load_memory => try emit.mirLoadMemory(inst), .ldp => try emit.mirLoadStoreRegisterPair(inst), @@ -128,10 +136,13 @@ pub fn emitMir( .mov_register => try emit.mirMoveRegister(inst), .mov_to_from_sp => try emit.mirMoveRegister(inst), + .mvn => try emit.mirMoveRegister(inst), .movk => try emit.mirMoveWideImmediate(inst), .movz => try emit.mirMoveWideImmediate(inst), + .mul => try emit.mirDataProcessing3Source(inst), + .nop => try emit.mirNop(), .push_regs => try emit.mirPushPopRegs(inst), @@ -156,15 +167,22 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType { assert(offset & 0b11 == 0); switch (tag) { + .cbz => { + if (std.math.cast(i19, @shrExact(offset, 2))) |_| { + return BranchType.cbz; + } else |_| { + return emit.fail("TODO support cbz branches larger than +-1 MiB", .{}); + } + }, .b, .bl => { - if (std.math.cast(i26, offset >> 2)) |_| { + if (std.math.cast(i26, @shrExact(offset, 2))) |_| { return BranchType.unconditional_branch_immediate; } else |_| { - return emit.fail("TODO support branches larger than +-128 MiB", .{}); + return emit.fail("TODO support unconditional branches larger than +-128 MiB", .{}); } }, .b_cond => { - if (std.math.cast(i19, offset >> 2)) |_| { + if (std.math.cast(i19, @shrExact(offset, 2))) |_| { return BranchType.b_cond; } else |_| { return emit.fail("TODO support conditional branches larger than +-1 MiB", .{}); @@ -179,8 +197,10 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { if (isBranch(tag)) { switch (emit.branch_types.get(inst).?) 
{ - .unconditional_branch_immediate => return 4, - .b_cond => return 4, + .cbz, + .unconditional_branch_immediate, + .b_cond, + => return 4, } } @@ -201,6 +221,12 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { return 5 * 4; } }, + .pop_regs, .push_regs => { + const reg_list = emit.mir.instructions.items(.data)[inst].reg_list; + const number_of_regs = @popCount(u32, reg_list); + const number_of_insts = std.math.divCeil(u6, number_of_regs, 2) catch unreachable; + return number_of_insts * 4; + }, .call_extern => return 4, .dbg_line, .dbg_epilogue_begin, @@ -212,7 +238,11 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { fn isBranch(tag: Mir.Inst.Tag) bool { return switch (tag) { - .b, .bl, .b_cond => true, + .cbz, + .b, + .bl, + .b_cond, + => true, else => false, }; } @@ -221,6 +251,7 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index { const tag = emit.mir.instructions.items(.tag)[inst]; switch (tag) { + .cbz => return emit.mir.instructions.items(.data)[inst].r_inst.inst, .b, .bl => return emit.mir.instructions.items(.data)[inst].inst, .b_cond => return emit.mir.instructions.items(.data)[inst].inst_cond.inst, else => unreachable, @@ -414,27 +445,30 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; - const rr_imm12_sh = emit.mir.instructions.items(.data)[inst].rr_imm12_sh; - switch (tag) { - .add_immediate => try emit.writeInstruction(Instruction.add( - rr_imm12_sh.rd, - rr_imm12_sh.rn, - rr_imm12_sh.imm12, - rr_imm12_sh.sh == 1, - )), - .cmp_immediate => try emit.writeInstruction(Instruction.subs( - rr_imm12_sh.rd, - rr_imm12_sh.rn, - rr_imm12_sh.imm12, - rr_imm12_sh.sh == 1, - )), - .sub_immediate => try emit.writeInstruction(Instruction.sub( - rr_imm12_sh.rd, - rr_imm12_sh.rn, - rr_imm12_sh.imm12, - rr_imm12_sh.sh == 1, - )), + .add_immediate, + .sub_immediate, + => { + const rr_imm12_sh = emit.mir.instructions.items(.data)[inst].rr_imm12_sh; + const rd = rr_imm12_sh.rd; + const rn = rr_imm12_sh.rn; + const imm12 = rr_imm12_sh.imm12; + const sh = rr_imm12_sh.sh == 1; + + switch (tag) { + .add_immediate => try emit.writeInstruction(Instruction.add(rd, rn, imm12, sh)), + .sub_immediate => try emit.writeInstruction(Instruction.sub(rd, rn, imm12, sh)), + else => unreachable, + } + }, + .cmp_immediate => { + const r_imm12_sh = emit.mir.instructions.items(.data)[inst].r_imm12_sh; + const rn = r_imm12_sh.rn; + const imm12 = r_imm12_sh.imm12; + const sh = r_imm12_sh.sh == 1; + + try emit.writeInstruction(Instruction.subs(.xzr, rn, imm12, sh)); + }, else => unreachable, } } @@ -481,6 +515,23 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void { } } +fn mirCompareAndBranch(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const r_inst = emit.mir.instructions.items(.data)[inst].r_inst; + + const offset = @intCast(i64, emit.code_offset_mapping.get(r_inst.inst).?) 
- @intCast(i64, emit.code.items.len); + const branch_type = emit.branch_types.get(inst).?; + log.debug("mirCompareAndBranch: {} offset={}", .{ inst, offset }); + + switch (branch_type) { + .cbz => switch (tag) { + .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @intCast(i21, offset))), + else => unreachable, + }, + else => unreachable, + } +} + fn mirUnconditionalBranchRegister(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const reg = emit.mir.instructions.items(.data)[inst].reg; @@ -565,30 +616,42 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void { fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift; + const rd = rrr_imm6_shift.rd; + const rn = rrr_imm6_shift.rn; + const rm = rrr_imm6_shift.rm; + const shift = rrr_imm6_shift.shift; + const imm6 = rrr_imm6_shift.imm6; switch (tag) { - .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister( - rrr_imm6_shift.rd, - rrr_imm6_shift.rn, - rrr_imm6_shift.rm, - rrr_imm6_shift.shift, - rrr_imm6_shift.imm6, - )), + .add_shifted_register => try emit.writeInstruction(Instruction.addShiftedRegister(rd, rn, rm, shift, imm6)), + .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(rd, rn, rm, shift, imm6)), + .sub_shifted_register => try emit.writeInstruction(Instruction.subShiftedRegister(rd, rn, rm, shift, imm6)), else => unreachable, } } fn mirConditionalSelect(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; - const rrr_cond = emit.mir.instructions.items(.data)[inst].rrr_cond; + switch (tag) { + .cset => { + const r_cond = emit.mir.instructions.items(.data)[inst].r_cond; + try emit.writeInstruction(Instruction.csinc(r_cond.rd, .xzr, .xzr, r_cond.cond)); + }, + else => unreachable, + } +} + +fn mirLogicalShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const rrr_imm6_logical_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_logical_shift; + const rd = rrr_imm6_logical_shift.rd; + const rn = rrr_imm6_logical_shift.rn; + const rm = rrr_imm6_logical_shift.rm; + const shift = rrr_imm6_logical_shift.shift; + const imm6 = rrr_imm6_logical_shift.imm6; switch (tag) { - .cset => try emit.writeInstruction(Instruction.csinc( - rrr_cond.rd, - rrr_cond.rn, - rrr_cond.rm, - rrr_cond.cond, - )), + .eor_shifted_register => try emit.writeInstruction(Instruction.eor(rd, rn, rm, shift, imm6)), else => unreachable, } } @@ -653,20 +716,14 @@ fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void { fn mirLoadStoreRegisterPair(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const load_store_register_pair = emit.mir.instructions.items(.data)[inst].load_store_register_pair; + const rt = load_store_register_pair.rt; + const rt2 = load_store_register_pair.rt2; + const rn = load_store_register_pair.rn; + const offset = load_store_register_pair.offset; switch (tag) { - .stp => try emit.writeInstruction(Instruction.stp( - load_store_register_pair.rt, - load_store_register_pair.rt2, - load_store_register_pair.rn, - load_store_register_pair.offset, - )), - .ldp => try emit.writeInstruction(Instruction.ldp( - load_store_register_pair.rt, - load_store_register_pair.rt2, - load_store_register_pair.rn, - 
@@ -481,6 +515,23 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
     }
 }
 
+fn mirCompareAndBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+    const r_inst = emit.mir.instructions.items(.data)[inst].r_inst;
+
+    const offset = @intCast(i64, emit.code_offset_mapping.get(r_inst.inst).?) - @intCast(i64, emit.code.items.len);
+    const branch_type = emit.branch_types.get(inst).?;
+    log.debug("mirCompareAndBranch: {} offset={}", .{ inst, offset });
+
+    switch (branch_type) {
+        .cbz => switch (tag) {
+            .cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @intCast(i21, offset))),
+            else => unreachable,
+        },
+        else => unreachable,
+    }
+}
+
 fn mirUnconditionalBranchRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const reg = emit.mir.instructions.items(.data)[inst].reg;
@@ -565,30 +616,42 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
 fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
+    const rd = rrr_imm6_shift.rd;
+    const rn = rrr_imm6_shift.rn;
+    const rm = rrr_imm6_shift.rm;
+    const shift = rrr_imm6_shift.shift;
+    const imm6 = rrr_imm6_shift.imm6;
 
     switch (tag) {
-        .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(
-            rrr_imm6_shift.rd,
-            rrr_imm6_shift.rn,
-            rrr_imm6_shift.rm,
-            rrr_imm6_shift.shift,
-            rrr_imm6_shift.imm6,
-        )),
+        .add_shifted_register => try emit.writeInstruction(Instruction.addShiftedRegister(rd, rn, rm, shift, imm6)),
+        .cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(rd, rn, rm, shift, imm6)),
+        .sub_shifted_register => try emit.writeInstruction(Instruction.subShiftedRegister(rd, rn, rm, shift, imm6)),
         else => unreachable,
     }
 }
 
 fn mirConditionalSelect(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
-    const rrr_cond = emit.mir.instructions.items(.data)[inst].rrr_cond;
+    switch (tag) {
+        .cset => {
+            const r_cond = emit.mir.instructions.items(.data)[inst].r_cond;
+            try emit.writeInstruction(Instruction.csinc(r_cond.rd, .xzr, .xzr, r_cond.cond));
+        },
+        else => unreachable,
+    }
+}
+
+fn mirLogicalShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+    const rrr_imm6_logical_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_logical_shift;
+    const rd = rrr_imm6_logical_shift.rd;
+    const rn = rrr_imm6_logical_shift.rn;
+    const rm = rrr_imm6_logical_shift.rm;
+    const shift = rrr_imm6_logical_shift.shift;
+    const imm6 = rrr_imm6_logical_shift.imm6;
 
     switch (tag) {
-        .cset => try emit.writeInstruction(Instruction.csinc(
-            rrr_cond.rd,
-            rrr_cond.rn,
-            rrr_cond.rm,
-            rrr_cond.cond,
-        )),
+        .eor_shifted_register => try emit.writeInstruction(Instruction.eor(rd, rn, rm, shift, imm6)),
         else => unreachable,
     }
 }
@@ -653,20 +716,14 @@ fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
 fn mirLoadStoreRegisterPair(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const load_store_register_pair = emit.mir.instructions.items(.data)[inst].load_store_register_pair;
+    const rt = load_store_register_pair.rt;
+    const rt2 = load_store_register_pair.rt2;
+    const rn = load_store_register_pair.rn;
+    const offset = load_store_register_pair.offset;
 
     switch (tag) {
-        .stp => try emit.writeInstruction(Instruction.stp(
-            load_store_register_pair.rt,
-            load_store_register_pair.rt2,
-            load_store_register_pair.rn,
-            load_store_register_pair.offset,
-        )),
-        .ldp => try emit.writeInstruction(Instruction.ldp(
-            load_store_register_pair.rt,
-            load_store_register_pair.rt2,
-            load_store_register_pair.rn,
-            load_store_register_pair.offset,
-        )),
+        .stp => try emit.writeInstruction(Instruction.stp(rt, rt2, rn, offset)),
+        .ldp => try emit.writeInstruction(Instruction.ldp(rt, rt2, rn, offset)),
         else => unreachable,
     }
 }
@@ -782,11 +839,19 @@ fn mirLoadStoreRegisterRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
 
 fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
-    const rr = emit.mir.instructions.items(.data)[inst].rr;
-
     switch (tag) {
-        .mov_register => try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, Instruction.Shift.none)),
-        .mov_to_from_sp => try emit.writeInstruction(Instruction.add(rr.rd, rr.rn, 0, false)),
+        .mov_register => {
+            const rr = emit.mir.instructions.items(.data)[inst].rr;
+            try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, .lsl, 0));
+        },
+        .mov_to_from_sp => {
+            const rr = emit.mir.instructions.items(.data)[inst].rr;
+            try emit.writeInstruction(Instruction.add(rr.rd, rr.rn, 0, false));
+        },
+        .mvn => {
+            const rr_imm6_shift = emit.mir.instructions.items(.data)[inst].rr_imm6_shift;
+            try emit.writeInstruction(Instruction.orn(rr_imm6_shift.rd, .xzr, rr_imm6_shift.rm, .lsl, 0));
+        },
         else => unreachable,
     }
 }
@@ -802,6 +867,16 @@ fn mirMoveWideImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
     }
 }
 
+fn mirDataProcessing3Source(emit: *Emit, inst: Mir.Inst.Index) !void {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+    const rrr = emit.mir.instructions.items(.data)[inst].rrr;
+
+    switch (tag) {
+        .mul => try emit.writeInstruction(Instruction.mul(rrr.rd, rrr.rn, rrr.rm)),
+        else => unreachable,
+    }
+}
+
 fn mirNop(emit: *Emit) !void {
     try emit.writeInstruction(Instruction.nop());
 }
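One note on the new `mul` path emitted above: there is no dedicated MUL encoder. The data-processing (3 source) helpers added to bits.zig below spell `mul rd, rn, rm` as `madd rd, rn, rm, xzr`, i.e. a multiply-add with the zero register as addend. A small illustration, not part of the patch (assuming the `Instruction` namespace from bits.zig is imported):

    // These two construct the exact same encoding; the new
    // "mul x1, x4, x9" testcase below checks the resulting bits.
    const via_alias = Instruction.mul(.x1, .x4, .x9);
    const spelled_out = Instruction.madd(.x1, .x4, .x9, .xzr);
    _ = via_alias;
    _ = spelled_out;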
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 5546b32652..92b0604347 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -26,6 +26,8 @@ pub const Inst = struct {
     pub const Tag = enum(u16) {
         /// Add (immediate)
         add_immediate,
+        /// Add (shifted register)
+        add_shifted_register,
         /// Branch conditionally
         b_cond,
         /// Branch
@@ -38,6 +40,8 @@ pub const Inst = struct {
         brk,
         /// Pseudo-instruction: Call extern
         call_extern,
+        /// Compare and Branch on Zero
+        cbz,
         /// Compare (immediate)
         cmp_immediate,
         /// Compare (shifted register)
@@ -50,6 +54,8 @@ pub const Inst = struct {
         dbg_epilogue_begin,
         /// Pseudo-instruction: Update debug line
         dbg_line,
+        /// Bitwise Exclusive OR (shifted register)
+        eor_shifted_register,
         /// Pseudo-instruction: Load memory
         ///
         /// Payload is `LoadMemory`
@@ -82,6 +88,10 @@ pub const Inst = struct {
         movk,
         /// Move wide with zero
         movz,
+        /// Multiply
+        mul,
+        /// Bitwise NOT
+        mvn,
         /// No Operation
         nop,
         /// Pseudo-instruction: Pop multiple registers
@@ -112,6 +122,8 @@ pub const Inst = struct {
         strh_register,
         /// Subtract (immediate)
         sub_immediate,
+        /// Subtract (shifted register)
+        sub_shifted_register,
         /// Supervisor Call
         svc,
     };
@@ -171,6 +183,20 @@ pub const Inst = struct {
             imm16: u16,
             hw: u2 = 0,
         },
+        /// A register and a condition
+        ///
+        /// Used by e.g. cset
+        r_cond: struct {
+            rd: Register,
+            cond: bits.Instruction.Condition,
+        },
+        /// A register and another instruction
+        ///
+        /// Used by e.g. cbz
+        r_inst: struct {
+            rt: Register,
+            inst: Index,
+        },
         /// Two registers
         ///
         /// Used by e.g. mov_register
@@ -178,6 +204,14 @@ pub const Inst = struct {
             rd: Register,
             rn: Register,
         },
+        /// A register, an unsigned 12-bit immediate, and an optional shift
+        ///
+        /// Used by e.g. cmp_immediate
+        r_imm12_sh: struct {
+            rn: Register,
+            imm12: u12,
+            sh: u1 = 0,
+        },
         /// Two registers, an unsigned 12-bit immediate, and an optional shift
         ///
         /// Used by e.g. sub_immediate
@@ -187,6 +221,23 @@ pub const Inst = struct {
            rd: Register,
            rn: Register,
            imm12: u12,
            sh: u1 = 0,
        },
+        /// Two registers and a shift (shift type and 6-bit amount)
+        ///
+        /// Used by e.g. mvn
+        rr_imm6_shift: struct {
+            rd: Register,
+            rm: Register,
+            imm6: u6,
+            shift: bits.Instruction.AddSubtractShiftedRegisterShift,
+        },
+        /// Three registers
+        ///
+        /// Used by e.g. mul
+        rrr: struct {
+            rd: Register,
+            rn: Register,
+            rm: Register,
+        },
         /// Three registers and a shift (shift type and 6-bit amount)
         ///
         /// Used by e.g. cmp_shifted_register
@@ -197,18 +248,20 @@ pub const Inst = struct {
             rd: Register,
             rn: Register,
             rm: Register,
             imm6: u6,
             shift: bits.Instruction.AddSubtractShiftedRegisterShift,
         },
-        /// Three registers and a condition
+        /// Three registers and a shift (logical instruction version)
+        /// (shift type and 6-bit amount)
         ///
-        /// Used by e.g. cset
-        rrr_cond: struct {
+        /// Used by e.g. eor_shifted_register
+        rrr_imm6_logical_shift: struct {
             rd: Register,
             rn: Register,
             rm: Register,
-            cond: bits.Instruction.Condition,
+            imm6: u6,
+            shift: bits.Instruction.LogicalShiftedRegisterShift,
         },
         /// Two registers and a LoadStoreOffsetImmediate
         ///
-        /// Used by e.g. str_register
+        /// Used by e.g. str_immediate
         load_store_register_immediate: struct {
             rt: Register,
             rn: Register,
@@ -224,7 +277,7 @@ pub const Inst = struct {
         },
         /// A registers and a stack offset
         ///
-        /// Used by e.g. str_register
+        /// Used by e.g. str_stack
         load_store_stack: struct {
             rt: Register,
             offset: u32,
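The `r_inst` payload gives compare-and-branch instructions a register plus the Mir index of their branch target, which is what `branchTarget` and `mirCompareAndBranch` above read back. A rough sketch of how a backend lowering might append such an instruction (illustrative only; `cond_reg` and `target` are invented names, and the corresponding CodeGen.zig changes are not shown in this excerpt):

    // Branch to `target` when the value in `cond_reg` is zero.
    _ = try self.addInst(.{
        .tag = .cbz,
        .data = .{ .r_inst = .{ .rt = cond_reg, .inst = target } },
    });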
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index 10eb919cb9..a5d56cfcc7 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -332,23 +332,17 @@ pub const Instruction = union(enum) {
         op: u1,
         sf: u1,
     },
-
-    pub const Shift = struct {
-        shift: Type = .lsl,
-        amount: u6 = 0,
-
-        pub const Type = enum(u2) {
-            lsl,
-            lsr,
-            asr,
-            ror,
-        };
-
-        pub const none = Shift{
-            .shift = .lsl,
-            .amount = 0,
-        };
-    };
+    data_processing_3_source: packed struct {
+        rd: u5,
+        rn: u5,
+        ra: u5,
+        o0: u1,
+        rm: u5,
+        op31: u3,
+        fixed: u5 = 0b11011,
+        op54: u2,
+        sf: u1,
+    },
 
     pub const Condition = enum(u4) {
         /// Integer: Equal
@@ -470,6 +464,7 @@ pub const Instruction = union(enum) {
             .conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
             .compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
             .conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
+            .data_processing_3_source => |v| @bitCast(u32, v),
         };
     }
@@ -807,25 +802,28 @@ pub const Instruction = union(enum) {
         };
     }
 
+    pub const LogicalShiftedRegisterShift = enum(u2) { lsl, lsr, asr, ror };
+
     fn logicalShiftedRegister(
         opc: u2,
         n: u1,
-        shift: Shift,
         rd: Register,
         rn: Register,
         rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
     ) Instruction {
         switch (rd.size()) {
             32 => {
-                assert(shift.amount < 32);
+                assert(amount < 32);
                 return Instruction{
                     .logical_shifted_register = .{
                         .rd = rd.id(),
                         .rn = rn.id(),
-                        .imm6 = shift.amount,
+                        .imm6 = amount,
                         .rm = rm.id(),
                         .n = n,
-                        .shift = @enumToInt(shift.shift),
+                        .shift = @enumToInt(shift),
                         .opc = opc,
                         .sf = 0b0,
                     },
@@ -836,10 +834,10 @@ pub const Instruction = union(enum) {
                     .logical_shifted_register = .{
                         .rd = rd.id(),
                         .rn = rn.id(),
-                        .imm6 = shift.amount,
+                        .imm6 = amount,
                         .rm = rm.id(),
                         .n = n,
-                        .shift = @enumToInt(shift.shift),
+                        .shift = @enumToInt(shift),
                         .opc = opc,
                         .sf = 0b1,
                     },
@@ -967,6 +965,33 @@ pub const Instruction = union(enum) {
         };
     }
 
+    fn dataProcessing3Source(
+        op54: u2,
+        op31: u3,
+        o0: u1,
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        ra: Register,
+    ) Instruction {
+        return Instruction{
+            .data_processing_3_source = .{
+                .rd = rd.id(),
+                .rn = rn.id(),
+                .ra = ra.id(),
+                .o0 = o0,
+                .rm = rm.id(),
+                .op31 = op31,
+                .op54 = op54,
+                .sf = switch (rd.size()) {
+                    32 => 0b0,
+                    64 => 0b1,
+                    else => unreachable, // unexpected register size
+                },
+            },
+        };
+    }
+
     // Helper functions for assembly syntax functions
 
     // Move wide (immediate)
@@ -1120,36 +1145,84 @@ pub const Instruction = union(enum) {
 
     // Logical (shifted register)
 
-    pub fn @"and"(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
-        return logicalShiftedRegister(0b00, 0b0, shift, rd, rn, rm);
+    pub fn @"and"(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
+    ) Instruction {
+        return logicalShiftedRegister(0b00, 0b0, rd, rn, rm, shift, amount);
     }
 
-    pub fn bic(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
-        return logicalShiftedRegister(0b00, 0b1, shift, rd, rn, rm);
+    pub fn bic(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
+    ) Instruction {
+        return logicalShiftedRegister(0b00, 0b1, rd, rn, rm, shift, amount);
    }
 
-    pub fn orr(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
-        return logicalShiftedRegister(0b01, 0b0, shift, rd, rn, rm);
+    pub fn orr(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
+    ) Instruction {
+        return logicalShiftedRegister(0b01, 0b0, rd, rn, rm, shift, amount);
     }
 
-    pub fn orn(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
-        return logicalShiftedRegister(0b01, 0b1, shift, rd, rn, rm);
+    pub fn orn(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
+    ) Instruction {
+        return logicalShiftedRegister(0b01, 0b1, rd, rn, rm, shift, amount);
    }
 
-    pub fn eor(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
-        return logicalShiftedRegister(0b10, 0b0, shift, rd, rn, rm);
+    pub fn eor(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
+    ) Instruction {
+        return logicalShiftedRegister(0b10, 0b0, rd, rn, rm, shift, amount);
     }
 
-    pub fn eon(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
-        return logicalShiftedRegister(0b10, 0b1, shift, rd, rn, rm);
+    pub fn eon(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
+    ) Instruction {
+        return logicalShiftedRegister(0b10, 0b1, rd, rn, rm, shift, amount);
     }
 
-    pub fn ands(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
-        return logicalShiftedRegister(0b11, 0b0, shift, rd, rn, rm);
+    pub fn ands(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
+    ) Instruction {
+        return logicalShiftedRegister(0b11, 0b0, rd, rn, rm, shift, amount);
     }
 
-    pub fn bics(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
-        return logicalShiftedRegister(0b11, 0b1, shift, rd, rn, rm);
+    pub fn bics(
+        rd: Register,
+        rn: Register,
+        rm: Register,
+        shift: LogicalShiftedRegisterShift,
+        amount: u6,
+    ) Instruction {
+        return logicalShiftedRegister(0b11, 0b1, rd, rn, rm, shift, amount);
     }
 
     // Add/subtract (immediate)
@@ -1245,6 +1318,24 @@ pub const Instruction = union(enum) {
     pub fn csneg(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
         return conditionalSelect(0b01, 0b1, 0b0, rd, rn, rm, cond);
     }
+
+    // Data processing (3 source)
+
+    pub fn madd(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
+        return dataProcessing3Source(0b00, 0b000, 0b0, rd, rn, rm, ra);
+    }
+
+    pub fn msub(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
+        return dataProcessing3Source(0b00, 0b000, 0b1, rd, rn, rm, ra);
+    }
+
+    pub fn mul(rd: Register, rn: Register, rm: Register) Instruction {
+        return madd(rd, rn, rm, .xzr);
+    }
+
+    pub fn mneg(rd: Register, rn: Register, rm: Register) Instruction {
+        return msub(rd, rn, rm, .xzr);
+    }
 };
 
 test {
@@ -1259,11 +1350,11 @@ test "serialize instructions" {
     const testcases = [_]Testcase{
         .{ // orr x0, xzr, x1
-            .inst = Instruction.orr(.x0, .xzr, .x1, Instruction.Shift.none),
+            .inst = Instruction.orr(.x0, .xzr, .x1, .lsl, 0),
             .expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
         },
         .{ // orn x0, xzr, x1
-            .inst = Instruction.orn(.x0, .xzr, .x1, Instruction.Shift.none),
+            .inst = Instruction.orn(.x0, .xzr, .x1, .lsl, 0),
             .expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
         },
         .{ // movz x1, #4
@@ -1383,11 +1474,11 @@ test "serialize instructions" {
             .expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
         },
         .{ // and x0, x4, x2
-            .inst = Instruction.@"and"(.x0, .x4, .x2, .{}),
+            .inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0),
             .expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
         },
         .{ // and x0, x4, x2, lsl #0x8
-            .inst = Instruction.@"and"(.x0, .x4, .x2, .{ .shift = .lsl, .amount = 0x8 }),
+            .inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0x8),
            .expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
         },
         .{ // add x0, x10, #10
@@ -1414,6 +1505,10 @@ test "serialize instructions" {
             .inst = Instruction.csinc(.x1, .x2, .x4, .eq),
             .expected = 0b1_0_0_11010100_00100_0000_0_1_00010_00001,
         },
+        .{ // mul x1, x4, x9
+            .inst = Instruction.mul(.x1, .x4, .x9),
+            .expected = 0b1_00_11011_000_01001_0_11111_00100_00001,
+        },
     };
 
     for (testcases) |case| {
diff --git a/test/behavior.zig b/test/behavior.zig
index db6863a8b0..abfd8fb0bf 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -54,7 +54,7 @@ test {
         _ = @import("behavior/decltest.zig");
     }
 
-    if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
+    if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64 and builtin.zig_backend != .stage2_aarch64) {
         // Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
_ = @import("behavior/bitcast.zig"); _ = @import("behavior/bugs/624.zig"); diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 96278524c0..a8d8fcd206 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -27,6 +27,7 @@ test "default alignment allows unspecified in type syntax" { } test "implicitly decreasing pointer alignment" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const a: u32 align(4) = 3; const b: u32 align(8) = 4; try expect(addUnaligned(&a, &b) == 7); @@ -37,6 +38,7 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 { } test "@alignCast pointers" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var x: u32 align(4) = 1; expectsOnly1(&x); try expect(x == 2); @@ -102,6 +104,7 @@ fn fnWithAlignedStack() i32 { } test "implicitly decreasing slice alignment" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const a: u32 align(4) = 3; @@ -113,6 +116,7 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 { } test "specifying alignment allows pointer cast" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try testBytesAlign(0x33); @@ -124,6 +128,7 @@ fn testBytesAlign(b: u8) !void { } test "@alignCast slices" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var array align(4) = [_]u32{ 1, 1 }; @@ -139,6 +144,7 @@ fn sliceExpects4(slice: []align(4) u32) void { } test "return error union with 128-bit integer" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try expect(3 == try give()); @@ -148,6 +154,7 @@ fn give() anyerror!u128 { } test "page aligned array on stack" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; @@ -173,6 +180,7 @@ fn noop1() align(1) void {} fn noop4() align(4) void {} test "function alignment" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -189,6 +197,7 @@ test "function alignment" { } test "implicitly decreasing fn alignment" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; @@ -216,6 +225,7 @@ fn alignedBig() align(16) i32 { } test "@alignCast functions" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; @@ -239,6 +249,7 @@ fn simple4() align(4) i32 { } test "generic function with align param" { + if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; @@ -260,6 +271,7 @@ fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 { } test "runtime known array index has best alignment possible" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; @@ -302,6 +314,7 @@ fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void { } test "alignment of function with c calling convention" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage1) return error.SkipZigTest; var runtime_nothing = ¬hing; @@ -318,6 +331,7 @@ const DefaultAligned = struct { }; test "read 128-bit field from default aligned struct in stack memory" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -337,6 +351,7 @@ var default_aligned_global = DefaultAligned{ }; test "read 128-bit field from default aligned struct in global memory" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; @@ -348,6 +363,7 @@ test "read 128-bit field from default aligned struct in global memory" { } test "struct field explicit alignment" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; @@ -369,6 +385,7 @@ test "struct field explicit alignment" { } test "align(@alignOf(T)) T does not force resolution of T" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; @@ -397,6 +414,7 @@ test "align(@alignOf(T)) T does not force resolution of T" { } test "align(N) on functions" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig index 749855db52..5a49146694 100644 --- a/test/behavior/alignof.zig +++ b/test/behavior/alignof.zig @@ -11,6 +11,7 @@ const Foo = struct { }; test "@alignOf(T) before referencing T" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; diff --git a/test/behavior/array.zig 
b/test/behavior/array.zig index 23820e71b5..e93f0f3e90 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -6,6 +6,7 @@ const expect = testing.expect; const expectEqual = testing.expectEqual; test "array to slice" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const a: u32 align(4) = 3; @@ -20,6 +21,7 @@ test "array to slice" { } test "arrays" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var array: [5]u32 = undefined; @@ -46,6 +48,7 @@ fn getArrayLen(a: []const u32) usize { } test "array init with mult" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const a = 'a'; @@ -57,6 +60,7 @@ test "array init with mult" { } test "array literal with explicit type" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const hex_mult: [4]u16 = .{ 4096, 256, 16, 1 }; @@ -86,6 +90,7 @@ const ArrayDotLenConstExpr = struct { const some_array = [_]u8{ 0, 1, 2, 3 }; test "array literal with specified size" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var array = [2]u8{ 1, 2 }; @@ -94,6 +99,7 @@ test "array literal with specified size" { } test "array len field" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var arr = [4]u8{ 0, 0, 0, 0 }; @@ -105,6 +111,7 @@ test "array len field" { } test "array with sentinels" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { @@ -134,6 +141,7 @@ test "array with sentinels" { } test "void arrays" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var array: [4]void = undefined; @@ -144,6 +152,7 @@ test "void arrays" { } test "nested arrays" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" }; @@ -157,6 +166,7 @@ test "nested arrays" { } test "implicit comptime in array type size" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var arr: [plusOne(10)]bool = undefined; @@ -168,6 +178,7 @@ fn plusOne(x: u32) u32 { } test "single-item pointer to array indexing and slicing" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try testSingleItemPtrArrayIndexSlice(); @@ -193,6 +204,7 @@ fn doSomeMangling(array: *[4]u8) void { } test "implicit cast zero sized array ptr to slice" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if 
(builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; { @@ -208,6 +220,7 @@ test "implicit cast zero sized array ptr to slice" { } test "anonymous list literal syntax" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { @@ -227,6 +240,7 @@ var s_array: [8]Sub = undefined; const Sub = struct { b: u8 }; const Str = struct { a: []Sub }; test "set global var array via slice embedded in struct" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -243,6 +257,7 @@ test "set global var array via slice embedded in struct" { } test "read/write through global variable array of struct fields initialized via array mult" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -264,6 +279,7 @@ test "read/write through global variable array of struct fields initialized via } test "implicit cast single-item pointer" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -284,6 +300,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 { } test "comptime evaluating function that takes array by value" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -296,6 +313,7 @@ test "comptime evaluating function that takes array by value" { } test "runtime initialize array elem and then implicit cast to slice" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -306,6 +324,7 @@ test "runtime initialize array elem and then implicit cast to slice" { } test "array literal as argument to function" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -334,6 +353,7 @@ test "array literal as argument to function" { } test "double nested array to const slice cast in array literal" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -395,6 
+415,7 @@ test "double nested array to const slice cast in array literal" { } test "anonymous literal in array" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -420,6 +441,7 @@ test "anonymous literal in array" { } test "access the null element of a null terminated array" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -437,6 +459,7 @@ test "access the null element of a null terminated array" { } test "type deduction for array subscript expression" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -455,6 +478,7 @@ test "type deduction for array subscript expression" { } test "sentinel element count towards the ABI size calculation" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO @@ -481,6 +505,7 @@ test "sentinel element count towards the ABI size calculation" { } test "zero-sized array with recursive type definition" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO @@ -505,6 +530,7 @@ test "zero-sized array with recursive type definition" { } test "type coercion of anon struct literal to array" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -540,6 +566,7 @@ test "type coercion of anon struct literal to array" { } test "type coercion of pointer to anon struct literal to pointer to array" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 18a24f9b3a..0c2c293d23 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -15,6 +15,7 @@ test "empty function with comments" { } test "truncate" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try expect(testTruncate(0x10fd) == 0xfd); @@ -25,6 +26,7 @@ fn testTruncate(x: u32) u8 { } test "truncate to non-power-of-two integers" { + if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try testTrunc(u32, u1, 0b10101, 0b1); @@ -46,6 +48,7 @@ const g1: i32 = 1233 + 1; var g2: i32 = 0; test "global variables" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; try expect(g2 == 0); g2 = g1; try expect(g2 == 1234); @@ -112,6 +115,7 @@ fn first4KeysOfHomeRow() []const u8 { } test "return string from function" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -119,12 +123,14 @@ test "return string from function" { } test "hex escape" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello")); } test "multiline string" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const s1 = @@ -137,6 +143,7 @@ test "multiline string" { } test "multiline string comments at start" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const s1 = @@ -149,6 +156,7 @@ test "multiline string comments at start" { } test "multiline string comments at end" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const s1 = @@ -161,6 +169,7 @@ test "multiline string comments at end" { } test "multiline string comments in middle" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const s1 = @@ -173,6 +182,7 @@ test "multiline string comments in middle" { } test "multiline string comments at multiple places" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const s1 = @@ -191,6 +201,7 @@ test "string concatenation" { } test "array mult operator" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try expect(mem.eql(u8, "ab" ** 5, "ababababab")); @@ -216,6 +227,7 @@ test "compile time global reinterpret" { } test "cast undefined" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const array: [100]u8 = undefined; @@ -227,6 +239,7 @@ fn testCastUndefined(x: []const u8) void { } test "implicit cast after unreachable" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try expect(outer() == 1234); @@ -284,6 +297,7 @@ fn fB() []const u8 { } test "call function pointer in struct" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -310,6 +324,7 @@ const FnPtrWrapper = struct { }; test "const ptr from var variable" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var x: u64 = undefined; @@ -326,6 +341,7 @@ fn copy(src: *const u64, dst: *u64) void { 
} test "call result of if else expression" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO @@ -339,6 +355,7 @@ fn f2(x: bool) []const u8 { } test "memcpy and memset intrinsics" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO @@ -361,6 +378,7 @@ fn testMemcpyMemset() !void { } test "variable is allowed to be a pointer to an opaque type" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO @@ -374,6 +392,7 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA { } test "take address of parameter" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -400,6 +419,7 @@ fn testPointerToVoidReturnType2() *const void { } test "array 2D const double ptr" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -419,6 +439,7 @@ fn testArray2DConstDoublePtr(ptr: *const f32) !void { } test "double implicit cast in same expression" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -430,6 +451,7 @@ fn nine() u8 { } test "struct inside function" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try testStructInFn(); @@ -451,6 +473,7 @@ fn testStructInFn() !void { } test "fn call returning scalar optional in equality expression" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; try expect(getNull() == null); } @@ -459,6 +482,7 @@ fn getNull() ?*i32 { } test "global variable assignment with optional unwrapping with var initialized to undefined" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { @@ -476,6 +500,7 @@ test "global variable assignment with optional unwrapping with var initialized t var global_foo: *i32 = undefined; test "peer result location with typed parent, runtime condition, comptime prongs" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -550,6 +575,7 @@ test "comptime cast fn to ptr" { } test "equality compare fn ptrs" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage1) return error.SkipZigTest; var a = &emptyFn; @@ -557,6 +583,7 @@ test "equality compare fn ptrs" { } test "self reference through fn ptr field" 
{ + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; @@ -576,6 +603,7 @@ test "self reference through fn ptr field" { } test "global variable initialized to global variable array element" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -593,6 +621,7 @@ var gdt = [_]GDTEntry{ var global_ptr = &gdt[0]; test "global constant is loaded with a runtime-known index" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { @@ -610,6 +639,7 @@ test "global constant is loaded with a runtime-known index" { } test "multiline string literal is null terminated" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -643,6 +673,7 @@ test "explicit cast optional pointers" { } test "pointer comparison" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -655,6 +686,7 @@ fn ptrEql(a: *const []const u8, b: *const []const u8) bool { } test "string concatenation" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; diff --git a/test/behavior/bit_shifting.zig b/test/behavior/bit_shifting.zig index c0b2729bdf..1a01cbd732 100644 --- a/test/behavior/bit_shifting.zig +++ b/test/behavior/bit_shifting.zig @@ -61,6 +61,7 @@ fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, compt } test "sharded table" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; diff --git a/test/behavior/bugs/1381.zig b/test/behavior/bugs/1381.zig index 91a253af24..2f05d2fa96 100644 --- a/test/behavior/bugs/1381.zig +++ b/test/behavior/bugs/1381.zig @@ -12,6 +12,7 @@ const A = union(enum) { }; test "union that needs padding bytes inside an array" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; var as = [_]A{ diff --git a/test/behavior/bugs/1486.zig b/test/behavior/bugs/1486.zig index 8f954a3600..91d5b621d2 100644 --- a/test/behavior/bugs/1486.zig +++ b/test/behavior/bugs/1486.zig @@ -1,10 +1,12 @@ const std = @import("std"); const expect = std.testing.expect; +const builtin = @import("builtin"); const ptr = &global; var global: usize = 123; test "constant pointer to global variable causes runtime load" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; global = 1234; try expect(&global == ptr); try expect(ptr.* == 1234); diff --git a/test/behavior/bugs/1735.zig b/test/behavior/bugs/1735.zig index c07bd9472b..556b899de1 100644 --- 
a/test/behavior/bugs/1735.zig +++ b/test/behavior/bugs/1735.zig @@ -42,6 +42,7 @@ const a = struct { }; test "initialization" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; diff --git a/test/behavior/bugs/1741.zig b/test/behavior/bugs/1741.zig index 280aafc52e..f4cc2101c4 100644 --- a/test/behavior/bugs/1741.zig +++ b/test/behavior/bugs/1741.zig @@ -2,6 +2,7 @@ const std = @import("std"); const builtin = @import("builtin"); test "fixed" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const x: f32 align(128) = 12.34; diff --git a/test/behavior/bugs/2006.zig b/test/behavior/bugs/2006.zig index 4d76230c88..fcacb9a2c6 100644 --- a/test/behavior/bugs/2006.zig +++ b/test/behavior/bugs/2006.zig @@ -6,6 +6,7 @@ const S = struct { p: *S, }; test "bug 2006" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; var a: S = undefined; a = S{ .p = undefined }; diff --git a/test/behavior/bugs/2578.zig b/test/behavior/bugs/2578.zig index 15f5bf0e53..90db296158 100644 --- a/test/behavior/bugs/2578.zig +++ b/test/behavior/bugs/2578.zig @@ -12,6 +12,7 @@ fn bar(pointer: ?*anyopaque) void { } test "fixed" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/3007.zig b/test/behavior/bugs/3007.zig index 0b3cbdc56d..c93bbf8d20 100644 --- a/test/behavior/bugs/3007.zig +++ b/test/behavior/bugs/3007.zig @@ -19,6 +19,7 @@ fn get_foo() Foo.FooError!*Foo { } test "fixed" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/3112.zig b/test/behavior/bugs/3112.zig index 089f3e59f6..ebd8fd1ef3 100644 --- a/test/behavior/bugs/3112.zig +++ b/test/behavior/bugs/3112.zig @@ -12,6 +12,7 @@ fn prev(p: ?State) void { } test "zig test crash" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; diff --git a/test/behavior/bugs/3367.zig b/test/behavior/bugs/3367.zig index f540fdf6df..6468498ab6 100644 --- a/test/behavior/bugs/3367.zig +++ b/test/behavior/bugs/3367.zig @@ -10,6 +10,7 @@ const Mixin = struct { }; test "container member access usingnamespace decls" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var foo = Foo{}; diff --git a/test/behavior/bugs/394.zig b/test/behavior/bugs/394.zig index ec1bd5cc9f..28934c8dd0 100644 --- a/test/behavior/bugs/394.zig +++ 
b/test/behavior/bugs/394.zig @@ -11,6 +11,7 @@ const expect = @import("std").testing.expect; const builtin = @import("builtin"); test "bug 394 fixed" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const x = S{ diff --git a/test/behavior/bugs/656.zig b/test/behavior/bugs/656.zig index bd93c2b88c..d71dc426f9 100644 --- a/test/behavior/bugs/656.zig +++ b/test/behavior/bugs/656.zig @@ -11,6 +11,7 @@ const Value = struct { }; test "optional if after an if in a switch prong of a switch with 2 prongs in an else" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; try foo(false, true); diff --git a/test/behavior/bugs/7250.zig b/test/behavior/bugs/7250.zig index 27810acea4..ee04847e51 100644 --- a/test/behavior/bugs/7250.zig +++ b/test/behavior/bugs/7250.zig @@ -14,6 +14,7 @@ threadlocal var g_uart0 = nrfx_uart_t{ }; test "reference a global threadlocal variable" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 4028d8c5f1..85e3368441 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -18,6 +18,7 @@ test "integer literal to pointer cast" { } test "peer type resolution: ?T and T" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try expect(peerTypeTAndOptionalT(true, false).? 
== 0); @@ -94,6 +95,7 @@ test "comptime_int @intToFloat" { } test "@floatToInt" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -116,6 +118,7 @@ fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) !void { } test "implicitly cast indirect pointer to maybe-indirect pointer" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { @@ -174,6 +177,7 @@ test "@floatCast comptime_int and comptime_float" { } test "coerce undefined to optional" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try expect(MakeType(void).getNull() == null); @@ -193,6 +197,7 @@ fn MakeType(comptime T: type) type { } test "implicit cast from *[N]T to [*c]T" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var x: [4]u16 = [4]u16{ 0, 1, 2, 3 }; @@ -205,6 +210,7 @@ test "implicit cast from *[N]T to [*c]T" { } test "*usize to *void" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var i = @as(usize, 0); var v = @ptrCast(*void, &i); v.* = {}; @@ -230,6 +236,7 @@ test "@intCast to u0 and use the result" { } test "peer result null and comptime_int" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { @@ -253,6 +260,7 @@ test "peer result null and comptime_int" { } test "*const ?[*]const T to [*c]const [*c]const T" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var array = [_]u8{ 'o', 'k' }; @@ -264,6 +272,7 @@ test "*const ?[*]const T to [*c]const [*c]const T" { } test "array coersion to undefined at runtime" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; @setRuntimeSafety(true); @@ -293,6 +302,7 @@ fn implicitIntLitToOptional() void { } test "return u8 coercing into ?u32 return type" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { @@ -313,6 +323,7 @@ test "cast from ?[*]T to ??[*]T" { } test "peer type unsigned int to signed" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -325,6 +336,7 @@ test "peer type unsigned int to signed" { } test "expected [*c]const u8, found [*:0]const u8" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; @@ -384,6 +396,7 @@ fn castToOptionalTypeError(z: i32) !void { } test "implicitly cast from [0]T to 
anyerror![]T" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -455,6 +468,7 @@ fn testCastConstArrayRefToConstSlice() !void {
 }
 test "peer type resolution: error and [N]T" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -689,6 +703,7 @@ test "type coercion related to sentinel-termination" {
 }
 test "peer type resolution implicit cast to return type" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -710,6 +725,7 @@ test "peer type resolution implicit cast to return type" {
 }
 test "peer type resolution implicit cast to variable type" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -806,6 +822,7 @@ test "comptime float casts" {
 }
 test "pointer reinterpret const float to int" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -822,6 +839,7 @@ test "pointer reinterpret const float to int" {
 }
 test "implicit cast from [*]T to ?*anyopaque" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -840,6 +858,7 @@ fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
 }
 test "compile time int to ptr of function" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage1) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -857,6 +876,7 @@ fn foobar(func: PFN_void) !void {
 }
 test "implicit ptr to *anyopaque" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -871,6 +891,7 @@ test "implicit ptr to *anyopaque" {
 }
 test "return null from fn() anyerror!?&T" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -887,6 +908,7 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
 }
 test "peer type resolution: [0]u8 and []const u8" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -907,6 +929,7 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
 }
 test "implicitly cast from [N]T to ?[]const T" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -920,6 +943,7 @@ fn castToOptionalSlice() ?[]const u8 {
 }
 test "cast u128 to f128 and back" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -941,6 +965,7 @@ fn cast128Float(x: u128) f128 {
 }
 test "implicit cast from *[N]T to ?[*]T" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -956,6 +981,7 @@ test "implicit cast from *[N]T to ?[*]T" {
 }
 test "implicit cast from *T to ?*anyopaque" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -970,6 +996,7 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
 }
 test "implicit cast *[0]T to E![]const u8" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -987,6 +1014,7 @@ test "cast from array reference to fn: comptime fn ptr" {
     try expect(@ptrToInt(f) == @ptrToInt(&global_array));
 }
 test "cast from array reference to fn: runtime fn ptr" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -996,6 +1024,7 @@ test "cast from array reference to fn: runtime fn ptr" {
 }
 test "*const [N]null u8 to ?[]const u8" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1034,6 +1063,7 @@ test "cast between [*c]T and ?[*:0]T on fn parameter" {
 var global_struct: struct { f0: usize } = undefined;
 test "assignment to optional pointer result loc" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1043,6 +1073,7 @@ test "assignment to optional pointer result loc" {
 }
 test "cast between *[N]void and []void" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1052,6 +1083,7 @@ test "cast between *[N]void and []void" {
 }
 test "peer resolve arrays of different size to const slice" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1065,6 +1097,7 @@ fn boolToStr(b: bool) []const u8 {
 }
 test "cast f16 to wider types" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1083,6 +1116,7 @@ test "cast f16 to wider types" {
 }
 test "cast f128 to narrower types" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1101,6 +1135,7 @@ test "cast f128 to narrower types" {
 }
 test "peer type resolution: unreachable, null, slice" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1119,6 +1154,7 @@ test "peer type resolution: unreachable, null, slice" {
 }
 test "cast i8 fn call peers to i32 result" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/fn_delegation.zig b/test/behavior/fn_delegation.zig
index 25ec3dea1b..eee8f52490 100644
--- a/test/behavior/fn_delegation.zig
+++ b/test/behavior/fn_delegation.zig
@@ -32,6 +32,7 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 {
 }
 test "fn delegation" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     const foo = Foo{};
diff --git a/test/behavior/ir_block_deps.zig b/test/behavior/ir_block_deps.zig
index cbc5cc2419..d7d50b4be1 100644
--- a/test/behavior/ir_block_deps.zig
+++ b/test/behavior/ir_block_deps.zig
@@ -18,6 +18,7 @@ fn getErrInt() anyerror!i32 {
 }
 test "ir block deps" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig
index 3caf777195..78788d6556 100644
--- a/test/behavior/optional.zig
+++ b/test/behavior/optional.zig
@@ -5,6 +5,7 @@ const expect = testing.expect;
 const expectEqual = testing.expectEqual;
 test "passing an optional integer as a parameter" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -25,6 +26,7 @@ test "passing an optional integer as a parameter" {
 }
 pub const EmptyStruct = struct {};
 test "optional pointer to size zero struct" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -34,6 +36,7 @@ test "optional pointer to size zero struct" {
 }
 test "equality compare optional pointers" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -58,6 +61,7 @@ fn testNullPtrsEql() !void {
 }
 test "optional with void type" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -69,6 +73,7 @@ test "optional with void type" {
 }
 test "address of unwrap optional" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -89,6 +94,7 @@ test "address of unwrap optional" {
 }
 test "nested optional field in struct" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -105,6 +111,7 @@ test "nested optional field in struct" {
 }
 test "equality compare optional with non-optional" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -142,6 +149,7 @@ fn test_cmp_optional_non_optional() !void {
 }
 test "unwrap function call with optional pointer return value" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -163,6 +171,7 @@ test "unwrap function call with optional pointer return value" {
 }
 test "nested orelse" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -189,6 +198,7 @@ test "nested orelse" {
 }
 test "self-referential struct through a slice of optional" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/reflection.zig b/test/behavior/reflection.zig
index 96c81fe0d0..a181e95b86 100644
--- a/test/behavior/reflection.zig
+++ b/test/behavior/reflection.zig
@@ -28,6 +28,7 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
 }
 test "reflection: @field" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index badaf7ef03..4b73a3a140 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -27,6 +27,7 @@ comptime {
 }
 test "slicing" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -68,6 +69,7 @@ test "comptime slice of undefined pointer of length 0" {
 }
 test "implicitly cast array of size 0 to slice" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -80,6 +82,7 @@ fn assertLenIsZero(msg: []const u8) !void {
 }
 test "access len index of sentinel-terminated slice" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     const S = struct {
@@ -129,6 +132,7 @@ test "slice of type" {
 }
 test "generic malloc free" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -187,6 +191,7 @@ test "comptime pointer cast array and then slice" {
 }
 test "slicing zero length array" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -202,6 +207,7 @@ test "slicing zero length array" {
 }
 const x = @intToPtr([*]i32, 0x1000)[0..0x500];
 const y = x[0x100..];
 test "compile time slice of pointer to hard coded address" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage1) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -215,6 +221,7 @@ test "compile time slice of pointer to hard coded address" {
 }
 test "slice string literal has correct type" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -230,6 +237,7 @@ test "slice string literal has correct type" {
 }
 test "result location zero sized array inside struct field implicit cast to slice" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     const E = struct {
@@ -240,6 +248,7 @@ test "result location zero sized array inside struct field implicit cast to slice" {
 }
 test "runtime safety lets us slice from len..len" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -252,6 +261,7 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
 }
 test "C pointer" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -262,6 +272,7 @@ test "C pointer" {
 }
 test "C pointer slice access" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -291,6 +302,7 @@ fn sliceSum(comptime q: []const u8) i32 {
 }
 test "slice type with custom alignment" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -305,6 +317,7 @@ test "slice type with custom alignment" {
 }
 test "obtaining a null terminated slice" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -350,6 +363,7 @@ test "empty array to slice" {
 }
 test "@ptrCast slice to pointer" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index ecdd6a1846..8428ea886f 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -9,6 +9,7 @@ const maxInt = std.math.maxInt;
 top_level_field: i32,
 test "top level fields" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var instance = @This(){
@@ -42,6 +43,7 @@ const StructWithFields = struct {
 };
 test "non-packed struct has fields padded out to the required alignment" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     const foo = StructWithFields{ .a = 5, .b = 1, .c = 10, .d = 2 };
@@ -65,6 +67,7 @@ const SmallStruct = struct {
 };
 test "lower unnamed constants" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     var foo = SmallStruct{ .a = 1, .b = 255 };
     try expect(foo.first() == 1);
     try expect(foo.second() == 255);
@@ -83,6 +86,7 @@ const StructFoo = struct {
 };
 test "structs" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var foo: StructFoo = undefined;
@@ -101,6 +105,7 @@ fn testMutation(foo: *StructFoo) void {
 }
 test "struct byval assign" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var foo1: StructFoo = undefined;
@@ -134,6 +139,7 @@ fn returnEmptyStructInstance() StructWithNoFields {
 }
 test "fn call of struct field" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     const Foo = struct {
@@ -165,12 +171,14 @@ const MemberFnTestFoo = struct {
 };
 test "call member function directly" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     const instance = MemberFnTestFoo{ .x = 1234 };
     const result = MemberFnTestFoo.member(instance);
     try expect(result == 1234);
 }
 test "store member function in variable" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     const instance = MemberFnTestFoo{ .x = 1234 };
     const memberFn = MemberFnTestFoo.member;
     const result = memberFn(instance);
@@ -178,6 +186,7 @@ test "store member function in variable" {
 }
 test "member functions" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     const r = MemberFnRand{ .seed = 1234 };
     try expect(r.getSeed() == 1234);
 }
@@ -189,6 +198,7 @@ const MemberFnRand = struct {
 };
 test "return struct byval from function" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     const bar = makeBar2(1234, 5678);
@@ -206,6 +216,7 @@ fn makeBar2(x: i32, y: i32) Bar {
 }
 test "call method with mutable reference to struct with no fields" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     const S = struct {
@@ -238,6 +249,7 @@ test "usingnamespace within struct scope" {
 }
 test "struct field init with catch" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     const S = struct {
@@ -296,6 +308,7 @@ const Val = struct {
 };
 test "struct point to self" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -314,6 +327,7 @@ test "struct point to self" {
 }
 test "void struct fields" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -334,6 +348,7 @@ const VoidStructFieldsFoo = struct {
 };
 test "return empty struct from fn" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -347,6 +362,7 @@ fn testReturnEmptyStructFromFn() EmptyStruct2 {
 }
 test "pass slice of empty struct to fn" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -359,6 +375,7 @@ fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
 }
 test "self-referencing struct via array member" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -389,6 +406,7 @@ const EmptyStruct = struct {
 };
 test "align 1 field before self referential align 8 field as slice return type" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -413,6 +431,7 @@ const APackedStruct = packed struct {
 };
 test "packed struct" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -438,6 +457,7 @@ const Foo96Bits = packed struct {
 };
 test "packed struct 24bits" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -484,6 +504,7 @@ test "packed struct 24bits" {
 }
 test "runtime struct initialization of bitfield" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -523,6 +544,7 @@ const Bitfields = packed struct {
 };
 test "native bit field understands endianness" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -546,6 +568,7 @@ test "native bit field understands endianness" {
 }
 test "implicit cast packed struct field to const ptr" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -581,6 +604,7 @@ test "zero-bit field in packed struct" {
 }
 test "packed struct with non-ABI-aligned field" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -610,6 +634,7 @@ const bit_field_1 = BitField1{
 };
 test "bit field access" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -642,6 +667,7 @@ fn getC(data: *const BitField1) u2 {
 }
 test "default struct initialization fields" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -907,6 +933,7 @@ test "packed struct field passed to generic function" {
 }
 test "anonymous struct literal syntax" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1100,6 +1127,7 @@ test "type coercion of pointer to anon struct literal to pointer to struct" {
 }
 test "packed struct with undefined initializers" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig
index 001ba538b2..7fe5b8ecb6 100644
--- a/test/behavior/truncate.zig
+++ b/test/behavior/truncate.zig
@@ -3,6 +3,7 @@ const builtin = @import("builtin");
 const expect = std.testing.expect;
 test "truncate u0 to larger integer allowed and has comptime known result" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var x: u0 = 0;
@@ -11,6 +12,7 @@ test "truncate u0 to larger integer allowed and has comptime known result" {
 }
 test "truncate.u0.literal" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var z = @truncate(u0, 0);
@@ -18,6 +20,7 @@ test "truncate.u0.literal" {
 }
 test "truncate.u0.const" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     const c0: usize = 0;
@@ -26,6 +29,7 @@ test "truncate.u0.const" {
 }
 test "truncate.u0.var" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var d: u8 = 2;
@@ -34,6 +38,7 @@ test "truncate.u0.var" {
 }
 test "truncate i0 to larger integer allowed and has comptime known result" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var x: i0 = 0;
@@ -42,6 +47,7 @@ test "truncate i0 to larger integer allowed and has comptime known result" {
 }
 test "truncate.i0.literal" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var z = @truncate(i0, 0);
@@ -49,6 +55,7 @@ test "truncate.i0.literal" {
 }
 test "truncate.i0.const" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     const c0: isize = 0;
@@ -57,6 +64,7 @@ test "truncate.i0.const" {
 }
 test "truncate.i0.var" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var d: i8 = 2;
@@ -65,6 +73,7 @@ test "truncate.i0.var" {
 }
 test "truncate on comptime integer" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     var x = @truncate(u16, 9999);
diff --git a/test/behavior/var_args.zig b/test/behavior/var_args.zig
index 63b8c35e1b..0e37c845b6 100644
--- a/test/behavior/var_args.zig
+++ b/test/behavior/var_args.zig
@@ -25,6 +25,7 @@ fn readFirstVarArg(args: anytype) void {
 }
 test "send void arg to var args" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -84,6 +85,7 @@ fn foo2(args: anytype) bool {
 }
 test "array of var args functions" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -93,6 +95,7 @@ test "array of var args functions" {
 }
 test "pass zero length array to var args param" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
diff --git a/test/stage2/aarch64.zig b/test/stage2/aarch64.zig
index 64e6f95a84..b16a29f56f 100644
--- a/test/stage2/aarch64.zig
+++ b/test/stage2/aarch64.zig
@@ -17,15 +17,8 @@ pub fn addCases(ctx: *TestContext) !void {
         var case = ctx.exe("linux_aarch64 hello world", linux_aarch64);
         // Regular old hello world
         case.addCompareOutput(
-            \\pub export fn _start() noreturn {
+            \\pub fn main() void {
             \\    print();
-            \\    exit();
-            \\}
-            \\
-            \\fn doNothing() void {}
-            \\
-            \\fn answer() u64 {
-            \\    return 0x1234abcd1234abcd;
             \\}
             \\
             \\fn print() void {
@@ -38,16 +31,6 @@ pub fn addCases(ctx: *TestContext) !void {
             \\        : "memory", "cc"
             \\    );
             \\}
-            \\
-            \\fn exit() noreturn {
-            \\    asm volatile ("svc #0"
-            \\        :
-            \\        : [number] "{x8}" (93),
-            \\          [arg1] "{x0}" (0)
-            \\        : "memory", "cc"
-            \\    );
-            \\    unreachable;
-            \\}
         ,
             "Hello, World!\n",
         );
@@ -102,6 +85,74 @@ pub fn addCases(ctx: *TestContext) !void {
         ,
             "Hello, World!\n",
         );
+
+        case.addCompareOutput(
+            \\pub fn main() void {
+            \\    foo(true);
+            \\}
+            \\
+            \\fn foo(x: bool) void {
+            \\    if (x) {
+            \\        print();
+            \\    }
+            \\}
+            \\
+            \\fn print() void {
+            \\    asm volatile ("svc #0"
+            \\        :
+            \\        : [number] "{x8}" (64),
+            \\          [arg1] "{x0}" (1),
+            \\          [arg2] "{x1}" (@ptrToInt("Hello, World!\n")),
+            \\          [arg3] "{x2}" ("Hello, World!\n".len),
+            \\        : "memory", "cc"
+            \\    );
+            \\}
+        ,
+            "Hello, World!\n",
+        );
+    }
+
+    {
+        var case = ctx.exe("large add function", linux_aarch64);
+
+        case.addCompareOutput(
+            \\pub fn main() void {
+            \\    assert(add(3, 4) == 791);
+            \\}
+            \\
+            \\fn add(a: u32, b: u32) u32 {
+            \\    const x: u32 = blk: {
+            \\        const c = a + b; // 7
+            \\        const d = a + c; // 10
+            \\        const e = d + b; // 14
+            \\        const f = d + e; // 24
+            \\        const g = e + f; // 38
+            \\        const h = f + g; // 62
+            \\        const i = g + h; // 100
+            \\        const j = i + d; // 110
+            \\        const k = i + j; // 210
+            \\        const l = k + c; // 217
+            \\        const m = l + d; // 227
+            \\        const n = m + e; // 241
+            \\        const o = n + f; // 265
+            \\        const p = o + g; // 303
+            \\        const q = p + h; // 365
+            \\        const r = q + i; // 465
+            \\        const s = r + j; // 575
+            \\        const t = s + k; // 785
+            \\        break :blk t;
+            \\    };
+            \\    const y = x + a; // 788
+            \\    const z = y + a; // 791
+            \\    return z;
+            \\}
+            \\
+            \\fn assert(ok: bool) void {
+            \\    if (!ok) unreachable;
+            \\}
+        ,
+            "",
+        );
     }
 
     // macOS tests