diff --git a/lib/std/io.zig b/lib/std/io.zig index 0faba2b652..d95997f853 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -29,8 +29,8 @@ pub const default_mode: ModeOverride = if (is_async) Mode.evented else .blocking fn getStdOutHandle() os.fd_t { if (builtin.os.tag == .windows) { - if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_aarch64) { - // TODO: this is just a temporary workaround until we advance x86 backend further along. + if (builtin.zig_backend == .stage2_aarch64) { + // TODO: this is just a temporary workaround until we advance aarch64 backend further along. return os.windows.GetStdHandle(os.windows.STD_OUTPUT_HANDLE) catch os.windows.INVALID_HANDLE_VALUE; } return os.windows.peb().ProcessParameters.hStdOutput; @@ -55,8 +55,8 @@ pub fn getStdOut() File { fn getStdErrHandle() os.fd_t { if (builtin.os.tag == .windows) { - if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_aarch64) { - // TODO: this is just a temporary workaround until we advance x86 backend further along. + if (builtin.zig_backend == .stage2_aarch64) { + // TODO: this is just a temporary workaround until we advance aarch64 backend further along. return os.windows.GetStdHandle(os.windows.STD_ERROR_HANDLE) catch os.windows.INVALID_HANDLE_VALUE; } return os.windows.peb().ProcessParameters.hStdError; @@ -81,8 +81,8 @@ pub fn getStdErr() File { fn getStdInHandle() os.fd_t { if (builtin.os.tag == .windows) { - if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_aarch64) { - // TODO: this is just a temporary workaround until we advance x86 backend further along. + if (builtin.zig_backend == .stage2_aarch64) { + // TODO: this is just a temporary workaround until we advance aarch64 backend further along. 
return os.windows.GetStdHandle(os.windows.STD_INPUT_HANDLE) catch os.windows.INVALID_HANDLE_VALUE; } return os.windows.peb().ProcessParameters.hStdInput; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 66e1904420..08b6a9950f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -409,10 +409,7 @@ fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void { _ = try self.addInst(.{ .tag = .setcc, .ops = .r_cc, - .data = .{ .r_cc = .{ - .r1 = reg, - .cc = cc, - } }, + .data = .{ .r_cc = .{ .r = reg, .cc = cc } }, }); } @@ -424,14 +421,11 @@ fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void { .rip => .m_rip_cc, else => unreachable, }, - .data = .{ .x_cc = .{ - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, - .cc = cc, - } }, + .data = .{ .x_cc = .{ .cc = cc, .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, }); } @@ -439,11 +433,7 @@ fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bi _ = try self.addInst(.{ .tag = .cmovcc, .ops = .rr_cc, - .data = .{ .rr_cc = .{ - .r1 = reg1, - .r2 = reg2, - .cc = cc, - } }, + .data = .{ .rr_cc = .{ .r1 = reg1, .r2 = reg2, .cc = cc } }, }); } @@ -455,15 +445,11 @@ fn asmCmovccRegisterMemory(self: *Self, reg: Register, m: Memory, cc: bits.Condi .rip => .rm_rip_cc, else => unreachable, }, - .data = .{ .rx_cc = .{ - .r1 = reg, - .cc = cc, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, - } }, + .data = .{ .rx_cc = .{ .r = reg, .cc = cc, .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, }); } @@ -479,10 +465,7 @@ fn asmJccReloc(self: *Self, target: Mir.Inst.Index, cc: bits.Condition) !Mir.Ins return self.addInst(.{ .tag = .jcc, .ops = .inst_cc, - .data = .{ .inst_cc = .{ - .inst = target, - .cc = cc, - } }, + .data = .{ .inst_cc = .{ .inst = target, .cc = cc } }, }); } @@ -503,13 +486,15 @@ fn asmRegister(self: *Self, tag: Mir.Inst.Tag, reg: Register) !void { } fn asmImmediate(self: *Self, tag: Mir.Inst.Tag, imm: Immediate) !void { - const ops: Mir.Inst.Ops = if (imm == .signed) .imm_s else .imm_u; _ = try self.addInst(.{ .tag = tag, - .ops = ops, - .data = .{ .imm = switch (imm) { - .signed => |x| @bitCast(u32, x), - .unsigned => |x| @intCast(u32, x), + .ops = switch (imm) { + .signed => .i_s, + .unsigned => .i_u, + }, + .data = .{ .i = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), } }, }); } @@ -518,37 +503,43 @@ fn asmRegisterRegister(self: *Self, tag: Mir.Inst.Tag, reg1: Register, reg2: Reg _ = try self.addInst(.{ .tag = tag, .ops = .rr, - .data = .{ .rr = .{ - .r1 = reg1, - .r2 = reg2, - } }, + .data = .{ .rr = .{ .r1 = reg1, .r2 = reg2 } }, }); } fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Immediate) !void { const ops: Mir.Inst.Ops = switch (imm) { .signed => .ri_s, - .unsigned => |x| if (x <= math.maxInt(u32)) .ri_u else .ri64, - }; - const data: Mir.Inst.Data = switch (ops) { - .ri_s => .{ .ri = .{ - .r1 = reg, - .imm = @bitCast(u32, imm.signed), - } }, - .ri_u => .{ .ri = .{ - .r1 = reg, - .imm = @intCast(u32, imm.unsigned), 
- } }, - .ri64 => .{ .rx = .{ - .r1 = reg, - .payload = try self.addExtra(Mir.Imm64.encode(imm.unsigned)), - } }, - else => unreachable, + .unsigned => |u| if (math.cast(u32, u)) |_| .ri_u else .ri64, }; _ = try self.addInst(.{ .tag = tag, .ops = ops, - .data = data, + .data = switch (ops) { + .ri_s, .ri_u => .{ .ri = .{ .r = reg, .i = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), + } } }, + .ri64 => .{ .rx = .{ + .r = reg, + .payload = try self.addExtra(Mir.Imm64.encode(imm.unsigned)), + } }, + else => unreachable, + }, + }); +} + +fn asmRegisterRegisterRegister( + self: *Self, + tag: Mir.Inst.Tag, + reg1: Register, + reg2: Register, + reg3: Register, +) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = .rrr, + .data = .{ .rrr = .{ .r1 = reg1, .r2 = reg2, .r3 = reg3 } }, }); } @@ -559,109 +550,133 @@ fn asmRegisterRegisterImmediate( reg2: Register, imm: Immediate, ) !void { - const ops: Mir.Inst.Ops = switch (imm) { - .signed => .rri_s, - .unsigned => .rri_u, - }; - const data: Mir.Inst.Data = switch (ops) { - .rri_s => .{ .rri = .{ - .r1 = reg1, - .r2 = reg2, - .imm = @bitCast(u32, imm.signed), - } }, - .rri_u => .{ .rri = .{ - .r1 = reg1, - .r2 = reg2, - .imm = @intCast(u32, imm.unsigned), - } }, - else => unreachable, - }; _ = try self.addInst(.{ .tag = tag, - .ops = ops, - .data = data, + .ops = switch (imm) { + .signed => .rri_s, + .unsigned => .rri_u, + }, + .data = .{ .rri = .{ .r1 = reg1, .r2 = reg2, .i = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), + } } }, }); } fn asmMemory(self: *Self, tag: Mir.Inst.Tag, m: Memory) !void { - const ops: Mir.Inst.Ops = switch (m) { - .sib => .m_sib, - .rip => .m_rip, - else => unreachable, - }; - const data: Mir.Inst.Data = .{ .payload = switch (ops) { - .m_sib => try self.addExtra(Mir.MemorySib.encode(m)), - .m_rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } }; _ = try self.addInst(.{ .tag = tag, - .ops = ops, - .data = data, - }); -} - -fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.Tag, m: Memory, imm: Immediate) !void { - const ops: Mir.Inst.Ops = switch (m) { - .sib => if (imm == .signed) .mi_s_sib else .mi_u_sib, - .rip => if (imm == .signed) .mi_s_rip else .mi_u_rip, - else => unreachable, - }; - const payload: u32 = switch (ops) { - .mi_s_sib, .mi_u_sib => try self.addExtra(Mir.MemorySib.encode(m)), - .mi_s_rip, .mi_u_rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }; - const data: Mir.Inst.Data = .{ - .xi = .{ .payload = payload, .imm = switch (imm) { - .signed => |x| @bitCast(u32, x), - .unsigned => |x| @intCast(u32, x), + .ops = switch (m) { + .sib => .m_sib, + .rip => .m_rip, + else => unreachable, + }, + .data = .{ .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, } }, - }; - _ = try self.addInst(.{ - .tag = tag, - .ops = ops, - .data = data, }); } fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) !void { - const ops: Mir.Inst.Ops = switch (m) { - .sib => .rm_sib, - .rip => .rm_rip, - else => unreachable, - }; - const data: Mir.Inst.Data = .{ - .rx = .{ .r1 = reg, .payload = switch (ops) { - .rm_sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rm_rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } }, - }; _ = try self.addInst(.{ .tag = tag, - .ops = ops, - .data = data, + .ops = switch (m) { + .sib => .rm_sib, + .rip => 
.rm_rip, + else => unreachable, + }, + .data = .{ .rx = .{ .r = reg, .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, }); } fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) !void { - const ops: Mir.Inst.Ops = switch (m) { - .sib => .mr_sib, - .rip => .mr_rip, - else => unreachable, - }; - const data: Mir.Inst.Data = .{ - .rx = .{ .r1 = reg, .payload = switch (ops) { - .mr_sib => try self.addExtra(Mir.MemorySib.encode(m)), - .mr_rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } }, - }; _ = try self.addInst(.{ .tag = tag, - .ops = ops, - .data = data, + .ops = switch (m) { + .sib => .mr_sib, + .rip => .mr_rip, + else => unreachable, + }, + .data = .{ .rx = .{ .r = reg, .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, + }); +} + +fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.Tag, m: Memory, imm: Immediate) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = switch (m) { + .sib => switch (imm) { + .signed => .mi_sib_s, + .unsigned => .mi_sib_u, + }, + .rip => switch (imm) { + .signed => .mi_rip_s, + .unsigned => .mi_rip_u, + }, + else => unreachable, + }, + .data = .{ .ix = .{ .i = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), + }, .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, + }); +} + +fn asmMemoryRegisterRegister( + self: *Self, + tag: Mir.Inst.Tag, + m: Memory, + reg1: Register, + reg2: Register, +) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = switch (m) { + .sib => .mrr_sib, + .rip => .mrr_rip, + else => unreachable, + }, + .data = .{ .rrx = .{ .r1 = reg1, .r2 = reg2, .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, + }); +} + +fn asmMemoryRegisterImmediate( + self: *Self, + tag: Mir.Inst.Tag, + m: Memory, + reg: Register, + imm: Immediate, +) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = switch (m) { + .sib => .mri_sib, + .rip => .mri_rip, + else => unreachable, + }, + .data = .{ .rix = .{ .r = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, }); } @@ -768,18 +783,12 @@ fn gen(self: *Self) InnerError!void { self.mir_instructions.set(backpatch_stack_sub, .{ .tag = .sub, .ops = .ri_u, - .data = .{ .ri = .{ - .r1 = .rsp, - .imm = aligned_stack_end, - } }, + .data = .{ .ri = .{ .r = .rsp, .i = aligned_stack_end } }, }); self.mir_instructions.set(backpatch_stack_add, .{ .tag = .add, .ops = .ri_u, - .data = .{ .ri = .{ - .r1 = .rsp, - .imm = aligned_stack_end, - } }, + .data = .{ .ri = .{ .r = .rsp, .i = aligned_stack_end } }, }); const save_reg_list = try self.addExtra(Mir.SaveRegisterList{ @@ -1392,38 +1401,80 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - if (self.liveness.isUnused(inst)) - return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - - const operand_ty = self.air.typeOf(ty_op.operand); - const operand 
= try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); - - const operand_abi_size = operand_ty.abiSize(self.target.*); - const dest_ty = self.air.typeOfIndex(inst); - const dest_abi_size = dest_ty.abiSize(self.target.*); - const dst_mcv: MCValue = blk: { - if (info_a.bits == info_b.bits) { - break :blk operand; - } - if (operand_abi_size > 8 or dest_abi_size > 8) { - return self.fail("TODO implement intCast for abi sizes larger than 8", .{}); - } - - const operand_lock: ?RegisterLock = switch (operand) { + const result = if (self.liveness.isUnused(inst)) .dead else result: { + const src_ty = self.air.typeOf(ty_op.operand); + const src_int_info = src_ty.intInfo(self.target.*); + const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_mcv = try self.resolveInst(ty_op.operand); + const src_lock = switch (src_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, }; - defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); + defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const reg = try self.register_manager.allocReg(inst, gp); - try self.genSetReg(dest_ty, reg, .{ .immediate = 0 }); - try self.genSetReg(operand_ty, reg, operand); - break :blk MCValue{ .register = reg }; + const dst_ty = self.air.typeOfIndex(inst); + const dst_int_info = dst_ty.intInfo(self.target.*); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_mcv = if (dst_abi_size <= src_abi_size and + self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + src_mcv + else + try self.allocRegOrMem(inst, true); + + const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; + const signedness: std.builtin.Signedness = if (dst_int_info.signedness == .signed and + src_int_info.signedness == .signed) .signed else .unsigned; + switch (dst_mcv) { + .register => |dst_reg| { + const min_abi_size = @min(dst_abi_size, src_abi_size); + const tag: Mir.Inst.Tag = switch (signedness) { + .signed => .movsx, + .unsigned => if (min_abi_size > 2) .mov else .movzx, + }; + const dst_alias = switch (tag) { + .movsx => dst_reg.to64(), + .mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(), + else => unreachable, + }; + switch (src_mcv) { + .register => |src_reg| { + try self.asmRegisterRegister( + tag, + dst_alias, + registerAlias(src_reg, min_abi_size), + ); + }, + .stack_offset => |src_off| { + try self.asmRegisterMemory(tag, dst_alias, Memory.sib( + Memory.PtrSize.fromSize(min_abi_size), + .{ .base = .rbp, .disp = -src_off }, + )); + }, + else => return self.fail("TODO airIntCast from {s} to {s}", .{ + @tagName(src_mcv), + @tagName(dst_mcv), + }), + } + if (self.regExtraBits(min_ty) > 0) try self.truncateRegister(min_ty, dst_reg); + }, + else => { + try self.setRegOrMem(min_ty, dst_mcv, src_mcv); + const extra = dst_abi_size * 8 - dst_int_info.bits; + if (extra > 0) { + try self.genShiftBinOpMir(switch (signedness) { + .signed => .sal, + .unsigned => .shl, + }, dst_ty, dst_mcv, .{ .immediate = extra }); + try self.genShiftBinOpMir(switch (signedness) { + .signed => .sar, + .unsigned => .shr, + }, dst_ty, dst_mcv, .{ .immediate = extra }); + } + }, + } + break :result dst_mcv; }; - - return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { @@ -1550,28 
+1601,161 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) - .dead - else - return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const ty = self.air.typeOf(bin_op.lhs); + + const lhs_mcv = try self.resolveInst(bin_op.lhs); + const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) + lhs_mcv + else + try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv); + const dst_reg = dst_mcv.register; + const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); + defer self.register_manager.unlockReg(dst_lock); + + const rhs_mcv = try self.resolveInst(bin_op.rhs); + const rhs_lock = switch (rhs_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const limit_reg = try self.register_manager.allocReg(null, gp); + const limit_mcv = MCValue{ .register = limit_reg }; + const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg); + defer self.register_manager.unlockReg(limit_lock); + + const reg_bits = self.regBitSize(ty); + const cc: Condition = if (ty.isSignedInt()) cc: { + try self.genSetReg(ty, limit_reg, dst_mcv); + try self.genBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); + try self.genBinOpMir(.xor, ty, limit_mcv, .{ + .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, + }); + break :cc .o; + } else cc: { + try self.genSetReg(ty, limit_reg, .{ + .immediate = @as(u64, std.math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits), + }); + break :cc .c; + }; + try self.genBinOpMir(.add, ty, dst_mcv, rhs_mcv); + + const abi_size = @intCast(u32, @max(ty.abiSize(self.target.*), 2)); + try self.asmCmovccRegisterRegister( + registerAlias(dst_reg, abi_size), + registerAlias(limit_reg, abi_size), + cc, + ); + break :result dst_mcv; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) - .dead - else - return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const ty = self.air.typeOf(bin_op.lhs); + + const lhs_mcv = try self.resolveInst(bin_op.lhs); + const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) + lhs_mcv + else + try self.copyToRegisterWithInstTracking(inst, ty, lhs_mcv); + const dst_reg = dst_mcv.register; + const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); + defer self.register_manager.unlockReg(dst_lock); + + const rhs_mcv = try self.resolveInst(bin_op.rhs); + const rhs_lock = switch (rhs_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const limit_reg = try self.register_manager.allocReg(null, gp); + const limit_mcv = MCValue{ .register = limit_reg }; + const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg); + defer self.register_manager.unlockReg(limit_lock); + + const reg_bits = self.regBitSize(ty); + const cc: 
Condition = if (ty.isSignedInt()) cc: { + try self.genSetReg(ty, limit_reg, dst_mcv); + try self.genBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); + try self.genBinOpMir(.xor, ty, limit_mcv, .{ + .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, + }); + break :cc .o; + } else cc: { + try self.genSetReg(ty, limit_reg, .{ .immediate = 0 }); + break :cc .c; + }; + try self.genBinOpMir(.sub, ty, dst_mcv, rhs_mcv); + + const abi_size = @intCast(u32, @max(ty.abiSize(self.target.*), 2)); + try self.asmCmovccRegisterRegister( + registerAlias(dst_reg, abi_size), + registerAlias(limit_reg, abi_size), + cc, + ); + break :result dst_mcv; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) - .dead - else - return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const ty = self.air.typeOf(bin_op.lhs); + + try self.spillRegisters(&.{ .rax, .rdx }); + const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); + defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock); + + const lhs_mcv = try self.resolveInst(bin_op.lhs); + const lhs_lock = switch (lhs_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); + + const rhs_mcv = try self.resolveInst(bin_op.rhs); + const rhs_lock = switch (rhs_mcv) { + .register => |reg| self.register_manager.lockReg(reg), + else => null, + }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); + + const limit_reg = try self.register_manager.allocReg(null, gp); + const limit_mcv = MCValue{ .register = limit_reg }; + const limit_lock = self.register_manager.lockRegAssumeUnused(limit_reg); + defer self.register_manager.unlockReg(limit_lock); + + const reg_bits = self.regBitSize(ty); + const cc: Condition = if (ty.isSignedInt()) cc: { + try self.genSetReg(ty, limit_reg, lhs_mcv); + try self.genBinOpMir(.xor, ty, limit_mcv, rhs_mcv); + try self.genBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); + try self.genBinOpMir(.xor, ty, limit_mcv, .{ + .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, + }); + break :cc .o; + } else cc: { + try self.genSetReg(ty, limit_reg, .{ + .immediate = @as(u64, std.math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits), + }); + break :cc .c; + }; + + const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, lhs_mcv, rhs_mcv); + const abi_size = @intCast(u32, @max(ty.abiSize(self.target.*), 2)); + try self.asmCmovccRegisterRegister( + registerAlias(dst_mcv.register, abi_size), + registerAlias(limit_reg, abi_size), + cc, + ); + break :result dst_mcv; + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1599,6 +1783,7 @@ fn airAddSubShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .add_with_overflow => try self.genBinOp(null, .add, bin_op.lhs, bin_op.rhs), .sub_with_overflow => try self.genBinOp(null, .sub, bin_op.lhs, bin_op.rhs), .shl_with_overflow => blk: { + try self.register_manager.getReg(.rcx, null); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const shift_ty = self.air.typeOf(bin_op.rhs); @@ -1889,6 +2074,7 @@ fn airShlShrBinOp(self: *Self, 
inst: Air.Inst.Index) !void { try self.spillRegisters(&.{.rcx}); const tag = self.air.instructions.items(.tag)[inst]; + try self.register_manager.getReg(.rcx, null); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); @@ -2018,7 +2204,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); if (err_off > 0) { const shift = @intCast(u6, err_off * 8); - try self.genShiftBinOpMir(.shr, err_union_ty, result.register, .{ .immediate = shift }); + try self.genShiftBinOpMir(.shr, err_union_ty, result, .{ .immediate = shift }); } else { try self.truncateRegister(Type.anyerror, result.register); } @@ -2050,9 +2236,7 @@ fn genUnwrapErrorUnionPayloadMir( const payload_ty = err_union_ty.errorUnionPayload(); const result: MCValue = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - break :result MCValue.none; - } + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); switch (err_union) { @@ -2065,17 +2249,17 @@ fn genUnwrapErrorUnionPayloadMir( const eu_lock = self.register_manager.lockReg(reg); defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); - const result_reg: Register = if (maybe_inst) |inst| - (try self.copyToRegisterWithInstTracking(inst, err_union_ty, err_union)).register + const result_mcv: MCValue = if (maybe_inst) |inst| + try self.copyToRegisterWithInstTracking(inst, err_union_ty, err_union) else - try self.copyToTmpRegister(err_union_ty, err_union); + .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) }; if (payload_off > 0) { const shift = @intCast(u6, payload_off * 8); - try self.genShiftBinOpMir(.shr, err_union_ty, result_reg, .{ .immediate = shift }); + try self.genShiftBinOpMir(.shr, err_union_ty, result_mcv, .{ .immediate = shift }); } else { - try self.truncateRegister(payload_ty, result_reg); + try self.truncateRegister(payload_ty, result_mcv.register); } - break :result MCValue{ .register = result_reg }; + break :result result_mcv; }, else => return self.fail("TODO implement genUnwrapErrorUnionPayloadMir for {}", .{err_union}), } @@ -2253,8 +2437,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { .stack_offset => |off| try self.asmMemoryImmediate( .mov, - Memory.sib(.byte, .{ .base = .rsp, .disp = pl_abi_size - off }), - Immediate.u(0), + Memory.sib(.byte, .{ .base = .rbp, .disp = pl_abi_size - off }), + Immediate.u(1), ), } } @@ -2473,10 +2657,9 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else result: { - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.air.typeOf(bin_op.lhs); + const result = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs); @@ -2563,54 +2746,44 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { - const is_volatile = false; // TODO const bin_op = 
self.air.instructions.items(.data)[inst].bin_op; - - if (!is_volatile and self.liveness.isUnused(inst)) { - return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); - } - - // this is identical to the `airPtrElemPtr` codegen expect here an - // additional `mov` is needed at the end to get the actual value - const ptr_ty = self.air.typeOf(bin_op.lhs); - const ptr = try self.resolveInst(bin_op.lhs); - const ptr_lock: ?RegisterLock = switch (ptr) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, + const result = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + // this is identical to the `airPtrElemPtr` codegen except here an + // additional `mov` is needed at the end to get the actual value + + const elem_ty = ptr_ty.elemType2(); + const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const index_ty = self.air.typeOf(bin_op.rhs); + const index_mcv = try self.resolveInst(bin_op.rhs); + const index_lock = switch (index_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (index_lock) |lock| self.register_manager.unlockReg(lock); + + const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_abi_size); + const offset_lock = self.register_manager.lockRegAssumeUnused(offset_reg); + defer self.register_manager.unlockReg(offset_lock); + + const ptr_mcv = try self.resolveInst(bin_op.lhs); + const elem_ptr_reg = if (ptr_mcv.isRegister() and self.liveness.operandDies(inst, 0)) + ptr_mcv.register + else + try self.copyToTmpRegister(ptr_ty, ptr_mcv); + const elem_ptr_lock = self.register_manager.lockRegAssumeUnused(elem_ptr_reg); + defer self.register_manager.unlockReg(elem_ptr_lock); + try self.asmRegisterRegister(.add, elem_ptr_reg, offset_reg); + + const dst_mcv = try self.allocRegOrMem(inst, true); + const dst_lock = switch (dst_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + try self.load(dst_mcv, .{ .register = elem_ptr_reg }, ptr_ty); + break :result dst_mcv; }; - defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - - const elem_ty = ptr_ty.elemType2(); - const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); - const index_ty = self.air.typeOf(bin_op.rhs); - const index = try self.resolveInst(bin_op.rhs); - const index_lock: ?RegisterLock = switch (index) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (index_lock) |lock| self.register_manager.unlockReg(lock); - - const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size); - const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); - defer self.register_manager.unlockReg(offset_reg_lock); - - const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr); - try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg }); - - const result: MCValue = result: { - if (elem_abi_size > 8) { - return self.fail("TODO copy value with size {} from pointer", .{elem_abi_size}); - } else { - try self.asmRegisterMemory( - .mov, - registerAlias(dst_mcv.register, elem_abi_size), - Memory.sib(Memory.PtrSize.fromSize(elem_abi_size), .{ .base = dst_mcv.register }), - ); - break :result .{ .register = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)) }; - } - }; - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); }
@@ -2618,36 +2791,34 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - if (self.liveness.isUnused(inst)) { - return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); - } + const result = if (self.liveness.isUnused(inst)) .dead else result: { + const ptr_ty = self.air.typeOf(extra.lhs); + const ptr = try self.resolveInst(extra.lhs); + const ptr_lock: ?RegisterLock = switch (ptr) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const ptr_ty = self.air.typeOf(extra.lhs); - const ptr = try self.resolveInst(extra.lhs); - const ptr_lock: ?RegisterLock = switch (ptr) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, + const elem_ty = ptr_ty.elemType2(); + const elem_abi_size = elem_ty.abiSize(self.target.*); + const index_ty = self.air.typeOf(extra.rhs); + const index = try self.resolveInst(extra.rhs); + const index_lock: ?RegisterLock = switch (index) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (index_lock) |lock| self.register_manager.unlockReg(lock); + + const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size); + const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); + defer self.register_manager.unlockReg(offset_reg_lock); + + const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr); + try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg }); + break :result dst_mcv; }; - defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - - const elem_ty = ptr_ty.elemType2(); - const elem_abi_size = elem_ty.abiSize(self.target.*); - const index_ty = self.air.typeOf(extra.rhs); - const index = try self.resolveInst(extra.rhs); - const index_lock: ?RegisterLock = switch (index) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (index_lock) |lock| self.register_manager.unlockReg(lock); - - const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size); - const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); - defer self.register_manager.unlockReg(offset_reg_lock); - - const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr); - try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg }); - - return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none }); + return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { @@ -2728,7 +2899,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { else 0; const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand); - try self.genShiftBinOpMir(.shr, Type.usize, result.register, .{ .immediate = shift }); + try self.genShiftBinOpMir(.shr, Type.usize, result, .{ .immediate = shift }); break :blk MCValue{ .register = registerAlias(result.register, @intCast(u32, layout.tag_size)), }; @@ -2820,12 +2991,11 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const extra_bits = self.regExtraBits(src_ty); const masked_mcv = if (extra_bits > 0) masked: { const mask_mcv = MCValue{ - .immediate = ((@as(u64, 1) << @intCast(u6, extra_bits)) - 1) << @intCast(u6, src_bits), + .immediate = ((@as(u64, 1) << @intCast(u6, 
extra_bits)) - 1) << + @intCast(u6, src_bits), }; const tmp_mcv = tmp: { - if (src_mcv.isImmediate() or self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) { - break :tmp src_mcv; - } + if (src_mcv.isImmediate() or self.liveness.operandDies(inst, 0)) break :tmp src_mcv; try self.genSetReg(src_ty, dst_reg, src_mcv); break :tmp dst_mcv; }; @@ -3526,34 +3696,37 @@ fn airStore(self: *Self, inst: Air.Inst.Index) !void { fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index); + const result = try self.fieldPtr(inst, extra.struct_operand, extra.field_index); return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const result = try self.structFieldPtr(inst, ty_op.operand, index); + const result = try self.fieldPtr(inst, ty_op.operand, index); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } -fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { +fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { if (self.liveness.isUnused(inst)) { return MCValue.dead; } const mcv = try self.resolveInst(operand); const ptr_ty = self.air.typeOf(operand); - const struct_ty = ptr_ty.childType(); - if (struct_ty.zigTypeTag() == .Struct and struct_ty.containerLayout() == .Packed) { - return self.fail("TODO structFieldPtr implement packed structs", .{}); - } - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const container_ty = ptr_ty.childType(); + const field_offset = switch (container_ty.containerLayout()) { + .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*)), + .Packed => if (container_ty.zigTypeTag() == .Struct and ptr_ty.ptrInfo().data.host_size == 0) + container_ty.packedStructFieldByteOffset(index, self.target.*) + else + 0, + }; const dst_mcv: MCValue = result: { switch (mcv) { .stack_offset => { const offset_reg = try self.copyToTmpRegister(ptr_ty, .{ - .immediate = struct_field_offset, + .immediate = field_offset, }); const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); defer self.register_manager.unlockReg(offset_reg_lock); @@ -3563,7 +3736,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde break :result dst_mcv; }, .ptr_stack_offset => |off| { - const ptr_stack_offset = off - @intCast(i32, struct_field_offset); + const ptr_stack_offset = off - @intCast(i32, field_offset); break :result MCValue{ .ptr_stack_offset = ptr_stack_offset }; }, .register => |reg| { @@ -3571,7 +3744,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde defer self.register_manager.unlockReg(reg_lock); const offset_reg = try self.copyToTmpRegister(ptr_ty, .{ - .immediate = struct_field_offset, + .immediate = field_offset, }); const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); defer self.register_manager.unlockReg(offset_reg_lock); @@ -3592,7 +3765,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde try self.genBinOpMir(.add, ptr_ty, .{ .register = result_reg }, .{ .register = offset_reg }); break 
:result MCValue{ .register = result_reg }; }, - else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}), + else => return self.fail("TODO implement fieldPtr for {}", .{mcv}), } }; return dst_mcv; @@ -3609,18 +3782,22 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); - if (struct_ty.zigTypeTag() == .Struct and struct_ty.containerLayout() == .Packed) { - return self.fail("TODO airStructFieldVal implement packed structs", .{}); - } - const struct_field_offset = struct_ty.structFieldOffset(index, self.target.*); - const struct_field_ty = struct_ty.structFieldType(index); + const container_ty = self.air.typeOf(operand); + const field_ty = container_ty.structFieldType(index); + const field_bit_offset = switch (container_ty.containerLayout()) { + .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*) * 8), + .Packed => if (container_ty.castTag(.@"struct")) |struct_obj| + struct_obj.data.packedFieldBitOffset(self.target.*, index) + else + 0, + }; const result: MCValue = result: { switch (mcv) { .stack_offset => |off| { - const stack_offset = off - @intCast(i32, struct_field_offset); - break :result MCValue{ .stack_offset = stack_offset }; + const byte_offset = std.math.divExact(u32, field_bit_offset, 8) catch + return self.fail("TODO implement struct_field_val for a packed struct", .{}); + break :result MCValue{ .stack_offset = off - @intCast(i32, byte_offset) }; }, .register => |reg| { const reg_lock = self.register_manager.lockRegAssumeUnused(reg); @@ -3643,27 +3820,23 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock); // Shift by struct_field_offset. 
- const shift = @intCast(u8, struct_field_offset * 8); - try self.genShiftBinOpMir(.shr, Type.usize, dst_mcv.register, .{ .immediate = shift }); + try self.genShiftBinOpMir(.shr, Type.usize, dst_mcv, .{ .immediate = field_bit_offset }); - // Mask with reg.bitSize() - struct_field_size - const max_reg_bit_width = Register.rax.bitSize(); - const mask_shift = @intCast(u6, (max_reg_bit_width - struct_field_ty.bitSize(self.target.*))); - const mask = (~@as(u64, 0)) >> mask_shift; + // Mask to field_bit_size bits + const field_bit_size = field_ty.bitSize(self.target.*); + const mask = ~@as(u64, 0) >> @intCast(u6, 64 - field_bit_size); const tmp_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = mask }); try self.genBinOpMir(.@"and", Type.usize, dst_mcv, .{ .register = tmp_reg }); - const signedness: std.builtin.Signedness = blk: { - if (struct_field_ty.zigTypeTag() != .Int) break :blk .unsigned; - break :blk struct_field_ty.intInfo(self.target.*).signedness; - }; - const field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)); - if (signedness == .signed and field_size < 8) { + const signedness = + if (field_ty.isAbiInt()) field_ty.intInfo(self.target.*).signedness else .unsigned; + const field_byte_size = @intCast(u32, field_ty.abiSize(self.target.*)); + if (signedness == .signed and field_byte_size < 8) { try self.asmRegisterRegister( .movsx, dst_mcv.register, - registerAlias(dst_mcv.register, field_size), + registerAlias(dst_mcv.register, field_byte_size), ); } @@ -3707,10 +3880,10 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: const src_ty = self.air.typeOf(src_air); const src_mcv = try self.resolveInst(src_air); if (src_ty.zigTypeTag() == .Vector) { - return self.fail("TODO implement genBinOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); + return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); } if (src_ty.abiSize(self.target.*) > 8) { - return self.fail("TODO implement genBinOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); + return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); } switch (src_mcv) { @@ -3805,29 +3978,186 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue } /// Clobbers .rcx for non-immediate shift value. 
-fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shift: MCValue) !void { - switch (tag) { - .sal, .sar, .shl, .shr => {}, - else => unreachable, - } - - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - blk: { - switch (shift) { +fn genShiftBinOpMir( + self: *Self, + tag: Mir.Inst.Tag, + ty: Type, + lhs_mcv: MCValue, + shift_mcv: MCValue, +) !void { + const rhs_mcv: MCValue = rhs: { + switch (shift_mcv) { .immediate => |imm| switch (imm) { 0 => return, - else => return self.asmRegisterImmediate(tag, registerAlias(reg, abi_size), Immediate.u(imm)), - }, - .register => |shift_reg| { - if (shift_reg == .rcx) break :blk; + else => break :rhs shift_mcv, }, + .register => |shift_reg| if (shift_reg == .rcx) break :rhs shift_mcv, else => {}, } self.register_manager.getRegAssumeFree(.rcx, null); - try self.genSetReg(Type.u8, .rcx, shift); - } + try self.genSetReg(Type.u8, .rcx, shift_mcv); + break :rhs .{ .register = .rcx }; + }; - try self.asmRegisterRegister(tag, registerAlias(reg, abi_size), .cl); + const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + if (abi_size <= 8) { + switch (lhs_mcv) { + .register => |lhs_reg| switch (rhs_mcv) { + .immediate => |rhs_imm| try self.asmRegisterImmediate( + tag, + registerAlias(lhs_reg, abi_size), + Immediate.u(rhs_imm), + ), + .register => |rhs_reg| try self.asmRegisterRegister( + tag, + registerAlias(lhs_reg, abi_size), + registerAlias(rhs_reg, 1), + ), + else => return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ + @tagName(lhs_mcv), + @tagName(rhs_mcv), + }), + }, + .stack_offset => |lhs_off| switch (rhs_mcv) { + .immediate => |rhs_imm| try self.asmMemoryImmediate( + tag, + Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -lhs_off }), + Immediate.u(rhs_imm), + ), + .register => |rhs_reg| try self.asmMemoryRegister( + tag, + Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -lhs_off }), + registerAlias(rhs_reg, 1), + ), + else => return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ + @tagName(lhs_mcv), + @tagName(rhs_mcv), + }), + }, + else => return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ + @tagName(lhs_mcv), + @tagName(rhs_mcv), + }), + } + } else if (abi_size <= 16) { + const tmp_reg = try self.register_manager.allocReg(null, gp); + const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); + defer self.register_manager.unlockReg(tmp_lock); + + const info: struct { offsets: [2]i32, double_tag: Mir.Inst.Tag } = switch (tag) { + .shl, .sal => .{ .offsets = .{ 0, 8 }, .double_tag = .shld }, + .shr, .sar => .{ .offsets = .{ 8, 0 }, .double_tag = .shrd }, + else => unreachable, + }; + switch (lhs_mcv) { + .stack_offset => |dst_off| switch (rhs_mcv) { + .immediate => |rhs_imm| if (rhs_imm == 0) {} else if (rhs_imm < 64) { + try self.asmRegisterMemory( + .mov, + tmp_reg, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[0] - dst_off }), + ); + try self.asmMemoryRegisterImmediate( + info.double_tag, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[1] - dst_off }), + tmp_reg, + Immediate.u(rhs_imm), + ); + try self.asmMemoryImmediate( + tag, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[0] - dst_off }), + Immediate.u(rhs_imm), + ); + } else { + assert(rhs_imm < 128); + try self.asmRegisterMemory( + .mov, + tmp_reg, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[0] - dst_off }), + ); + if (rhs_imm > 64) { + try self.asmRegisterImmediate(tag, tmp_reg, Immediate.u(rhs_imm - 64)); 
+ } + try self.asmMemoryRegister( + .mov, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[1] - dst_off }), + tmp_reg, + ); + switch (tag) { + .shl, .sal, .shr => { + try self.asmRegisterRegister(.xor, tmp_reg.to32(), tmp_reg.to32()); + try self.asmMemoryRegister( + .mov, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[0] - dst_off }), + tmp_reg, + ); + }, + .sar => try self.asmMemoryImmediate( + tag, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[0] - dst_off }), + Immediate.u(63), + ), + else => unreachable, + } + }, + else => { + const first_reg = try self.register_manager.allocReg(null, gp); + const first_lock = self.register_manager.lockRegAssumeUnused(first_reg); + defer self.register_manager.unlockReg(first_lock); + + const second_reg = try self.register_manager.allocReg(null, gp); + const second_lock = self.register_manager.lockRegAssumeUnused(second_reg); + defer self.register_manager.unlockReg(second_lock); + + try self.genSetReg(Type.u8, .cl, rhs_mcv); + try self.asmRegisterMemory( + .mov, + first_reg, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[0] - dst_off }), + ); + try self.asmRegisterMemory( + .mov, + second_reg, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[1] - dst_off }), + ); + switch (tag) { + .shl, .sal, .shr => try self.asmRegisterRegister( + .xor, + tmp_reg.to32(), + tmp_reg.to32(), + ), + .sar => { + try self.asmRegisterRegister(.mov, tmp_reg, first_reg); + try self.asmRegisterImmediate(tag, tmp_reg, Immediate.u(63)); + }, + else => unreachable, + } + try self.asmRegisterRegisterRegister(info.double_tag, second_reg, first_reg, .cl); + try self.asmRegisterRegister(tag, first_reg, .cl); + try self.asmRegisterImmediate(.cmp, .cl, Immediate.u(64)); + try self.asmCmovccRegisterRegister(second_reg, first_reg, .ae); + try self.asmCmovccRegisterRegister(first_reg, tmp_reg, .ae); + try self.asmMemoryRegister( + .mov, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[1] - dst_off }), + second_reg, + ); + try self.asmMemoryRegister( + .mov, + Memory.sib(.qword, .{ .base = .rbp, .disp = info.offsets[0] - dst_off }), + first_reg, + ); + }, + }, + else => return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ + @tagName(lhs_mcv), + @tagName(rhs_mcv), + }), + } + } else return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{ + @tagName(lhs_mcv), + @tagName(rhs_mcv), + }); } /// Result is always a register. 
@@ -3837,68 +4167,61 @@ fn genShiftBinOp( self: *Self, tag: Air.Inst.Tag, maybe_inst: ?Air.Inst.Index, - lhs: MCValue, - rhs: MCValue, + lhs_mcv: MCValue, + rhs_mcv: MCValue, lhs_ty: Type, rhs_ty: Type, ) !MCValue { - if (lhs_ty.zigTypeTag() == .Vector or lhs_ty.zigTypeTag() == .Float) { - return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); - } - if (lhs_ty.abiSize(self.target.*) > 8) { + if (lhs_ty.zigTypeTag() == .Vector) { return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); } assert(rhs_ty.abiSize(self.target.*) == 1); - const lhs_lock: ?RegisterLock = switch (lhs) { + const lhs_abi_size = lhs_ty.abiSize(self.target.*); + if (lhs_abi_size > 16) { + return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); + } + + try self.register_manager.getReg(.rcx, null); + const rcx_lock = self.register_manager.lockRegAssumeUnused(.rcx); + defer self.register_manager.unlockReg(rcx_lock); + + const lhs_lock = switch (lhs_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, }; defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_lock: ?RegisterLock = switch (rhs) { + const rhs_lock = switch (rhs_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, }; defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - self.register_manager.getRegAssumeFree(.rcx, null); - const rcx_lock = self.register_manager.lockRegAssumeUnused(.rcx); - defer self.register_manager.unlockReg(rcx_lock); - - const dst: MCValue = blk: { + const dst_mcv: MCValue = dst: { if (maybe_inst) |inst| { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - // TODO dst can also be a memory location - if (self.reuseOperand(inst, bin_op.lhs, 0, lhs) and lhs.isRegister()) { - break :blk lhs; - } - break :blk try self.copyToRegisterWithInstTracking(inst, lhs_ty, lhs); + if (self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) break :dst lhs_mcv; } - break :blk MCValue{ .register = try self.copyToTmpRegister(lhs_ty, lhs) }; + const dst_mcv = try self.allocRegOrMemAdvanced(lhs_ty, maybe_inst, true); + try self.setRegOrMem(lhs_ty, dst_mcv, lhs_mcv); + break :dst dst_mcv; }; const signedness = lhs_ty.intInfo(self.target.*).signedness; - switch (tag) { - .shl => try self.genShiftBinOpMir(switch (signedness) { + try self.genShiftBinOpMir(switch (tag) { + .shl, .shl_exact => switch (signedness) { .signed => .sal, .unsigned => .shl, - }, lhs_ty, dst.register, rhs), - - .shl_exact => try self.genShiftBinOpMir(.shl, lhs_ty, dst.register, rhs), - - .shr, - .shr_exact, - => try self.genShiftBinOpMir(switch (signedness) { + }, + .shr, .shr_exact => switch (signedness) { .signed => .sar, .unsigned => .shr, - }, lhs_ty, dst.register, rhs), - + }, else => unreachable, - } - - return dst; + }, lhs_ty, dst_mcv, rhs_mcv); + return dst_mcv; } /// Result is always a register. 
@@ -3915,7 +4238,8 @@ fn genMulDivBinOp( if (ty.zigTypeTag() == .Vector or ty.zigTypeTag() == .Float) { return self.fail("TODO implement genMulDivBinOp for {}", .{ty.fmtDebug()}); } - if (ty.abiSize(self.target.*) > 8) { + const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + if (abi_size > 8) { return self.fail("TODO implement genMulDivBinOp for {}", .{ty.fmtDebug()}); } if (tag == .div_float) { @@ -3925,10 +4249,8 @@ fn genMulDivBinOp( assert(self.register_manager.isRegFree(.rax)); assert(self.register_manager.isRegFree(.rdx)); - const reg_locks = self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx }); - defer for (reg_locks) |reg| { - self.register_manager.unlockReg(reg); - }; + const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); + defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock); const int_info = ty.intInfo(self.target.*); const signedness = int_info.signedness; @@ -3953,35 +4275,24 @@ fn genMulDivBinOp( const mir_tag: Mir.Inst.Tag = switch (signedness) { .signed => switch (tag) { - .mul, .mulwrap => Mir.Inst.Tag.imul, - .div_trunc, .div_exact, .rem => Mir.Inst.Tag.idiv, + .mul, .mulwrap => .imul, + .div_trunc, .div_exact, .rem => .idiv, else => unreachable, }, .unsigned => switch (tag) { - .mul, .mulwrap => Mir.Inst.Tag.mul, - .div_trunc, .div_exact, .rem => Mir.Inst.Tag.div, + .mul, .mulwrap => .mul, + .div_trunc, .div_exact, .rem => .div, else => unreachable, }, }; try self.genIntMulDivOpMir(mir_tag, ty, .signed, lhs, rhs); - switch (signedness) { - .signed => switch (tag) { - .mul, .mulwrap, .div_trunc, .div_exact => return MCValue{ .register = .rax }, - .rem => return MCValue{ .register = .rdx }, - else => unreachable, - }, - .unsigned => switch (tag) { - .mul, .mulwrap, .div_trunc, .div_exact => return MCValue{ - .register = registerAlias(.rax, @intCast(u32, ty.abiSize(self.target.*))), - }, - .rem => return MCValue{ - .register = registerAlias(.rdx, @intCast(u32, ty.abiSize(self.target.*))), - }, - else => unreachable, - }, - } + return .{ .register = registerAlias(switch (tag) { + .mul, .mulwrap, .div_trunc, .div_exact => .rax, + .rem => .rdx, + else => unreachable, + }, abi_size) }; }, .mod => { @@ -3998,14 +4309,14 @@ fn genMulDivBinOp( const result: MCValue = if (maybe_inst) |inst| try self.copyToRegisterWithInstTracking(inst, ty, lhs) else - MCValue{ .register = try self.copyToTmpRegister(ty, lhs) }; + .{ .register = try self.copyToTmpRegister(ty, lhs) }; try self.genBinOpMir(.sub, ty, result, div_floor); return result; }, .unsigned => { try self.genIntMulDivOpMir(.div, ty, .unsigned, lhs, rhs); - return MCValue{ .register = registerAlias(.rdx, @intCast(u32, ty.abiSize(self.target.*))) }; + return .{ .register = registerAlias(.rdx, abi_size) }; }, } }, @@ -4397,7 +4708,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu ); } }, - else => return self.fail("TODO getBinOpMir implement large immediate ABI", .{}), + else => return self.fail("TODO genBinOpMir implement large immediate ABI", .{}), } }, .memory, @@ -4478,28 +4789,28 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu ); } }, - else => return self.fail("TODO getBinOpMir implement large immediate ABI", .{}), + else => return self.fail("TODO genBinOpMir implement large immediate ABI", .{}), } }, .memory, .stack_offset, .ptr_stack_offset, => { - return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{}); + return self.fail("TODO implement x86 genBinOpMir source 
memory", .{}); }, .linker_load => { - return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{}); + return self.fail("TODO implement x86 genBinOpMir source symbol at index in linker", .{}); }, .eflags => { - return self.fail("TODO implement x86 ADD/SUB/CMP source eflags", .{}); + return self.fail("TODO implement x86 genBinOpMir source eflags", .{}); }, } }, .memory => { - return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{}); + return self.fail("TODO implement x86 genBinOpMir destination memory", .{}); }, .linker_load => { - return self.fail("TODO implement x86 ADD/SUB/CMP destination symbol at index", .{}); + return self.fail("TODO implement x86 genBinOpMir destination symbol at index", .{}); }, } } @@ -5437,7 +5748,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! const tmp_reg = try self.copyToTmpRegister(ty, operand); if (err_off > 0) { const shift = @intCast(u6, err_off * 8); - try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{ .immediate = shift }); + try self.genShiftBinOpMir(.shr, ty, .{ .register = tmp_reg }, .{ .immediate = shift }); } else { try self.truncateRegister(Type.anyerror, tmp_reg); } @@ -5945,13 +6256,23 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const inputs = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; - const dead = !is_volatile and self.liveness.isUnused(inst); - const result: MCValue = if (dead) .dead else result: { + var result: MCValue = .none; + if (!is_volatile and self.liveness.isUnused(inst)) result = .dead else { + var args = std.StringArrayHashMap(MCValue).init(self.gpa); + try args.ensureTotalCapacity(outputs.len + inputs.len + clobbers_len); + defer { + for (args.values()) |arg| switch (arg) { + .register => |reg| self.register_manager.unlockReg(.{ .register = reg }), + else => {}, + }; + args.deinit(); + } + if (outputs.len > 1) { return self.fail("TODO implement codegen for asm with more than 1 output", .{}); } - const output_constraint: ?[]const u8 = for (outputs) |output| { + for (outputs) |output| { if (output != .none) { return self.fail("TODO implement codegen for non-expr asm", .{}); } @@ -5962,8 +6283,21 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { // for the string, we still use the next u32 for the null terminator. extra_i += (constraint.len + name.len + (2 + 3)) / 4; - break constraint; - } else null; + const mcv: MCValue = if (mem.eql(u8, constraint, "=r")) + .{ .register = self.register_manager.tryAllocReg(inst, gp) orelse + return self.fail("ran out of registers lowering inline asm", .{}) } + else if (mem.startsWith(u8, constraint, "={") and mem.endsWith(u8, constraint, "}")) + .{ .register = parseRegName(constraint["={".len .. 
constraint.len - "}".len]) orelse + return self.fail("unrecognized register constraint: '{s}'", .{constraint}) } + else + return self.fail("unrecognized constraint: '{s}'", .{constraint}); + args.putAssumeCapacity(name, mcv); + switch (mcv) { + .register => |reg| _ = self.register_manager.lockRegAssumeUnused(reg), + else => {}, + } + if (output == .none) result = mcv; + } for (inputs) |input| { const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]); @@ -5997,52 +6331,73 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } } - const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; - - { - var iter = std.mem.tokenize(u8, asm_source, "\n\r"); - while (iter.next()) |ins| { - if (mem.eql(u8, ins, "syscall")) { - try self.asmOpOnly(.syscall); - } else if (mem.indexOf(u8, ins, "push")) |_| { - const arg = ins[4..]; - if (mem.indexOf(u8, arg, "$")) |l| { - const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch { - return self.fail("TODO implement more inline asm int parsing", .{}); - }; - try self.asmImmediate(.push, Immediate.u(n)); - } else if (mem.indexOf(u8, arg, "%%")) |l| { - const reg_name = ins[4 + l + 2 ..]; - const reg = parseRegName(reg_name) orelse - return self.fail("unrecognized register: '{s}'", .{reg_name}); - try self.asmRegister(.push, reg); - } else return self.fail("TODO more push operands", .{}); - } else if (mem.indexOf(u8, ins, "pop")) |_| { - const arg = ins[3..]; - if (mem.indexOf(u8, arg, "%%")) |l| { - const reg_name = ins[3 + l + 2 ..]; - const reg = parseRegName(reg_name) orelse - return self.fail("unrecognized register: '{s}'", .{reg_name}); - try self.asmRegister(.pop, reg); - } else return self.fail("TODO more pop operands", .{}); - } else { - return self.fail("TODO implement support for more x86 assembly instructions", .{}); + const asm_source = mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; + var line_it = mem.tokenize(u8, asm_source, "\n\r"); + while (line_it.next()) |line| { + var mnem_it = mem.tokenize(u8, line, " \t"); + const mnem = mnem_it.next() orelse continue; + if (mem.startsWith(u8, mnem, "#")) continue; + var arg_it = mem.tokenize(u8, mnem_it.rest(), ", "); + if (std.ascii.eqlIgnoreCase(mnem, "syscall")) { + if (arg_it.next()) |trailing| if (!mem.startsWith(u8, trailing, "#")) + return self.fail("Too many operands: '{s}'", .{line}); + try self.asmOpOnly(.syscall); + } else if (std.ascii.eqlIgnoreCase(mnem, "push")) { + const src = arg_it.next() orelse + return self.fail("Not enough operands: '{s}'", .{line}); + if (arg_it.next()) |trailing| if (!mem.startsWith(u8, trailing, "#")) + return self.fail("Too many operands: '{s}'", .{line}); + if (mem.startsWith(u8, src, "$")) { + const imm = std.fmt.parseInt(u32, src["$".len..], 0) catch + return self.fail("Invalid immediate: '{s}'", .{src}); + try self.asmImmediate(.push, Immediate.u(imm)); + } else if (mem.startsWith(u8, src, "%%")) { + const reg = parseRegName(src["%%".len..]) orelse + return self.fail("Invalid register: '{s}'", .{src}); + try self.asmRegister(.push, reg); + } else return self.fail("Unsupported operand: '{s}'", .{src}); + } else if (std.ascii.eqlIgnoreCase(mnem, "pop")) { + const dst = arg_it.next() orelse + return self.fail("Not enough operands: '{s}'", .{line}); + if (arg_it.next()) |trailing| if (!mem.startsWith(u8, trailing, "#")) + return self.fail("Too many operands: '{s}'", .{line}); + if (mem.startsWith(u8, dst, "%%")) { + const reg = parseRegName(dst["%%".len..]) orelse + return 
self.fail("Invalid register: '{s}'", .{dst}); + try self.asmRegister(.pop, reg); + } else return self.fail("Unsupported operand: '{s}'", .{dst}); + } else if (std.ascii.eqlIgnoreCase(mnem, "movq")) { + const src = arg_it.next() orelse + return self.fail("Not enough operands: '{s}'", .{line}); + const dst = arg_it.next() orelse + return self.fail("Not enough operands: '{s}'", .{line}); + if (arg_it.next()) |trailing| if (!mem.startsWith(u8, trailing, "#")) + return self.fail("Too many operands: '{s}'", .{line}); + if (mem.startsWith(u8, src, "%%")) { + const colon = mem.indexOfScalarPos(u8, src, "%%".len + 2, ':'); + const src_reg = parseRegName(src["%%".len .. colon orelse src.len]) orelse + return self.fail("Invalid register: '{s}'", .{src}); + if (colon) |colon_pos| { + const src_disp = std.fmt.parseInt(i32, src[colon_pos + 1 ..], 0) catch + return self.fail("Invalid immediate: '{s}'", .{src}); + if (mem.startsWith(u8, dst, "%[") and mem.endsWith(u8, dst, "]")) { + switch (args.get(dst["%[".len .. dst.len - "]".len]) orelse + return self.fail("no matching constraint for: '{s}'", .{dst})) { + .register => |dst_reg| try self.asmRegisterMemory( + .mov, + dst_reg, + Memory.sib(.qword, .{ .base = src_reg, .disp = src_disp }), + ), + else => return self.fail("Invalid constraint: '{s}'", .{dst}), + } + } else return self.fail("Unsupported operand: '{s}'", .{dst}); + } else return self.fail("Unsupported operand: '{s}'", .{src}); } + } else { + return self.fail("Unsupported instruction: '{s}'", .{mnem}); } } - - if (output_constraint) |output| { - if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail("unrecognized asm output constraint: '{s}'", .{output}); - } - const reg_name = output[2 .. output.len - 1]; - const reg = parseRegName(reg_name) orelse - return self.fail("unrecognized register: '{s}'", .{reg_name}); - break :result .{ .register = reg }; - } else { - break :result .none; - } - }; + } simple: { var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); @@ -6277,20 +6632,25 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl .disp = -stack_offset, }), immediate); }, - 8 => { + 3, 5...7 => unreachable, + else => { // 64 bit write to memory would take two mov's anyways so we // insted just use two 32 bit writes to avoid register allocation - try self.asmMemoryImmediate(.mov, Memory.sib(.dword, .{ - .base = base_reg, - .disp = -stack_offset + 4, - }), Immediate.u(@truncate(u32, x_big >> 32))); - try self.asmMemoryImmediate(.mov, Memory.sib(.dword, .{ - .base = base_reg, - .disp = -stack_offset, - }), Immediate.u(@truncate(u32, x_big))); - }, - else => { - return self.fail("TODO implement set abi_size=large stack variable with immediate", .{}); + var offset: i32 = 0; + while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate( + .mov, + Memory.sib(.dword, .{ .base = base_reg, .disp = offset - stack_offset }), + if (ty.isSignedInt()) + Immediate.s(@truncate( + i32, + @bitCast(i64, x_big) >> (math.cast(u6, offset * 8) orelse 63), + )) + else + Immediate.u(@truncate( + u32, + if (math.cast(u6, offset * 8)) |shift| x_big >> shift else 0, + )), + ); }, } }, @@ -6391,7 +6751,7 @@ fn genInlineMemcpyRegisterRegister( }), registerAlias(tmp_reg, nearest_power_of_two)); if (nearest_power_of_two > 1) { - try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{ + try self.genShiftBinOpMir(.shr, ty, .{ .register = tmp_reg }, .{ .immediate = nearest_power_of_two * 8, }); } @@ -6894,20 +7254,37 @@ fn 
airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.air.typeOf(extra.ptr); const ptr_mcv = try self.resolveInst(extra.ptr); const val_ty = self.air.typeOf(extra.expected_value); + const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + + try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); + const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); + for (regs_lock) |lock| self.register_manager.unlockReg(lock); const exp_mcv = try self.resolveInst(extra.expected_value); - try self.genSetReg(val_ty, .rax, exp_mcv); + if (val_abi_size > 8) switch (exp_mcv) { + .stack_offset => |exp_off| { + try self.genSetReg(Type.usize, .rax, .{ .stack_offset = exp_off - 0 }); + try self.genSetReg(Type.usize, .rdx, .{ .stack_offset = exp_off - 8 }); + }, + else => return self.fail("TODO implement cmpxchg for {s}", .{@tagName(exp_mcv)}), + } else try self.genSetReg(val_ty, .rax, exp_mcv); const rax_lock = self.register_manager.lockRegAssumeUnused(.rax); defer self.register_manager.unlockReg(rax_lock); const new_mcv = try self.resolveInst(extra.new_value); - const new_reg = try self.copyToTmpRegister(val_ty, new_mcv); + const new_reg: Register = if (val_abi_size > 8) switch (new_mcv) { + .stack_offset => |new_off| new: { + try self.genSetReg(Type.usize, .rbx, .{ .stack_offset = new_off - 0 }); + try self.genSetReg(Type.usize, .rcx, .{ .stack_offset = new_off - 8 }); + break :new undefined; + }, + else => return self.fail("TODO implement cmpxchg for {s}", .{@tagName(exp_mcv)}), + } else try self.copyToTmpRegister(val_ty, new_mcv); const new_lock = self.register_manager.lockRegAssumeUnused(new_reg); defer self.register_manager.unlockReg(new_lock); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); - const ptr_mem: Memory = switch (ptr_mcv) { + const ptr_mem = switch (ptr_mcv) { .register => |reg| Memory.sib(ptr_size, .{ .base = reg }), .ptr_stack_offset => |off| Memory.sib(ptr_size, .{ .base = .rbp, .disp = -off }), else => Memory.sib(ptr_size, .{ .base = try self.copyToTmpRegister(ptr_ty, ptr_mcv) }), @@ -6916,16 +7293,30 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { defer if (mem_lock) |lock| self.register_manager.unlockReg(lock); try self.spillEflagsIfOccupied(); - _ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{ - .r1 = new_reg, - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } } }); + if (val_abi_size <= 8) { + _ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{ + .r = registerAlias(new_reg, val_abi_size), + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } } }); + } else { + _ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{ + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } }); + } const result: MCValue = result: { if (self.liveness.isUnused(inst)) break :result .dead; - self.eflags_inst = inst; - break :result .{ .register_overflow = .{ .reg = .rax, .eflags = .ne } }; + if (val_abi_size <= 8) { + self.eflags_inst = inst; + break :result .{ .register_overflow = .{ .reg = .rax, .eflags = .ne } }; + } + + const dst_mcv = try self.allocRegOrMem(inst, false); + try self.genSetStack(Type.bool, dst_mcv.stack_offset - 16, .{ .eflags = .ne }, .{}); + try self.genSetStack(Type.usize, dst_mcv.stack_offset - 8, .{ .register = .rdx }, .{}); + try self.genSetStack(Type.usize, dst_mcv.stack_offset - 0, .{ .register = .rax }, 
.{}); + break :result dst_mcv; }; return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); } @@ -6938,9 +7329,10 @@ fn atomicOp( ptr_ty: Type, val_ty: Type, unused: bool, - op: ?std.builtin.AtomicRmwOp, + rmw_op: ?std.builtin.AtomicRmwOp, order: std.builtin.AtomicOrder, ) InnerError!void { + const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); @@ -6958,7 +7350,7 @@ fn atomicOp( const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); - const ptr_mem: Memory = switch (ptr_mcv) { + const ptr_mem = switch (ptr_mcv) { .register => |reg| Memory.sib(ptr_size, .{ .base = reg }), .ptr_stack_offset => |off| Memory.sib(ptr_size, .{ .base = .rbp, .disp = -off }), else => Memory.sib(ptr_size, .{ .base = try self.copyToTmpRegister(ptr_ty, ptr_mcv) }), @@ -6966,48 +7358,197 @@ fn atomicOp( const mem_lock = if (ptr_mem.base()) |reg| self.register_manager.lockReg(reg) else null; defer if (mem_lock) |lock| self.register_manager.unlockReg(lock); - try self.genSetReg(val_ty, dst_reg, val_mcv); + const method: enum { lock, loop, libcall } = if (val_ty.isRuntimeFloat()) + .loop + else switch (rmw_op orelse .Xchg) { + .Xchg, + .Add, + .Sub, + => if (val_abi_size <= 8) .lock else if (val_abi_size <= 16) .loop else .libcall, + .And, + .Or, + .Xor, + => if (val_abi_size <= 8 and unused) .lock else if (val_abi_size <= 16) .loop else .libcall, + .Nand, + .Max, + .Min, + => if (val_abi_size <= 16) .loop else .libcall, + }; + switch (method) { + .lock => { + const tag: Mir.Inst.Tag = if (rmw_op) |op| switch (op) { + .Xchg => if (unused) .mov else .xchg, + .Add => if (unused) .add else .xadd, + .Sub => if (unused) .sub else .xadd, + .And => .@"and", + .Or => .@"or", + .Xor => .xor, + else => unreachable, + } else switch (order) { + .Unordered, .Monotonic, .Release, .AcqRel => .mov, + .Acquire => unreachable, + .SeqCst => .xchg, + }; - const need_loop = val_ty.isRuntimeFloat() or if (op) |rmw| switch (rmw) { - .Xchg, .Add, .Sub => false, - .And, .Or, .Xor => !unused, - .Nand, .Max, .Min => true, - } else false; - if (!need_loop) { - const tag: Mir.Inst.Tag = if (op) |rmw| switch (rmw) { - .Xchg => if (unused) .mov else .xchg, - .Add => if (unused) .add else .xadd, - .Sub => if (unused) .sub else .xadd, - .And => .@"and", - .Or => .@"or", - .Xor => .xor, - else => unreachable, - } else switch (order) { - .Unordered, .Monotonic, .Release, .AcqRel => .mov, - .Acquire => unreachable, - .SeqCst => .xchg, - }; - if (op == std.builtin.AtomicRmwOp.Sub and tag == .xadd) { - try self.genUnOpMir(.neg, val_ty, .{ .register = dst_reg }); - } - _ = try self.addInst(.{ .tag = tag, .ops = switch (tag) { - .mov, .xchg => .mr_sib, - .xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib, - else => unreachable, - }, .data = .{ .rx = .{ - .r1 = registerAlias(dst_reg, val_abi_size), - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } } }); - return; + try self.genSetReg(val_ty, dst_reg, val_mcv); + if (rmw_op == std.builtin.AtomicRmwOp.Sub and tag == .xadd) { + try self.genUnOpMir(.neg, val_ty, .{ .register = dst_reg }); + } + _ = try self.addInst(.{ .tag = tag, .ops = switch (tag) { + .mov, .xchg => .mr_sib, + .xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib, + else => unreachable, + }, .data = .{ .rx = .{ + .r = registerAlias(dst_reg, val_abi_size), + .payload = try 
self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } } }); + }, + .loop => _ = try self.asmJccReloc(if (val_abi_size <= 8) loop: { + try self.genSetReg(val_ty, dst_reg, val_mcv); + try self.asmRegisterMemory(.mov, registerAlias(.rax, val_abi_size), ptr_mem); + const loop = @intCast(u32, self.mir_instructions.len); + if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { + try self.genSetReg(val_ty, dst_reg, .{ .register = .rax }); + } + if (rmw_op) |op| switch (op) { + .Xchg => try self.genSetReg(val_ty, dst_reg, val_mcv), + .Add => try self.genBinOpMir(.add, val_ty, dst_mcv, val_mcv), + .Sub => try self.genBinOpMir(.sub, val_ty, dst_mcv, val_mcv), + .And => try self.genBinOpMir(.@"and", val_ty, dst_mcv, val_mcv), + .Nand => { + try self.genBinOpMir(.@"and", val_ty, dst_mcv, val_mcv); + try self.genUnOpMir(.not, val_ty, dst_mcv); + }, + .Or => try self.genBinOpMir(.@"or", val_ty, dst_mcv, val_mcv), + .Xor => try self.genBinOpMir(.xor, val_ty, dst_mcv, val_mcv), + .Min, .Max => { + const cc: Condition = switch (if (val_ty.isAbiInt()) + val_ty.intInfo(self.target.*).signedness + else + .unsigned) { + .unsigned => switch (op) { + .Min => .a, + .Max => .b, + else => unreachable, + }, + .signed => switch (op) { + .Min => .g, + .Max => .l, + else => unreachable, + }, + }; + + try self.genBinOpMir(.cmp, val_ty, dst_mcv, val_mcv); + switch (val_mcv) { + .register => |val_reg| try self.asmCmovccRegisterRegister( + registerAlias(dst_reg, val_abi_size), + registerAlias(val_reg, val_abi_size), + cc, + ), + .stack_offset => |val_off| try self.asmCmovccRegisterMemory( + registerAlias(dst_reg, val_abi_size), + Memory.sib( + Memory.PtrSize.fromSize(val_abi_size), + .{ .base = .rbp, .disp = -val_off }, + ), + cc, + ), + else => { + const val_reg = try self.copyToTmpRegister(val_ty, val_mcv); + try self.asmCmovccRegisterRegister( + registerAlias(dst_reg, val_abi_size), + registerAlias(val_reg, val_abi_size), + cc, + ); + }, + } + }, + }; + _ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{ + .r = registerAlias(dst_reg, val_abi_size), + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } } }); + break :loop loop; + } else loop: { + try self.asmRegisterMemory(.mov, .rax, Memory.sib(.qword, .{ + .base = ptr_mem.sib.base, + .scale_index = ptr_mem.sib.scale_index, + .disp = ptr_mem.sib.disp + 0, + })); + try self.asmRegisterMemory(.mov, .rdx, Memory.sib(.qword, .{ + .base = ptr_mem.sib.base, + .scale_index = ptr_mem.sib.scale_index, + .disp = ptr_mem.sib.disp + 8, + })); + const loop = @intCast(u32, self.mir_instructions.len); + switch (val_mcv) { + .stack_offset => |val_off| { + const val_lo_mem = Memory.sib(.qword, .{ .base = .rbp, .disp = 0 - val_off }); + const val_hi_mem = Memory.sib(.qword, .{ .base = .rbp, .disp = 8 - val_off }); + + if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { + try self.asmRegisterRegister(.mov, .rbx, .rax); + try self.asmRegisterRegister(.mov, .rcx, .rdx); + } + if (rmw_op) |op| switch (op) { + .Xchg => { + try self.asmRegisterMemory(.mov, .rbx, val_lo_mem); + try self.asmRegisterMemory(.mov, .rcx, val_hi_mem); + }, + .Add => { + try self.asmRegisterMemory(.add, .rbx, val_lo_mem); + try self.asmRegisterMemory(.adc, .rcx, val_hi_mem); + }, + .Sub => { + try self.asmRegisterMemory(.sub, .rbx, val_lo_mem); + try self.asmRegisterMemory(.sbb, .rcx, val_hi_mem); + }, + .And => { + try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem); + try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem); + }, + .Nand => { + try self.asmRegisterMemory(.@"and", .rbx, 
val_lo_mem); + try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem); + try self.asmRegister(.not, .rbx); + try self.asmRegister(.not, .rcx); + }, + .Or => { + try self.asmRegisterMemory(.@"or", .rbx, val_lo_mem); + try self.asmRegisterMemory(.@"or", .rcx, val_hi_mem); + }, + .Xor => { + try self.asmRegisterMemory(.xor, .rbx, val_lo_mem); + try self.asmRegisterMemory(.xor, .rcx, val_hi_mem); + }, + else => return self.fail( + "TODO implement x86 atomic loop for large abi {s}", + .{@tagName(op)}, + ), + }; + }, + else => return self.fail( + "TODO implement x86 atomic loop for large abi {s}", + .{@tagName(val_mcv)}, + ), + } + _ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{ + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } }); + break :loop loop; + }, .ne), + .libcall => return self.fail("TODO implement x86 atomic libcall", .{}), } - - return self.fail("TODO implement x86 atomic loop", .{}); } fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; + try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); + const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); + defer for (regs_lock) |lock| self.register_manager.unlockReg(lock); + const unused = self.liveness.isUnused(inst); const dst_reg = try self.register_manager.allocReg(if (unused) null else inst, gp); @@ -7587,8 +8128,8 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { switch (int_info.signedness) { .signed => { const shift = @intCast(u6, max_reg_bit_width - int_info.bits); - try self.genShiftBinOpMir(.sal, Type.isize, reg, .{ .immediate = shift }); - try self.genShiftBinOpMir(.sar, Type.isize, reg, .{ .immediate = shift }); + try self.genShiftBinOpMir(.sal, Type.isize, .{ .register = reg }, .{ .immediate = shift }); + try self.genShiftBinOpMir(.sar, Type.isize, .{ .register = reg }, .{ .immediate = shift }); }, .unsigned => { const shift = @intCast(u6, max_reg_bit_width - int_info.bits); diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index cd8389aa49..b65d22807a 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -121,7 +121,9 @@ pub fn lowerMir(emit: *Emit) InnerError!void { .sbb, .sfence, .shl, + .shld, .shr, + .shrd, .sub, .syscall, .@"test", @@ -231,10 +233,10 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE const prefix: Instruction.Prefix = switch (ops) { .lock_m_sib, .lock_m_rip, - .lock_mi_u_sib, - .lock_mi_u_rip, - .lock_mi_s_sib, - .lock_mi_s_rip, + .lock_mi_sib_u, + .lock_mi_rip_u, + .lock_mi_sib_s, + .lock_mi_rip_s, .lock_mr_sib, .lock_mr_rip, .lock_moffs_rax, @@ -249,31 +251,36 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE switch (ops) { .none => {}, - .imm_s => op1 = .{ .imm = Immediate.s(@bitCast(i32, data.imm)) }, - .imm_u => op1 = .{ .imm = Immediate.u(data.imm) }, + .i_s => op1 = .{ .imm = Immediate.s(@bitCast(i32, data.i)) }, + .i_u => op1 = .{ .imm = Immediate.u(data.i) }, .r => op1 = .{ .reg = data.r }, .rr => { op1 = .{ .reg = data.rr.r1 }; op2 = .{ .reg = data.rr.r2 }; }, + .rrr => { + op1 = .{ .reg = data.rrr.r1 }; + op2 = .{ .reg = data.rrr.r2 }; + op3 = .{ .reg = data.rrr.r3 }; + }, .ri_s, .ri_u => { const imm = switch (ops) { - .ri_s => Immediate.s(@bitCast(i32, data.ri.imm)), - .ri_u => Immediate.u(data.ri.imm), + .ri_s => Immediate.s(@bitCast(i32, data.ri.i)), + .ri_u 
=> Immediate.u(data.ri.imm), + .ri_s => Immediate.s(@bitCast(i32, data.ri.i)), + .ri_u => Immediate.u(data.ri.i), else => unreachable, }; - op1 = .{ .reg = data.ri.r1 }; + op1 = .{ .reg = data.ri.r }; op2 = .{ .imm = imm }; }, .ri64 => { const imm64 = emit.mir.extraData(Mir.Imm64, data.rx.payload).data; - op1 = .{ .reg = data.rx.r1 }; + op1 = .{ .reg = data.rx.r }; op2 = .{ .imm = Immediate.u(Mir.Imm64.decode(imm64)) }; }, .rri_s, .rri_u => { const imm = switch (ops) { - .rri_s => Immediate.s(@bitCast(i32, data.rri.imm)), - .rri_u => Immediate.u(data.rri.imm), + .rri_s => Immediate.s(@bitCast(i32, data.rri.i)), + .rri_u => Immediate.u(data.rri.i), else => unreachable, }; op1 = .{ .reg = data.rri.r1 }; @@ -288,21 +295,21 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE const mrip = emit.mir.extraData(Mir.MemoryRip, data.payload).data; op1 = .{ .mem = Mir.MemoryRip.decode(mrip) }; }, - .mi_s_sib, .mi_u_sib, .lock_mi_s_sib, .lock_mi_u_sib => { - const msib = emit.mir.extraData(Mir.MemorySib, data.xi.payload).data; + .mi_sib_s, .mi_sib_u, .lock_mi_sib_s, .lock_mi_sib_u => { + const msib = emit.mir.extraData(Mir.MemorySib, data.ix.payload).data; const imm = switch (ops) { - .mi_s_sib, .lock_mi_s_sib => Immediate.s(@bitCast(i32, data.xi.imm)), - .mi_u_sib, .lock_mi_u_sib => Immediate.u(data.xi.imm), + .mi_sib_s, .lock_mi_sib_s => Immediate.s(@bitCast(i32, data.ix.i)), + .mi_sib_u, .lock_mi_sib_u => Immediate.u(data.ix.i), else => unreachable, }; op1 = .{ .mem = Mir.MemorySib.decode(msib) }; op2 = .{ .imm = imm }; }, - .mi_u_rip, .mi_s_rip, .lock_mi_u_rip, .lock_mi_s_rip => { - const mrip = emit.mir.extraData(Mir.MemoryRip, data.xi.payload).data; + .mi_rip_u, .mi_rip_s, .lock_mi_rip_u, .lock_mi_rip_s => { + const mrip = emit.mir.extraData(Mir.MemoryRip, data.ix.payload).data; const imm = switch (ops) { - .mi_s_rip, .lock_mi_s_rip => Immediate.s(@bitCast(i32, data.xi.imm)), - .mi_u_rip, .lock_mi_u_rip => Immediate.u(data.xi.imm), + .mi_rip_s, .lock_mi_rip_s => Immediate.s(@bitCast(i32, data.ix.i)), + .mi_rip_u, .lock_mi_rip_u => Immediate.u(data.ix.i), else => unreachable, }; op1 = .{ .mem = Mir.MemoryRip.decode(mrip) }; op2 = .{ .imm = imm }; @@ -310,7 +317,7 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE }, .rm_sib, .mr_sib, .lock_mr_sib => { const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data; - const op_r = .{ .reg = data.rx.r1 }; + const op_r = .{ .reg = data.rx.r }; const op_m = .{ .mem = Mir.MemorySib.decode(msib) }; switch (ops) { .rm_sib => { @@ -326,7 +333,7 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE }, .rm_rip, .mr_rip, .lock_mr_rip => { const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data; - const op_r = .{ .reg = data.rx.r1 }; + const op_r = .{ .reg = data.rx.r }; const op_m = .{ .mem = Mir.MemoryRip.decode(mrip) }; switch (ops) { .rm_rip => { @@ -340,6 +347,30 @@ fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerE else => unreachable, } }, + .mrr_sib => { + const msib = emit.mir.extraData(Mir.MemorySib, data.rrx.payload).data; + op1 = .{ .mem = Mir.MemorySib.decode(msib) }; + op2 = .{ .reg = data.rrx.r1 }; + op3 = .{ .reg = data.rrx.r2 }; + }, + .mrr_rip => { + const mrip = emit.mir.extraData(Mir.MemoryRip, data.rrx.payload).data; + op1 = .{ .mem = Mir.MemoryRip.decode(mrip) }; + op2 = .{ .reg = data.rrx.r1 }; + op3 = .{ .reg = data.rrx.r2 }; + }, + .mri_sib => { + const msib = emit.mir.extraData(Mir.MemorySib, data.rix.payload).data; + op1 = .{ .mem = Mir.MemorySib.decode(msib) }; + op2 = .{ .reg = data.rix.r }; + op3
= .{ .imm = Immediate.u(data.rix.i) }; + }, + .mri_rip => { + const mrip = emit.mir.extraData(Mir.MemoryRip, data.rix.payload).data; + op1 = .{ .mem = Mir.MemoryRip.decode(mrip) }; + op2 = .{ .reg = data.rix.r }; + op3 = .{ .imm = Immediate.u(data.rix.i) }; + }, else => return emit.fail("TODO handle generic encoding: {s}, {s}", .{ @tagName(mnemonic), @tagName(ops), @@ -451,12 +482,12 @@ fn mirMovsx(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { }, .rm_sib => { const msib = emit.mir.extraData(Mir.MemorySib, data.rx.payload).data; - op1 = .{ .reg = data.rx.r1 }; + op1 = .{ .reg = data.rx.r }; op2 = .{ .mem = Mir.MemorySib.decode(msib) }; }, .rm_rip => { const mrip = emit.mir.extraData(Mir.MemoryRip, data.rx.payload).data; - op1 = .{ .reg = data.rx.r1 }; + op1 = .{ .reg = data.rx.r }; op2 = .{ .mem = Mir.MemoryRip.decode(mrip) }; }, else => unreachable, // TODO @@ -495,7 +526,7 @@ fn mirCmovcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { const extra = emit.mir.extraData(Mir.MemorySib, data.payload).data; const mnemonic = mnemonicFromConditionCode("cmov", data.cc); return emit.encode(mnemonic, .{ - .op1 = .{ .reg = data.r1 }, + .op1 = .{ .reg = data.r }, .op2 = .{ .mem = Mir.MemorySib.decode(extra) }, }); }, @@ -504,7 +535,7 @@ fn mirCmovcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { const extra = emit.mir.extraData(Mir.MemoryRip, data.payload).data; const mnemonic = mnemonicFromConditionCode("cmov", data.cc); return emit.encode(mnemonic, .{ - .op1 = .{ .reg = data.r1 }, + .op1 = .{ .reg = data.r }, .op2 = .{ .mem = Mir.MemoryRip.decode(extra) }, }); }, @@ -519,7 +550,7 @@ fn mirSetcc(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { const data = emit.mir.instructions.items(.data)[inst].r_cc; const mnemonic = mnemonicFromConditionCode("set", data.cc); return emit.encode(mnemonic, .{ - .op1 = .{ .reg = data.r1 }, + .op1 = .{ .reg = data.r }, }); }, .m_sib_cc => { diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 891fc4e9a1..de669a9f8d 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -262,15 +262,15 @@ pub fn format( try writer.print("+{s} ", .{tag}); }, .m, .mi, .m1, .mc => try writer.print("/{d} ", .{encoding.modRmExt()}), - .mr, .rm, .rmi => try writer.writeAll("/r "), + .mr, .rm, .rmi, .mri, .mrc => try writer.writeAll("/r "), } switch (encoding.op_en) { - .i, .d, .zi, .oi, .mi, .rmi => { + .i, .d, .zi, .oi, .mi, .rmi, .mri => { const op = switch (encoding.op_en) { .i, .d => encoding.op1, .zi, .oi, .mi => encoding.op2, - .rmi => encoding.op3, + .rmi, .mri => encoding.op3, else => unreachable, }; const tag = switch (op) { @@ -285,7 +285,7 @@ pub fn format( }; try writer.print("{s} ", .{tag}); }, - .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm => {}, + .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc => {}, } try writer.print("{s} ", .{@tagName(encoding.mnemonic)}); @@ -334,7 +334,7 @@ pub const Mnemonic = enum { rcl, rcr, ret, rol, ror, sal, sar, sbb, scas, scasb, scasd, scasq, scasw, - shl, shr, sub, syscall, + shl, shld, shr, shrd, sub, syscall, seta, setae, setb, setbe, setc, sete, setg, setge, setl, setle, setna, setnae, setnb, setnbe, setnc, setne, setng, setnge, setnl, setnle, setno, setnp, setns, setnz, seto, setp, setpe, setpo, sets, setz, @@ -374,7 +374,8 @@ pub const OpEn = enum { i, zi, d, m, fd, td, - m1, mc, mi, mr, rm, rmi, + m1, mc, mi, mr, rm, + rmi, mri, mrc, // zig fmt: on }; diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 59c292c500..a6a4115814 100644 --- 
a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -138,8 +138,12 @@ pub const Inst = struct { sfence, /// Logical shift left shl, + /// Double precision shift left + shld, /// Logical shift right shr, + /// Double precision shift right + shrd, /// Subtract sub, /// Syscall @@ -284,10 +288,10 @@ pub const Inst = struct { ri64, /// Immediate (sign-extended) operand. /// Uses `imm` payload. - imm_s, + i_s, /// Immediate (unsigned) operand. /// Uses `imm` payload. - imm_u, + i_u, /// Relative displacement operand. /// Uses `imm` payload. rel, @@ -316,23 +320,35 @@ pub const Inst = struct { /// Uses `x_cc` with extra data of type `MemoryRip`. m_rip_cc, /// Memory (SIB), immediate (unsigned) operands. - /// Uses `xi` payload with extra data of type `MemorySib`. - mi_u_sib, + /// Uses `ix` payload with extra data of type `MemorySib`. + mi_sib_u, /// Memory (RIP), immediate (unsigned) operands. - /// Uses `xi` payload with extra data of type `MemoryRip`. - mi_u_rip, + /// Uses `ix` payload with extra data of type `MemoryRip`. + mi_rip_u, /// Memory (SIB), immediate (sign-extend) operands. - /// Uses `xi` payload with extra data of type `MemorySib`. - mi_s_sib, + /// Uses `ix` payload with extra data of type `MemorySib`. + mi_sib_s, /// Memory (RIP), immediate (sign-extend) operands. - /// Uses `xi` payload with extra data of type `MemoryRip`. - mi_s_rip, + /// Uses `ix` payload with extra data of type `MemoryRip`. + mi_rip_s, /// Memory (SIB), register operands. /// Uses `rx` payload with extra data of type `MemorySib`. mr_sib, /// Memory (RIP), register operands. /// Uses `rx` payload with extra data of type `MemoryRip`. mr_rip, + /// Memory (SIB), register, register operands. + /// Uses `rrx` payload with extra data of type `MemorySib`. + mrr_sib, + /// Memory (RIP), register, register operands. + /// Uses `rrx` payload with extra data of type `MemoryRip`. + mrr_rip, + /// Memory (SIB), register, immediate (byte) operands. + /// Uses `rix` payload with extra data of type `MemorySib`. + mri_sib, + /// Memory (RIP), register, immediate (byte) operands. + /// Uses `rix` payload with extra data of type `MemoryRip`. + mri_rip, /// Rax, Memory moffs. /// Uses `payload` with extra data of type `MemoryMoffs`. rax_moffs, @@ -347,16 +363,16 @@ pub const Inst = struct { lock_m_rip, /// Memory (SIB), immediate (unsigned) operands with lock prefix. /// Uses `xi` payload with extra data of type `MemorySib`. - lock_mi_u_sib, + lock_mi_sib_u, /// Memory (RIP), immediate (unsigned) operands with lock prefix. /// Uses `xi` payload with extra data of type `MemoryRip`. - lock_mi_u_rip, + lock_mi_rip_u, /// Memory (SIB), immediate (sign-extend) operands with lock prefix. /// Uses `xi` payload with extra data of type `MemorySib`. - lock_mi_s_sib, + lock_mi_sib_s, /// Memory (RIP), immediate (sign-extend) operands with lock prefix. /// Uses `xi` payload with extra data of type `MemoryRip`. - lock_mi_s_rip, + lock_mi_rip_s, /// Memory (SIB), register operands with lock prefix. /// Uses `rx` payload with extra data of type `MemorySib`. lock_mr_sib, @@ -400,7 +416,7 @@ pub const Inst = struct { cc: bits.Condition, }, /// A 32-bit immediate value. - imm: u32, + i: u32, r: Register, rr: struct { r1: Register, @@ -414,16 +430,16 @@ pub const Inst = struct { rri: struct { r1: Register, r2: Register, - imm: u32, + i: u32, }, /// Condition code (CC), followed by custom payload found in extra. x_cc: struct { - payload: u32, cc: bits.Condition, + payload: u32, }, /// Register with condition code (CC). 
r_cc: struct { - r1: Register, + r: Register, cc: bits.Condition, }, /// Register, register with condition code (CC). @@ -434,24 +450,36 @@ pub const Inst = struct { }, /// Register, immediate. ri: struct { - r1: Register, - imm: u32, + r: Register, + i: u32, }, /// Register, followed by custom payload found in extra. rx: struct { - r1: Register, + r: Register, payload: u32, }, /// Register with condition code (CC), followed by custom payload found in extra. rx_cc: struct { - r1: Register, + r: Register, cc: bits.Condition, payload: u32, }, - /// Custom payload followed by an immediate. - xi: struct { + /// Immediate, followed by Custom payload found in extra. + ix: struct { + i: u32, + payload: u32, + }, + /// Register, register, followed by Custom payload found in extra. + rrx: struct { + r1: Register, + r2: Register, + payload: u32, + }, + /// Register, byte immediate, followed by Custom payload found in extra. + rix: struct { + r: Register, + i: u8, payload: u32, - imm: u32, }, /// String instruction prefix and width. string: struct { diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig index c10bfe4039..76ad26a9a0 100644 --- a/src/arch/x86_64/bits.zig +++ b/src/arch/x86_64/bits.zig @@ -411,20 +411,17 @@ pub const Memory = union(enum) { dword, qword, tbyte, + dqword, pub fn fromSize(size: u32) PtrSize { - return if (size <= 1) - .byte - else if (size <= 2) - .word - else if (size <= 4) - .dword - else if (size <= 8) - .qword - else if (size == 10) - .tbyte - else - unreachable; + return switch (size) { + 1...1 => .byte, + 2...2 => .word, + 3...4 => .dword, + 5...8 => .qword, + 9...16 => .dqword, + else => unreachable, + }; } pub fn fromBitSize(bit_size: u64) PtrSize { @@ -434,6 +431,7 @@ pub const Memory = union(enum) { 32 => .dword, 64 => .qword, 80 => .tbyte, + 128 => .dqword, else => unreachable, }; } @@ -445,6 +443,7 @@ pub const Memory = union(enum) { .dword => 32, .qword => 64, .tbyte => 80, + .dqword => 128, }; } }; diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index b3de7ec1bd..05f66062ac 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -117,7 +117,7 @@ pub const Instruction = struct { pub fn new(mnemonic: Mnemonic, args: Init) !Instruction { const encoding = (try Encoding.findByMnemonic(mnemonic, args)) orelse { - log.debug("no encoding found for: {s} {s} {s} {s} {s} {s}", .{ + log.err("no encoding found for: {s} {s} {s} {s} {s} {s}", .{ @tagName(args.prefix), @tagName(mnemonic), @tagName(Encoding.Op.fromOperand(args.op1)), @@ -174,7 +174,7 @@ pub const Instruction = struct { .td => try encoder.imm64(inst.op1.mem.moffs.offset), else => { const mem_op = switch (encoding.op_en) { - .m, .mi, .m1, .mc, .mr => inst.op1, + .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.op1, .rm, .rmi => inst.op2, else => unreachable, }; @@ -182,7 +182,7 @@ pub const Instruction = struct { .reg => |reg| { const rm = switch (encoding.op_en) { .m, .mi, .m1, .mc => encoding.modRmExt(), - .mr => inst.op2.reg.lowEnc(), + .mr, .mri, .mrc => inst.op2.reg.lowEnc(), .rm, .rmi => inst.op1.reg.lowEnc(), else => unreachable, }; @@ -191,7 +191,7 @@ pub const Instruction = struct { .mem => |mem| { const op = switch (encoding.op_en) { .m, .mi, .m1, .mc => .none, - .mr => inst.op2, + .mr, .mri, .mrc => inst.op2, .rm, .rmi => inst.op1, else => unreachable, }; @@ -202,7 +202,7 @@ pub const Instruction = struct { switch (encoding.op_en) { .mi => try encodeImm(inst.op2.imm, encoding.op2, encoder), - .rmi => try encodeImm(inst.op3.imm, encoding.op3, encoder), 
+ .rmi, .mri => try encodeImm(inst.op3.imm, encoding.op3, encoder), else => {}, } }, @@ -251,7 +251,7 @@ pub const Instruction = struct { else => unreachable, }; } else null, - .m, .mi, .m1, .mc, .mr => if (inst.op1.isSegmentRegister()) blk: { + .m, .mi, .m1, .mc, .mr, .mri, .mrc => if (inst.op1.isSegmentRegister()) blk: { break :blk switch (inst.op1) { .reg => |r| r, .mem => |m| m.base().?, @@ -275,13 +275,11 @@ pub const Instruction = struct { switch (op_en) { .np, .i, .zi, .fd, .td, .d => {}, - .o, .oi => { - rex.b = inst.op1.reg.isExtended(); - }, - .m, .mi, .m1, .mc, .mr, .rm, .rmi => { + .o, .oi => rex.b = inst.op1.reg.isExtended(), + .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc => { const r_op = switch (op_en) { .rm, .rmi => inst.op1, - .mr => inst.op2, + .mr, .mri, .mrc => inst.op2, else => null, }; if (r_op) |op| { @@ -290,7 +288,7 @@ pub const Instruction = struct { const b_x_op = switch (op_en) { .rm, .rmi => inst.op2, - .m, .mi, .m1, .mc, .mr => inst.op1, + .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.op1, else => unreachable, }; switch (b_x_op) { diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 23a125789b..9683ef991a 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -257,8 +257,8 @@ pub const table = &[_]Entry{ .{ .cmpxchg, .mr, .rm8, .r8, .none, .none, &.{ 0x0f, 0xb0 }, 0, .none }, .{ .cmpxchg, .mr, .rm8, .r8, .none, .none, &.{ 0x0f, 0xb0 }, 0, .rex }, - .{ .cmpxchg, .mr, .rm16, .r16, .none, .none, &.{ 0x0f, 0xb1 }, 0, .rex }, - .{ .cmpxchg, .mr, .rm32, .r32, .none, .none, &.{ 0x0f, 0xb1 }, 0, .rex }, + .{ .cmpxchg, .mr, .rm16, .r16, .none, .none, &.{ 0x0f, 0xb1 }, 0, .none }, + .{ .cmpxchg, .mr, .rm32, .r32, .none, .none, &.{ 0x0f, 0xb1 }, 0, .none }, .{ .cmpxchg, .mr, .rm64, .r64, .none, .none, &.{ 0x0f, 0xb1 }, 0, .long }, .{ .cmpxchg8b , .m, .m64, .none, .none, .none, &.{ 0x0f, 0xc7 }, 1, .none }, @@ -693,6 +693,13 @@ pub const table = &[_]Entry{ .{ .shl, .mi, .rm32, .imm8, .none, .none, &.{ 0xc1 }, 4, .none }, .{ .shl, .mi, .rm64, .imm8, .none, .none, &.{ 0xc1 }, 4, .long }, + .{ .shld, .mri, .rm16, .r16, .imm8, .none, &.{ 0x0f, 0xa4 }, 0, .none }, + .{ .shld, .mrc, .rm16, .r16, .cl, .none, &.{ 0x0f, 0xa5 }, 0, .none }, + .{ .shld, .mri, .rm32, .r32, .imm8, .none, &.{ 0x0f, 0xa4 }, 0, .none }, + .{ .shld, .mri, .rm64, .r64, .imm8, .none, &.{ 0x0f, 0xa4 }, 0, .long }, + .{ .shld, .mrc, .rm32, .r32, .cl, .none, &.{ 0x0f, 0xa5 }, 0, .none }, + .{ .shld, .mrc, .rm64, .r64, .cl, .none, &.{ 0x0f, 0xa5 }, 0, .long }, + .{ .shr, .m1, .rm8, .unity, .none, .none, &.{ 0xd0 }, 5, .none }, .{ .shr, .m1, .rm8, .unity, .none, .none, &.{ 0xd0 }, 5, .rex }, .{ .shr, .m1, .rm16, .unity, .none, .none, &.{ 0xd1 }, 5, .none }, @@ -709,6 +716,13 @@ pub const table = &[_]Entry{ .{ .shr, .mi, .rm32, .imm8, .none, .none, &.{ 0xc1 }, 5, .none }, .{ .shr, .mi, .rm64, .imm8, .none, .none, &.{ 0xc1 }, 5, .long }, + .{ .shrd, .mri, .rm16, .r16, .imm8, .none, &.{ 0x0f, 0xac }, 0, .none }, + .{ .shrd, .mrc, .rm16, .r16, .cl, .none, &.{ 0x0f, 0xad }, 0, .none }, + .{ .shrd, .mri, .rm32, .r32, .imm8, .none, &.{ 0x0f, 0xac }, 0, .none }, + .{ .shrd, .mri, .rm64, .r64, .imm8, .none, &.{ 0x0f, 0xac }, 0, .long }, + .{ .shrd, .mrc, .rm32, .r32, .cl, .none, &.{ 0x0f, 0xad }, 0, .none }, + .{ .shrd, .mrc, .rm64, .r64, .cl, .none, &.{ 0x0f, 0xad }, 0, .long }, + .{ .stos, .np, .m8, .none, .none, .none, &.{ 0xaa }, 0, .none }, .{ .stos, .np, .m16, .none, .none, .none, &.{ 0xab }, 0, .none }, .{ .stos, .np, .m32, .none, .none, .none, &.{ 0xab }, 0, 
.none }, diff --git a/src/codegen.zig b/src/codegen.zig index c48200e845..a99ff18dfd 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1063,13 +1063,12 @@ pub fn genTypedValue( }, .Optional => { if (typed_value.ty.isPtrLikeOptional()) { - if (typed_value.val.isNull()) - return GenResult.mcv(.{ .immediate = 0 }); + if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); var buf: Type.Payload.ElemType = undefined; return genTypedValue(bin_file, src_loc, .{ .ty = typed_value.ty.optionalChild(&buf), - .val = typed_value.val, + .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, }, owner_decl_index); } else if (typed_value.ty.abiSize(target) == 1) { return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) }); diff --git a/src/register_manager.zig b/src/register_manager.zig index 1a5d2fd501..fe53ba3b95 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -149,17 +149,26 @@ pub fn RegisterManager( return RegisterLock{ .register = reg }; } + /// Like `lockReg` but locks multiple registers. + pub fn lockRegs( + self: *Self, + comptime count: comptime_int, + regs: [count]Register, + ) [count]?RegisterLock { + var results: [count]?RegisterLock = undefined; + for (&results, regs) |*result, reg| result.* = self.lockReg(reg); + return results; + } + /// Like `lockRegAssumeUnused` but locks multiple registers. pub fn lockRegsAssumeUnused( self: *Self, comptime count: comptime_int, regs: [count]Register, ) [count]RegisterLock { - var buf: [count]RegisterLock = undefined; - for (regs, 0..) |reg, i| { - buf[i] = self.lockRegAssumeUnused(reg); - } - return buf; + var results: [count]RegisterLock = undefined; + for (&results, regs) |*result, reg| result.* = self.lockRegAssumeUnused(reg); + return results; } /// Unlocks the register allowing its re-allocation and re-use. 
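// Not part of the patch: a minimal sketch, in ordinary Zig, of the kind of operation the
// behavior tests re-enabled below exercise on the self-hosted x86_64 backend -- a 16-byte
// compare-and-exchange, which the airCmpxchg/atomicOp changes above now lower via the lock
// cmpxchg16b sequence (rax:rdx holding the expected value, rbx:rcx the new one). The test
// name and the concrete values here are made up purely for illustration.
const std = @import("std");

test "128-bit cmpxchg sketch" {
    var x: u128 = 1234;
    // Expected value matches, so the swap succeeds and null is returned.
    try std.testing.expect(@cmpxchgStrong(u128, &x, 1234, 5678, .SeqCst, .SeqCst) == null);
    try std.testing.expect(x == 5678);
    // Expected value no longer matches, so the current value is returned and x is unchanged.
    try std.testing.expectEqual(@as(?u128, 5678), @cmpxchgStrong(u128, &x, 1234, 42, .SeqCst, .SeqCst));
}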
diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 96b1be1778..b2d9816c18 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -70,7 +70,6 @@ test "array concat with undefined" { test "array concat with tuple" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const array: [2]u8 = .{ 1, 2 }; { @@ -641,7 +640,6 @@ test "tuple to array handles sentinel" { } test "array init of container level array variable" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index a1e3af6e9a..e6000cd848 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual; test "cmpxchg" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -96,7 +95,6 @@ test "cmpxchg with ptr" { test "cmpxchg with ignored result" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -143,7 +141,6 @@ var a_global_variable = @as(u32, 1234); test "cmpxchg on a global variable" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -384,7 +381,6 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void { test "atomics with different types" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 552080c836..8ac87bb9c0 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -157,7 +157,6 @@ test "bitcast generates a temporary value" { } test "@bitCast packed structs at runtime and comptime" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend 
== .stage2_sparc64) return error.SkipZigTest; // TODO @@ -215,7 +214,6 @@ test "@bitCast extern structs at runtime and comptime" { } test "bitcast packed struct to integer and back" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/10684.zig b/test/behavior/bugs/10684.zig index 8b0bd6ebca..ef104a3f0c 100644 --- a/test/behavior/bugs/10684.zig +++ b/test/behavior/bugs/10684.zig @@ -4,7 +4,6 @@ const expectEqualStrings = std.testing.expectEqualStrings; test "slicing slices" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/12051.zig b/test/behavior/bugs/12051.zig index efbfc88404..5e2087d422 100644 --- a/test/behavior/bugs/12051.zig +++ b/test/behavior/bugs/12051.zig @@ -3,7 +3,6 @@ const builtin = @import("builtin"); test { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/12092.zig b/test/behavior/bugs/12092.zig index 3a7b9766a3..216138d748 100644 --- a/test/behavior/bugs/12092.zig +++ b/test/behavior/bugs/12092.zig @@ -15,7 +15,6 @@ fn takeFoo(foo: *const Foo) !void { test { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/12142.zig b/test/behavior/bugs/12142.zig index db303d617a..1efbd0dbb4 100644 --- a/test/behavior/bugs/12142.zig +++ b/test/behavior/bugs/12142.zig @@ -20,7 +20,6 @@ fn letter(e: Letter) u8 { test { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/12450.zig b/test/behavior/bugs/12450.zig index 5161e3ffd3..89e5c774e0 100644 --- a/test/behavior/bugs/12450.zig +++ b/test/behavior/bugs/12450.zig @@ -10,7 +10,6 @@ var buffer: [256]u8 = undefined; test { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/2578.zig b/test/behavior/bugs/2578.zig index ad4fb133e4..ff8ba141fa 100644 --- a/test/behavior/bugs/2578.zig +++ b/test/behavior/bugs/2578.zig @@ -14,7 +14,6 @@ fn bar(pointer: ?*anyopaque) void { test "fixed" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO bar(t); diff --git a/test/behavior/bugs/6456.zig b/test/behavior/bugs/6456.zig index 03c687232f..3dbec7bc70 100644 --- a/test/behavior/bugs/6456.zig +++ b/test/behavior/bugs/6456.zig @@ -11,7 +11,6 @@ const text = ; test "issue 6456" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/726.zig b/test/behavior/bugs/726.zig index 1c552e1df1..cc5c2a5fb7 100644 --- a/test/behavior/bugs/726.zig +++ b/test/behavior/bugs/726.zig @@ -4,7 +4,6 @@ const builtin = @import("builtin"); test "@ptrCast from const to nullable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const c: u8 = 4; @@ -15,7 +14,6 @@ test "@ptrCast from const to nullable" { test "@ptrCast from var in empty struct to nullable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const container = struct { diff --git a/test/behavior/bugs/8646.zig b/test/behavior/bugs/8646.zig index 2e181a682e..da9359a60a 100644 --- a/test/behavior/bugs/8646.zig +++ b/test/behavior/bugs/8646.zig @@ -8,7 +8,6 @@ const array = [_][]const []const u8{ test { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/9584.zig b/test/behavior/bugs/9584.zig index f80ff05228..307f1689bf 100644 --- a/test/behavior/bugs/9584.zig +++ b/test/behavior/bugs/9584.zig @@ -44,7 +44,6 @@ pub fn b(x: *X) !void { } test { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/empty_tuple_fields.zig b/test/behavior/empty_tuple_fields.zig index 7309dc9b3e..9f1d4dee1a 100644 --- a/test/behavior/empty_tuple_fields.zig +++ b/test/behavior/empty_tuple_fields.zig @@ -3,7 +3,6 @@ const builtin = @import("builtin"); test "empty file level struct" { if (builtin.zig_backend == .stage2_arm) return 
error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const T = @import("empty_file_level_struct.zig"); @@ -15,7 +14,6 @@ test "empty file level struct" { test "empty file level union" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const T = @import("empty_file_level_union.zig"); diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 5113e21452..e854764649 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -96,7 +96,6 @@ test "discard the result of a function that returns a struct" { } test "inline function call that calls optional function pointer, return pointer at callsite interacts correctly with callsite return type" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/lower_strlit_to_vector.zig b/test/behavior/lower_strlit_to_vector.zig index 427379636e..948d708aa7 100644 --- a/test/behavior/lower_strlit_to_vector.zig +++ b/test/behavior/lower_strlit_to_vector.zig @@ -2,7 +2,6 @@ const std = @import("std"); const builtin = @import("builtin"); test "strlit to vector" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/math.zig b/test/behavior/math.zig index d7b8e4764b..9e3c2b02fd 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -560,7 +560,6 @@ fn testUnsignedNegationWrappingEval(x: u16) !void { test "negation wrapping" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO try expectEqual(@as(u1, 1), negateWrap(u1, 1)); } diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index 95b39f2170..9fb0a617a3 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -431,7 +431,6 @@ test "alignment of wrapping an optional payload" { test "Optional slice size is optimized" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO try expect(@sizeOf(?[]u8) == @sizeOf([]u8)); diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 85214bd7d8..a7dfd46064 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -120,7 +120,6 @@ test "consistent size of packed structs" { } test "correct sizeOf and offsets in packed structs" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) 
return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -188,7 +187,6 @@ test "correct sizeOf and offsets in packed structs" {
 }
 
 test "nested packed structs" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -254,7 +252,6 @@ test "regular in irregular packed struct" {
 }
 
 test "byte-aligned field pointer offsets" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -397,7 +394,6 @@ test "@ptrToInt on a packed struct field" {
 }
 
 test "optional pointer in packed struct" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -530,7 +526,6 @@ test "nested packed struct field access test" {
 test "runtime init of unnamed packed struct type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     var z: u8 = 123;
@@ -545,7 +540,6 @@ test "runtime init of unnamed packed struct type" {
 test "packed struct passed to callconv(.C) function" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     const S = struct {
@@ -571,7 +565,6 @@ test "packed struct passed to callconv(.C) function" {
 test "overaligned pointer to packed struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 
     const S = packed struct { a: u32, b: u32 };
     var foo: S align(4) = .{ .a = 123, .b = 456 };
diff --git a/test/behavior/packed_struct_explicit_backing_int.zig b/test/behavior/packed_struct_explicit_backing_int.zig
index b5d6ed24fb..fab43816da 100644
--- a/test/behavior/packed_struct_explicit_backing_int.zig
+++ b/test/behavior/packed_struct_explicit_backing_int.zig
@@ -6,7 +6,6 @@ const native_endian = builtin.cpu.arch.endian();
 
 test "packed struct explicit backing integer" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index e5ccfec543..0532212559 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -412,7 +412,6 @@ test "@ptrToInt on null optional at comptime" {
 test "indexing array with sentinel returns correct type" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     var s: [:0]const u8 = "abc";
@@ -497,7 +496,6 @@ test "pointer to constant decl preserves alignment" {
 test "ptrCast comptime known slice to C pointer" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     const s: [:0]const u8 = "foo";
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 2a0944a5b6..029f6838d0 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -688,7 +688,6 @@ test "slice field ptr var" {
 test "global slice field access" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     const S = struct {
@@ -733,7 +732,6 @@ test "empty slice ptr is non null" {
 test "slice decays to many pointer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 
     var buf: [8]u8 = "abcdefg\x00".*;
     const p: [*:0]const u8 = buf[0..7 :0];
@@ -744,7 +742,6 @@ test "write through pointer to optional slice arg" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 
     const S = struct {
         fn bar(foo: *?[]const u8) !void {
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index b250b5b087..b59615f01a 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -387,7 +387,6 @@ const APackedStruct = packed struct {
 test "packed struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     var foo = APackedStruct{
@@ -496,7 +495,6 @@ const Bitfields = packed struct {
 test "packed struct fields are ordered from LSB to MSB" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
     var all: u64 = 0x7765443322221111;
@@ -632,7 +630,6 @@ test "default struct initialization fields" {
 }
 
 test "packed array 24bits" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -827,7 +824,6 @@ test "non-packed struct with u128 entry in union" {
 }
 
 test "packed struct field passed to generic function" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -939,7 +935,6 @@ test "comptime struct field" {
 }
 
 test "tuple element initialized with fn call" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 132cef5c1e..52f5b79723 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -620,7 +620,6 @@ test "switch on error set with single else" {
 }
 
 test "switch capture copies its payload" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/threadlocal.zig b/test/behavior/threadlocal.zig
index 1f1bc6bea4..f025e99ee7 100644
--- a/test/behavior/threadlocal.zig
+++ b/test/behavior/threadlocal.zig
@@ -21,7 +21,6 @@ test "thread local variable" {
 
 test "pointer to thread local array" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {
diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig
index 6d8d4eca6d..bc19cddc22 100644
--- a/test/behavior/translate_c_macros.zig
+++ b/test/behavior/translate_c_macros.zig
@@ -23,7 +23,6 @@ test "casting to void with a macro" {
 }
 
 test "initializer list expression" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -52,7 +51,6 @@ test "reference to a struct type" {
 
 test "cast negative integer to pointer" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -98,7 +96,6 @@ test "casting or calling a value with a paren-surrounded macro" {
 
 test "nested comma operator" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -109,7 +106,6 @@ test "nested comma operator" {
 
 test "cast functions" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -123,7 +119,6 @@ test "cast functions" {
 
 test "large integer macro" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -133,7 +128,6 @@ test "large integer macro" {
 
 test "string literal macro with embedded tab character" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -143,7 +137,6 @@ test "string literal macro with embedded tab character" {
 
 test "string and char literals that are not UTF-8 encoded. Issue #12784" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -188,7 +181,6 @@ test "Macro that uses division operator. Issue #13162" {
 
 test "Macro that uses remainder operator. Issue #13346" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 3f557bc40e..1cf68a0769 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -381,7 +381,6 @@ test "tuple of struct concatenation and coercion to array" {
 test "nested runtime conditionals in tuple initializer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 
     var data: u8 = 0;
     const x = .{
diff --git a/test/behavior/tuple_declarations.zig b/test/behavior/tuple_declarations.zig
index 87d4997c8b..74ee7da1dd 100644
--- a/test/behavior/tuple_declarations.zig
+++ b/test/behavior/tuple_declarations.zig
@@ -7,7 +7,6 @@ const expectEqualStrings = testing.expectEqualStrings;
 test "tuple declaration type info" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 
     {
         const T = struct { comptime u32 align(2) = 1, []const u8 };
@@ -57,7 +56,6 @@ test "tuple declaration type info" {
 test "Tuple declaration usage" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 
     const T = struct { u32, []const u8 };
     var t: T = .{ 1, "foo" };
diff --git a/test/behavior/type.zig b/test/behavior/type.zig
index 7f44f350d1..a12949fffd 100644
--- a/test/behavior/type.zig
+++ b/test/behavior/type.zig
@@ -200,7 +200,6 @@ test "Type.ErrorUnion" {
 
 test "Type.Opaque" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -348,7 +347,6 @@ test "Type.Struct" {
 }
 
 test "Type.Enum" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index 495c1f3195..5a1ab7c2aa 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -568,7 +568,6 @@ test "value from struct @typeInfo default_value can be loaded at comptime" {
 test "@typeInfo decls and usingnamespace" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
 
     const A = struct {
         const x = 5;
diff --git a/test/behavior/typename.zig b/test/behavior/typename.zig
index 92dc428903..e8327a1981 100644
--- a/test/behavior/typename.zig
+++ b/test/behavior/typename.zig
@@ -64,7 +64,6 @@ test "anon field init" {
 }
 
 test "basic" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -228,7 +227,6 @@ test "local variable" {
 }
 
 test "comptime parameters not converted to anytype in function type" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index b78bac5c3e..35bdca270e 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -1408,7 +1408,6 @@ test "union field ptr - zero sized field" {
 }
 
 test "packed union in packed struct" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -1494,7 +1493,6 @@ test "union reassignment can use previous value" {
 }
 
 test "packed union with zero-bit field" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 562e9aba20..3b716692ef 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -1267,7 +1267,6 @@ test "store to vector in slice" {
 test "addition of vectors represented as strings" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
 
     const V = @Vector(3, u8);
     const foo: V = "foo".*;
diff --git a/test/tests.zig b/test/tests.zig
index 26e684cd0d..517d789b18 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -112,6 +112,7 @@ const test_targets = blk: {
                 .os_tag = .windows,
                 .abi = .gnu,
             },
+            .single_threaded = true, // https://github.com/ziglang/zig/issues/15075
             .backend = .stage2_x86_64,
         },