diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 36e9813c2a..d3e874a211 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -1832,7 +1832,6 @@ pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: Endian) v
 pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value: anytype, endian: std.builtin.Endian) void {
     const T = @TypeOf(value);
     const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
-    const Log2N = std.math.Log2Int(T);
     const bit_shift = @as(u3, @intCast(bit_offset % 8));
     const write_size = (bit_count + bit_shift + 7) / 8;
@@ -1861,9 +1860,9 @@
     // Write first byte, using a mask to protect bits preceding bit_offset
     const head_mask = @as(u8, 0xff) >> bit_shift;
-    write_bytes[@as(usize, @intCast(i))] &= ~(head_mask << bit_shift);
-    write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & head_mask)) << bit_shift;
-    remaining >>= @as(Log2N, @intCast(@as(u4, 8) - bit_shift));
+    write_bytes[@intCast(i)] &= ~(head_mask << bit_shift);
+    write_bytes[@intCast(i)] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & head_mask)) << bit_shift;
+    remaining = math.shr(T, remaining, @as(u4, 8) - bit_shift);
     i += delta;

     // Write bytes[1..bytes.len - 1]
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 134a6859ef..9d4804c8c8 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -329,7 +329,7 @@ pub const MCValue = union(enum) {
             .load_frame,
             .reserved_frame,
             => unreachable, // not offsettable
-            .immediate => |imm| .{ .immediate = @as(u64, @bitCast(@as(i64, @bitCast(imm)) +% off)) },
+            .immediate => |imm| .{ .immediate = @bitCast(@as(i64, @bitCast(imm)) +% off) },
             .register => |reg| .{ .register_offset = .{ .reg = reg, .off = off } },
             .register_offset => |reg_off| .{
                 .register_offset = .{ .reg = reg_off.reg, .off = reg_off.off + off },
@@ -606,7 +606,7 @@ const FrameAlloc = struct {
     fn init(alloc_abi: struct { size: u64, alignment: u32 }) FrameAlloc {
         assert(math.isPowerOfTwo(alloc_abi.alignment));
         return .{
-            .abi_size = @as(u31, @intCast(alloc_abi.size)),
+            .abi_size = @intCast(alloc_abi.size),
             .abi_align = math.log2_int(u32, alloc_abi.alignment),
             .ref_count = 0,
         };
@@ -694,7 +694,7 @@ pub fn generate(
         FrameAlloc.init(.{
             .size = 0,
             .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack|
-                @as(u32, @intCast(set_align_stack.alignment.toByteUnitsOptional().?))
+                @intCast(set_align_stack.alignment.toByteUnitsOptional().?)
             else
                 1,
         }),
@@ -979,7 +979,7 @@ fn fmtTracking(self: *Self) std.fmt.Formatter(formatTracking) {
 fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
     const gpa = self.gpa;
     try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
-    const result_index = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len));
+    const result_index: Mir.Inst.Index = @intCast(self.mir_instructions.len);
     self.mir_instructions.appendAssumeCapacity(inst);
     if (inst.tag != .pseudo or switch (inst.ops) {
         else => true,
@@ -1000,11 +1000,11 @@ fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
 fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @as(u32, @intCast(self.mir_extra.items.len));
+    const result: u32 = @intCast(self.mir_extra.items.len);
     inline for (fields) |field| {
         self.mir_extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
-            i32 => @as(u32, @bitCast(@field(extra, field.name))),
+            i32 => @bitCast(@field(extra, field.name)),
             else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
         });
     }
@@ -1214,8 +1214,8 @@ fn asmImmediate(self: *Self, tag: Mir.Inst.FixedTag, imm: Immediate) !void {
         .data = .{ .i = .{
             .fixes = tag[0],
             .i = switch (imm) {
-                .signed => |s| @as(u32, @bitCast(s)),
-                .unsigned => |u| @as(u32, @intCast(u)),
+                .signed => |s| @bitCast(s),
+                .unsigned => |u| @intCast(u),
             },
         } },
     });
@@ -1246,8 +1246,8 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, imm:
             .fixes = tag[0],
             .r1 = reg,
             .i = switch (imm) {
-                .signed => |s| @as(u32, @bitCast(s)),
-                .unsigned => |u| @as(u32, @intCast(u)),
+                .signed => |s| @bitCast(s),
+                .unsigned => |u| @intCast(u),
             },
         } },
         .ri64 => .{ .rx = .{
@@ -1339,8 +1339,8 @@ fn asmRegisterRegisterImmediate(
             .r1 = reg1,
             .r2 = reg2,
             .i = switch (imm) {
-                .signed => |s| @as(u32, @bitCast(s)),
-                .unsigned => |u| @as(u32, @intCast(u)),
+                .signed => |s| @bitCast(s),
+                .unsigned => |u| @intCast(u),
             },
         } },
     });
@@ -1458,7 +1458,7 @@ fn asmRegisterRegisterMemoryImmediate(
             .fixes = tag[0],
             .r1 = reg1,
             .r2 = reg2,
-            .i = @as(u8, @intCast(imm.unsigned)),
+            .i = @intCast(imm.unsigned),
             .payload = switch (m) {
                 .sib => try self.addExtra(Mir.MemorySib.encode(m)),
                 .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
@@ -1490,8 +1490,8 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Regist
 fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immediate) !void {
     const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) {
-        .signed => |s| @as(u32, @bitCast(s)),
-        .unsigned => |u| @as(u32, @intCast(u)),
+        .signed => |s| @bitCast(s),
+        .unsigned => |u| @intCast(u),
     } });
     assert(payload + 1 == switch (m) {
         .sib => try self.addExtra(Mir.MemorySib.encode(m)),
@@ -1562,7 +1562,7 @@ fn asmMemoryRegisterImmediate(
         .data = .{ .rix = .{
             .fixes = tag[0],
             .r1 = reg,
-            .i = @as(u8, @intCast(imm.unsigned)),
+            .i = @intCast(imm.unsigned),
             .payload = switch (m) {
                 .sib => try self.addExtra(Mir.MemorySib.encode(m)),
                 .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
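Note: most of the churn in this file is one mechanical rewrite. The cast builtins (@intCast, @bitCast, @enumFromInt, @truncate, ...) infer their result type from the result location, so the @as(T, ...) wrapper is dropped wherever an annotated declaration, struct field, or parameter already supplies the type, and kept where nothing else pins it down (e.g. the @as(u64, math.maxInt(u64)) >> ... masks later in this diff). A minimal standalone sketch of the rule, with illustrative names that are not part of the patch:

```zig
const std = @import("std");

fn takesU32(x: u32) u32 {
    return x;
}

pub fn main() void {
    const len: usize = 12345;

    // Old spelling: the result type is written twice.
    const a = @as(u32, @intCast(len));

    // New spelling: the annotated decl or the parameter type is the
    // result location, so @intCast infers u32 on its own.
    const b: u32 = @intCast(len);
    const c = takesU32(@intCast(len));

    std.debug.print("{} {} {}\n", .{ a, b, c });
}
```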
@@ -1617,7 +1617,7 @@ fn gen(self: *Self) InnerError!void {
             // Eliding the reloc will cause a miscompilation in this case.
             for (self.exitlude_jump_relocs.items) |jmp_reloc| {
                 self.mir_instructions.items(.data)[jmp_reloc].inst.inst =
-                    @as(u32, @intCast(self.mir_instructions.len));
+                    @intCast(self.mir_instructions.len);
             }
             try self.asmPseudo(.pseudo_dbg_epilogue_begin_none);
@@ -1739,7 +1739,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
     for (body) |inst| {
         if (builtin.mode == .Debug) {
-            const mir_inst = @as(Mir.Inst.Index, @intCast(self.mir_instructions.len));
+            const mir_inst: Mir.Inst.Index = @intCast(self.mir_instructions.len);
             try self.mir_to_air_map.put(self.gpa, mir_inst, inst);
         }
@@ -2034,7 +2034,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
         var data_off: i32 = 0;
         for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, index_usize| {
-            const index = @as(u32, @intCast(index_usize));
+            const index: u32 = @intCast(index_usize);
             const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]);
             const tag_val = try mod.enumValueFieldIndex(enum_ty, index);
             const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val });
@@ -2052,7 +2052,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
             exitlude_jump_reloc.* = try self.asmJmpReloc(undefined);
             try self.performReloc(skip_reloc);
-            data_off += @as(i32, @intCast(tag_name.len + 1));
+            data_off += @intCast(tag_name.len + 1);
         }
         try self.airTrap();
@@ -2169,7 +2169,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     const frame_offset = self.frame_locs.items(.disp);
     for (stack_frame_order, FrameIndex.named_count..) |*frame_order, frame_index|
-        frame_order.* = @as(FrameIndex, @enumFromInt(frame_index));
+        frame_order.* = @enumFromInt(frame_index);
     {
         const SortContext = struct {
             frame_align: @TypeOf(frame_align),
@@ -2197,7 +2197,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
         }
     }
-    var rbp_offset = @as(i32, @intCast(save_reg_list.count() * 8));
+    var rbp_offset: i32 = @intCast(save_reg_list.count() * 8);
     self.setFrameLoc(.base_ptr, .rbp, &rbp_offset, false);
     self.setFrameLoc(.ret_addr, .rbp, &rbp_offset, false);
     self.setFrameLoc(.args_frame, .rbp, &rbp_offset, false);
@@ -2212,11 +2212,11 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
     rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align);
     rsp_offset -= stack_frame_align_offset;
     frame_size[@intFromEnum(FrameIndex.call_frame)] =
-        @as(u31, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]));
+        @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]);
     return .{
         .stack_mask = @as(u32, math.maxInt(u32)) << (if (need_align_stack) needed_align else 0),
-        .stack_adjust = @as(u32, @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)])),
+        .stack_adjust = @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]),
         .save_reg_list = save_reg_list,
     };
 }
@@ -2247,7 +2247,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
             _ = self.free_frame_indices.swapRemoveAt(free_i);
             return frame_index;
         }
-    const frame_index = @as(FrameIndex, @enumFromInt(self.frame_allocs.len));
+    const frame_index: FrameIndex = @enumFromInt(self.frame_allocs.len);
     try self.frame_allocs.append(self.gpa, alloc);
     return frame_index;
 }
@@ -2323,7 +2323,7 @@ const State = struct {
 fn initRetroactiveState(self: *Self) State {
     var state: State = undefined;
-    state.inst_tracking_len = @as(u32, @intCast(self.inst_tracking.count()));
+    state.inst_tracking_len = @intCast(self.inst_tracking.count());
     state.scope_generation = self.scope_generation;
     return state;
 }
@@ -2394,9 +2394,7 @@ fn restoreState(self: *Self, state: State, deaths: []const Air.Inst.Index, compt
             self.inst_tracking.getPtr(current_inst).?.trackSpill(self, current_inst);
         }
         {
-            const reg = RegisterManager.regAtTrackedIndex(
-                @as(RegisterManager.RegisterBitSet.ShiftInt, @intCast(index)),
-            );
+            const reg = RegisterManager.regAtTrackedIndex(@intCast(index));
             self.register_manager.freeReg(reg);
             self.register_manager.getRegAssumeFree(reg, target_maybe_inst);
         }
@@ -2630,7 +2628,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     const dst_ty = self.typeOfIndex(inst);
     const dst_int_info = dst_ty.intInfo(mod);
-    const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
+    const abi_size: u32 = @intCast(dst_ty.abiSize(mod));
     const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
     const extend = switch (src_int_info.signedness) {
@@ -2708,9 +2706,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const dst_ty = self.typeOfIndex(inst);
-    const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
+    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
     const src_ty = self.typeOf(ty_op.operand);
-    const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
+    const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
     const result = result: {
         const src_mcv = try self.resolveInst(ty_op.operand);
@@ -2727,7 +2725,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
         assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod));
         const dst_info = dst_ty.childType(mod).intInfo(mod);
         const src_info = src_ty.childType(mod).intInfo(mod);
-        const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) {
+        const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_info.bits) {
             8 => switch (src_info.bits) {
                 16 => switch (dst_ty.vectorLen(mod)) {
                     1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw },
@@ -2750,18 +2748,18 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
                 else => null,
             },
             else => null,
-        })) |tag| tag else return self.fail("TODO implement airTrunc for {}", .{
+        }) orelse return self.fail("TODO implement airTrunc for {}", .{
             dst_ty.fmt(self.bin_file.options.module.?),
         });
         const elem_ty = src_ty.childType(mod);
-        const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - dst_info.bits)));
+        const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(64 - dst_info.bits));
         const splat_ty = try mod.vectorType(.{
-            .len = @as(u32, @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits))),
+            .len = @intCast(@divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
             .child = elem_ty.ip_index,
         });
-        const splat_abi_size = @as(u32, @intCast(splat_ty.abiSize(mod)));
+        const splat_abi_size: u32 = @intCast(splat_ty.abiSize(mod));
         const splat_val = try mod.intern(.{ .aggregate = .{
             .ty = splat_ty.ip_index,
@@ -2836,7 +2834,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
         try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
         try self.genSetMem(
             .{ .frame = frame_index },
-            @as(i32, @intCast(ptr_ty.abiSize(mod))),
+            @intCast(ptr_ty.abiSize(mod)),
             len_ty,
             len,
         );
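The airTrunc hunk above also drops the `if (@as(?Mir.Inst.FixedTag, switch (...) { ... })) |tag| tag else return self.fail(...)` unwrapping in favor of `orelse` applied directly to the optional switch result (the `@as(?...)` coercion stays, since a bare switch expression has no result location to infer from). A small sketch of the equivalence; `pick` is a hypothetical stand-in for the instruction-selection switch:

```zig
const std = @import("std");

const Tag = enum { byte, word };

// Hypothetical stand-in for the instruction-selection switch: null
// means "no encoding available for this bit width".
fn pick(bits: u16) ?Tag {
    return switch (bits) {
        8 => .byte,
        16 => .word,
        else => null,
    };
}

fn selectOld(bits: u16) !Tag {
    // Before: bind the optional, unwrap the payload, bail in the else branch.
    return if (@as(?Tag, pick(bits))) |tag| tag else error.Unsupported;
}

fn selectNew(bits: u16) !Tag {
    // After: orelse unwraps the payload or evaluates the fallback.
    return pick(bits) orelse error.Unsupported;
}

pub fn main() !void {
    std.debug.print("{} {}\n", .{ try selectOld(8), try selectNew(16) });
}
```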
@@ -2935,6 +2933,10 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ty = self.typeOf(bin_op.lhs);
+    if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail(
+        "TODO implement addMulSat for {}",
+        .{ty.fmt(mod)},
+    );

     const lhs_mcv = try self.resolveInst(bin_op.lhs);
     const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
@@ -2966,7 +2968,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
         try self.genSetReg(limit_reg, ty, dst_mcv);
         try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
-            .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
+            .immediate = (@as(u64, 1) << @intCast(reg_bits - 1)) - 1,
         });
         if (reg_extra_bits > 0) {
             const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv);
@@ -2985,7 +2987,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void {
             break :cc .o;
         } else cc: {
             try self.genSetReg(limit_reg, ty, .{
-                .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - ty.bitSize(mod))),
+                .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - ty.bitSize(mod)),
             });
             try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv);
@@ -3014,6 +3016,10 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ty = self.typeOf(bin_op.lhs);
+    if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail(
+        "TODO implement addMulSat for {}",
+        .{ty.fmt(mod)},
+    );

     const lhs_mcv = try self.resolveInst(bin_op.lhs);
     const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv))
@@ -3045,7 +3051,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
         try self.genSetReg(limit_reg, ty, dst_mcv);
         try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
-            .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
+            .immediate = (@as(u64, 1) << @intCast(reg_bits - 1)) - 1,
         });
         if (reg_extra_bits > 0) {
             const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv);
@@ -3086,6 +3092,10 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ty = self.typeOf(bin_op.lhs);
+    if (ty.zigTypeTag(mod) == .Vector or ty.abiSize(mod) > 8) return self.fail(
+        "TODO implement addMulSat for {}",
+        .{ty.fmt(mod)},
+    );

     try self.spillRegisters(&.{ .rax, .rdx });
     const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
@@ -3116,12 +3126,12 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
         try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{
-            .immediate = (@as(u64, 1) << @as(u6, @intCast(reg_bits - 1))) - 1,
+            .immediate = (@as(u64, 1) << @intCast(reg_bits - 1)) - 1,
         });
         break :cc .o;
     } else cc: {
         try self.genSetReg(limit_reg, ty, .{
-            .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - reg_bits)),
+            .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - reg_bits),
         });
         break :cc .c;
     };
@@ -3174,13 +3184,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
     try self.genSetMem(
         .{ .frame = frame_index },
-        @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))),
+        @intCast(tuple_ty.structFieldOffset(1,
mod)), Type.u1, .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), + @intCast(tuple_ty.structFieldOffset(0, mod)), ty, partial_mcv, ); @@ -3247,13 +3257,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), + @intCast(tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1, mod), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), + @intCast(tuple_ty.structFieldOffset(0, mod)), tuple_ty.structFieldType(0, mod), partial_mcv, ); @@ -3321,7 +3331,7 @@ fn genSetFrameTruncatedOverflowCompare( ); } - const payload_off = @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))); + const payload_off: i32 = @intCast(tuple_ty.structFieldOffset(0, mod)); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, @@ -3331,7 +3341,7 @@ fn genSetFrameTruncatedOverflowCompare( ); try self.genSetMem( .{ .frame = frame_index }, - @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), + @intCast(tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1, mod), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, ); @@ -3388,13 +3398,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @as(i32, @intCast(tuple_ty.structFieldOffset(0, mod))), + @intCast(tuple_ty.structFieldOffset(0, mod)), tuple_ty.structFieldType(0, mod), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, - @as(i32, @intCast(tuple_ty.structFieldOffset(1, mod))), + @intCast(tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1, mod), .{ .immediate = 0 }, // cc being set is impossible ); @@ -3418,7 +3428,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { const mod = self.bin_file.options.module.?; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } @@ -3458,7 +3468,7 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Clobbers .rax and .rdx registers. fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue { const mod = self.bin_file.options.module.?; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); const int_info = ty.intInfo(mod); const dividend: Register = switch (lhs) { .register => |reg| reg, @@ -3597,7 +3607,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); const pl_ty = dst_ty.childType(mod); - const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod))); + const pl_abi_size: i32 = @intCast(pl_ty.abiSize(mod)); try self.genSetMem(.{ .reg = dst_mcv.getReg().? 
}, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; }; @@ -3629,17 +3639,12 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { defer if (eu_lock) |lock| self.register_manager.unlockReg(lock); const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); - if (err_off > 0) { - const shift = @as(u6, @intCast(err_off * 8)); - try self.genShiftBinOpMir( - .{ ._r, .sh }, - err_union_ty, - result, - .{ .immediate = shift }, - ); - } else { - try self.truncateRegister(Type.anyerror, result.register); - } + if (err_off > 0) try self.genShiftBinOpMir( + .{ ._r, .sh }, + err_union_ty, + result, + .{ .immediate = @as(u6, @intCast(err_off * 8)) }, + ) else try self.truncateRegister(Type.anyerror, result.register); break :result result; }, .load_frame => |frame_addr| break :result .{ .load_frame = .{ @@ -3687,17 +3692,12 @@ fn genUnwrapErrorUnionPayloadMir( try self.copyToRegisterWithInstTracking(inst, err_union_ty, err_union) else .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) }; - if (payload_off > 0) { - const shift = @as(u6, @intCast(payload_off * 8)); - try self.genShiftBinOpMir( - .{ ._r, .sh }, - err_union_ty, - result_mcv, - .{ .immediate = shift }, - ); - } else { - try self.truncateRegister(payload_ty, result_mcv.register); - } + if (payload_off > 0) try self.genShiftBinOpMir( + .{ ._r, .sh }, + err_union_ty, + result_mcv, + .{ .immediate = @as(u6, @intCast(payload_off * 8)) }, + ) else try self.truncateRegister(payload_ty, result_mcv.register); break :result result_mcv; }, else => return self.fail("TODO implement genUnwrapErrorUnionPayloadMir for {}", .{err_union}), @@ -3729,8 +3729,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); const err_ty = eu_ty.errorUnionSet(mod); - const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); - const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod))); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod)); + const err_abi_size: u32 = @intCast(err_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), @@ -3768,8 +3768,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); - const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); - const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3795,8 +3795,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(mod); const err_ty = eu_ty.errorUnionSet(mod); - const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); - const err_abi_size = @as(u32, @intCast(err_ty.abiSize(mod))); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod)); + const err_abi_size: u32 = @intCast(err_ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ @@ -3816,8 +3816,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| 
self.register_manager.unlockReg(lock); - const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); - const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3866,7 +3866,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { try self.genCopy(pl_ty, opt_mcv, pl_mcv); if (!same_repr) { - const pl_abi_size = @as(i32, @intCast(pl_ty.abiSize(mod))); + const pl_abi_size: i32 = @intCast(pl_ty.abiSize(mod)); switch (opt_mcv) { else => unreachable, @@ -3905,8 +3905,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 }; const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); - const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); - const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3927,8 +3927,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); - const pl_off = @as(i32, @intCast(errUnionPayloadOffset(pl_ty, mod))); - const err_off = @as(i32, @intCast(errUnionErrorOffset(pl_ty, mod))); + const pl_off: i32 = @intCast(errUnionPayloadOffset(pl_ty, mod)); + const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); const operand = try self.resolveInst(ty_op.operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); @@ -3990,7 +3990,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -4167,7 +4167,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { // additional `mov` is needed at the end to get the actual value const elem_ty = ptr_ty.elemType2(mod); - const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); + const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod)); const index_ty = self.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_lock = switch (index_mcv) { @@ -4307,7 +4307,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { .load_frame => |frame_addr| { if (tag_abi_size <= 8) { const off: i32 = if (layout.tag_align < layout.payload_align) - @as(i32, @intCast(layout.payload_size)) + @intCast(layout.payload_size) else 0; break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{ @@ -4319,13 +4319,13 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { }, .register => { const shift: u6 = if (layout.tag_align < layout.payload_align) - @as(u6, 
@intCast(layout.payload_size * 8)) + @intCast(layout.payload_size * 8) else 0; const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand); try self.genShiftBinOpMir(.{ ._r, .sh }, Type.usize, result, .{ .immediate = shift }); break :blk MCValue{ - .register = registerAlias(result.register, @as(u32, @intCast(layout.tag_size))), + .register = registerAlias(result.register, @intCast(layout.tag_size)), }; }, else => return self.fail("TODO implement get_union_tag for {}", .{operand}), @@ -4341,6 +4341,9 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const result = result: { const dst_ty = self.typeOfIndex(inst); const src_ty = self.typeOf(ty_op.operand); + if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airClz for {}", .{ + src_ty.fmt(mod), + }); const src_mcv = try self.resolveInst(ty_op.operand); const mat_src_mcv = switch (src_mcv) { @@ -4432,7 +4435,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .xor }, dst_ty, dst_mcv, .{ .immediate = src_bits - 1 }); } else { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - self.regBitSize(dst_ty))), + .immediate = @as(u64, math.maxInt(u64)) >> @intCast(64 - self.regBitSize(dst_ty)), }); const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg); defer self.register_manager.unlockReg(imm_lock); @@ -4503,8 +4506,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { .{ ._, .@"or" }, wide_ty, tmp_mcv, - .{ .immediate = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - extra_bits))) << - @as(u6, @intCast(src_bits)) }, + .{ .immediate = (@as(u64, math.maxInt(u64)) >> @intCast(64 - extra_bits)) << + @intCast(src_bits) }, ); break :masked tmp_mcv; } else mat_src_mcv; @@ -4521,7 +4524,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { .{ ._, .@"or" }, Type.u64, dst_mcv, - .{ .immediate = @as(u64, math.maxInt(u64)) << @as(u6, @intCast(src_bits - 64)) }, + .{ .immediate = @as(u64, math.maxInt(u64)) << @intCast(src_bits - 64) }, ); break :masked dst_mcv; } else mat_src_mcv.address().offset(8).deref(); @@ -4565,7 +4568,9 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.typeOf(ty_op.operand); - const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); + const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); + if (src_ty.zigTypeTag(mod) == .Vector or src_abi_size > 8) + return self.fail("TODO implement airPopcount for {}", .{src_ty.fmt(mod)}); const src_mcv = try self.resolveInst(ty_op.operand); if (self.hasFeature(.popcnt)) { @@ -4590,7 +4595,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8)); + const mask = @as(u64, math.maxInt(u64)) >> @intCast(64 - src_abi_size * 8); const imm_0_1 = Immediate.u(mask / 0b1_1); const imm_00_11 = Immediate.u(mask / 0b01_01); const imm_0000_1111 = Immediate.u(mask / 0b0001_0001); @@ -4674,8 +4679,13 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { } fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, mem_ok: bool) !MCValue { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; + if (src_ty.zigTypeTag(mod) == .Vector or src_ty.abiSize(mod) > 8) return self.fail( + "TODO implement byteSwap for {}", + 
.{src_ty.fmt(mod)}, + ); const src_bits = self.regBitSize(src_ty); const src_lock = switch (src_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4684,7 +4694,9 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m defer if (src_lock) |lock| self.register_manager.unlockReg(lock); switch (src_bits) { - else => unreachable, + else => return self.fail("TODO implement byteSwap for {}", .{ + src_ty.fmt(mod), + }), 8 => return if ((mem_ok or src_mcv.isRegister()) and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv @@ -4756,7 +4768,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.typeOf(ty_op.operand); - const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); + const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false); @@ -4776,7 +4788,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { else undefined; - const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - src_abi_size * 8)); + const mask = @as(u64, math.maxInt(u64)) >> @intCast(64 - src_abi_size * 8); const imm_0000_1111 = Immediate.u(mask / 0b0001_0001); const imm_00_11 = Immediate.u(mask / 0b01_01); const imm_0_1 = Immediate.u(mask / 0b1_1); @@ -4874,6 +4886,9 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { }), }; const scalar_bits = ty.scalarType(mod).floatBits(self.target.*); + if (scalar_bits == 80) return self.fail("TODO implement airFloatSign for {}", .{ + ty.fmt(mod), + }); const src_mcv = try self.resolveInst(un_op); const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; @@ -4989,7 +5004,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 if (!self.hasFeature(.sse4_1)) return self.fail("TODO implement genRound without sse4_1 feature", .{}); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { + const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round }, @@ -5016,10 +5031,10 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 else => null, }, else => unreachable, - })) |tag| tag else return self.fail("TODO implement genRound for {}", .{ + }) orelse return self.fail("TODO implement genRound for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); const dst_alias = registerAlias(dst_reg, abi_size); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( @@ -5059,7 +5074,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.typeOf(un_op); - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) @@ -5071,7 +5086,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_lock) |lock| 
self.register_manager.unlockReg(lock); const result: MCValue = result: { - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { + const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { const mat_src_reg = if (src_mcv.isRegister()) @@ -5125,7 +5140,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { .{ .v_ps, .cvtph2 }, wide_reg, src_mcv.mem(Memory.PtrSize.fromSize( - @as(u32, @intCast(@divExact(wide_reg.bitSize(), 16))), + @intCast(@divExact(wide_reg.bitSize(), 16)), )), ) else try self.asmRegisterRegister( .{ .v_ps, .cvtph2 }, @@ -5164,7 +5179,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }, else => unreachable, - })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ + }) orelse return self.fail("TODO implement airSqrt for {}", .{ ty.fmt(mod), }); switch (mir_tag[0]) { @@ -5257,10 +5272,10 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn const ptr_info = ptr_ty.ptrInfo(mod); const val_ty = ptr_info.child.toType(); - const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); + const val_abi_size: u32 = @intCast(val_ty.abiSize(mod)); const limb_abi_size: u32 = @min(val_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; - const val_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size)); + const val_byte_off: i32 = @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size); const val_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits; const val_extra_bits = self.regExtraBits(val_ty); @@ -5404,9 +5419,10 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In const limb_abi_size: u16 = @min(ptr_info.packed_offset.host_size, 8); const limb_abi_bits = limb_abi_size * 8; + const limb_ty = try mod.intType(.unsigned, limb_abi_bits); const src_bit_size = src_ty.bitSize(mod); - const src_byte_off = @as(i32, @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size)); + const src_byte_off: i32 = @intCast(ptr_info.packed_offset.bit_offset / limb_abi_bits * limb_abi_size); const src_bit_off = ptr_info.packed_offset.bit_offset % limb_abi_bits; const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv); @@ -5423,10 +5439,9 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In .disp = src_byte_off + limb_i * limb_abi_bits, }); - const part_mask = (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - part_bit_size))) << - @as(u6, @intCast(part_bit_off)); - const part_mask_not = part_mask ^ - (@as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_abi_bits))); + const part_mask = (@as(u64, math.maxInt(u64)) >> @intCast(64 - part_bit_size)) << + @intCast(part_bit_off); + const part_mask_not = part_mask ^ (@as(u64, math.maxInt(u64)) >> @intCast(64 - limb_abi_bits)); if (limb_abi_size <= 4) { try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.u(part_mask_not)); } else if (math.cast(i32, @as(i64, @bitCast(part_mask_not)))) |small| { @@ -5443,23 +5458,23 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - try self.genSetReg(tmp_reg, src_ty, src_mcv); + try self.genSetReg(tmp_reg, limb_ty, src_mcv); switch (limb_i) { 0 => try self.genShiftBinOpMir( .{ ._l, .sh }, - src_ty, + limb_ty, 
tmp_mcv, .{ .immediate = src_bit_off }, ), 1 => try self.genShiftBinOpMir( .{ ._r, .sh }, - src_ty, + limb_ty, tmp_mcv, .{ .immediate = limb_abi_bits - src_bit_off }, ), else => unreachable, } - try self.genBinOpMir(.{ ._, .@"and" }, src_ty, tmp_mcv, .{ .immediate = part_mask }); + try self.genBinOpMir(.{ ._, .@"and" }, limb_ty, tmp_mcv, .{ .immediate = part_mask }); try self.asmMemoryRegister( .{ ._, .@"or" }, limb_mem, @@ -5544,14 +5559,14 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); const container_ty = ptr_container_ty.childType(mod); - const field_offset = @as(i32, @intCast(switch (container_ty.containerLayout(mod)) { + const field_offset: i32 = @intCast(switch (container_ty.containerLayout(mod)) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), .Packed => if (container_ty.zigTypeTag(mod) == .Struct and ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0) container_ty.packedStructFieldByteOffset(index, mod) else 0, - })); + }); const src_mcv = try self.resolveInst(operand); const dst_mcv = if (switch (src_mcv) { @@ -5578,8 +5593,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const field_is_gp = field_rc.supersetOf(gp); const src_mcv = try self.resolveInst(operand); - const field_off = switch (container_ty.containerLayout(mod)) { - .Auto, .Extern => @as(u32, @intCast(container_ty.structFieldOffset(index, mod) * 8)), + const field_off: u32 = switch (container_ty.containerLayout(mod)) { + .Auto, .Extern => @intCast(container_ty.structFieldOffset(index, mod) * 8), .Packed => if (mod.typeToStruct(container_ty)) |struct_obj| struct_obj.packedFieldBitOffset(mod, index) else @@ -5588,9 +5603,31 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { switch (src_mcv) { .load_frame => |frame_addr| { + const field_abi_size: u32 = @intCast(field_ty.abiSize(mod)); if (field_off % 8 == 0) { const off_mcv = - src_mcv.address().offset(@as(i32, @intCast(@divExact(field_off, 8)))).deref(); + src_mcv.address().offset(@intCast(@divExact(field_off, 8))).deref(); + + if (field_abi_size <= 8) { + const int_ty = try mod.intType( + if (field_ty.isAbiInt(mod)) field_ty.intInfo(mod).signedness else .unsigned, + @intCast(field_ty.bitSize(mod)), + ); + + const dst_reg = + try self.register_manager.allocReg(if (field_is_gp) inst else null, gp); + const dst_mcv = MCValue{ .register = dst_reg }; + const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); + defer self.register_manager.unlockReg(dst_lock); + + try self.genCopy(int_ty, dst_mcv, off_mcv); + if (self.regExtraBits(field_ty) > 0) try self.truncateRegister(int_ty, dst_reg); + break :result if (field_is_gp) + dst_mcv + else + try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv); + } + if (self.reuseOperand(inst, operand, 0, src_mcv)) break :result off_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); @@ -5598,10 +5635,9 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const field_abi_size = @as(u32, @intCast(field_ty.abiSize(mod))); const limb_abi_size: u32 = @min(field_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; - const field_byte_off = @as(i32, @intCast(field_off / limb_abi_bits * limb_abi_size)); + const field_byte_off: i32 = @intCast(field_off / limb_abi_bits * limb_abi_size); const field_bit_off = field_off % limb_abi_bits; if (field_abi_size > 8) { @@ -5726,7 +5762,7 @@ fn 
airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const inst_ty = self.typeOfIndex(inst); const parent_ty = inst_ty.childType(mod); - const field_offset = @as(i32, @intCast(parent_ty.structFieldOffset(extra.field_index, mod))); + const field_offset: i32 = @intCast(parent_ty.structFieldOffset(extra.field_index, mod)); const src_mcv = try self.resolveInst(extra.field_ptr); const dst_mcv = if (src_mcv.isRegisterOffset() and @@ -5775,14 +5811,14 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { - const limb_abi_size = @as(u16, @intCast(@min(src_ty.abiSize(mod), 8))); + const limb_abi_size: u16 = @intCast(@min(src_ty.abiSize(mod), 8)); const int_info = if (src_ty.ip_index == .bool_type) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else src_ty.intInfo(mod); var byte_off: i32 = 0; while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) { - const limb_bits = @as(u16, @intCast(@min(int_info.bits - byte_off * 8, limb_abi_size * 8))); + const limb_bits: u16 = @intCast(@min(int_info.bits - byte_off * 8, limb_abi_size * 8)); const limb_ty = try mod.intType(int_info.signedness, limb_bits); const limb_mcv = switch (byte_off) { 0 => dst_mcv, @@ -5790,7 +5826,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: }; if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) { - const mask = @as(u64, math.maxInt(u64)) >> @as(u6, @intCast(64 - limb_bits)); + const mask = @as(u64, math.maxInt(u64)) >> @intCast(64 - limb_bits); try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask }); } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv); } @@ -5803,7 +5839,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void { const mod = self.bin_file.options.module.?; - const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); + const abi_size: u32 = @intCast(dst_ty.abiSize(mod)); if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(self.bin_file.options.module.?), @@ -5865,7 +5901,7 @@ fn genShiftBinOpMir( break :rhs .{ .register = .rcx }; }; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); if (abi_size <= 8) { switch (lhs_mcv) { .register => |lhs_reg| switch (rhs_mcv) { @@ -6085,16 +6121,16 @@ fn genShiftBinOp( rhs_ty: Type, ) !MCValue { const mod = self.bin_file.options.module.?; - if (lhs_ty.zigTypeTag(mod) == .Vector) { - return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); - } + if (lhs_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{ + lhs_ty.fmt(mod), + }); assert(rhs_ty.abiSize(mod) == 1); const lhs_abi_size = lhs_ty.abiSize(mod); - if (lhs_abi_size > 16) { - return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); - } + if (lhs_abi_size > 16) return self.fail("TODO implement genShiftBinOp for {}", .{ + lhs_ty.fmt(mod), + }); try self.register_manager.getReg(.rcx, null); const rcx_lock = self.register_manager.lockRegAssumeUnused(.rcx); @@ -6150,11 +6186,12 @@ fn genMulDivBinOp( rhs: MCValue, ) !MCValue { const mod = self.bin_file.options.module.?; - if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) { - return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()}); - } - const dst_abi_size = @as(u32, 
@intCast(dst_ty.abiSize(mod))); - const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod))); + if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) return self.fail( + "TODO implement genMulDivBinOp for {}", + .{dst_ty.fmt(mod)}, + ); + const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod)); + const src_abi_size: u32 = @intCast(src_ty.abiSize(mod)); if (switch (tag) { else => unreachable, .mul, .mul_wrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2, @@ -6328,7 +6365,13 @@ fn genBinOp( const mod = self.bin_file.options.module.?; const lhs_ty = self.typeOf(lhs_air); const rhs_ty = self.typeOf(rhs_air); - const abi_size = @as(u32, @intCast(lhs_ty.abiSize(mod))); + const abi_size: u32 = @intCast(lhs_ty.abiSize(mod)); + if ((lhs_ty.scalarType(mod).isRuntimeFloat() and + lhs_ty.scalarType(mod).floatBits(self.target.*) == 80) or + lhs_ty.abiSize(mod) > @as(u6, if (self.hasFeature(.avx)) 32 else 16)) + return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(mod), + }); const maybe_mask_reg = switch (air_tag) { else => null, @@ -6339,6 +6382,9 @@ fn genBinOp( } else try self.register_manager.allocReg(null, sse), abi_size, ) else null, + .rem, .mod => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), }; const mask_lock = if (maybe_mask_reg) |mask_reg| self.register_manager.lockRegAssumeUnused(mask_reg) else null; @@ -6543,7 +6589,7 @@ fn genBinOp( Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), switch (mat_src_mcv) { .memory => |addr| .{ .base = .{ .reg = .ds }, - .disp = @as(i32, @intCast(@as(i64, @bitCast(addr)))), + .disp = @intCast(@as(i64, @bitCast(addr))), }, .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, @@ -6569,7 +6615,7 @@ fn genBinOp( } const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { + const mir_tag = @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { else => unreachable, .Float => switch (lhs_ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { @@ -7131,7 +7177,7 @@ fn genBinOp( else => unreachable, }, }, - })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }); @@ -7194,7 +7240,7 @@ fn genBinOp( const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size); try self.asmRegisterRegisterRegisterImmediate( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .cmp }, 64 => .{ .v_sd, .cmp }, @@ -7219,7 +7265,7 @@ fn genBinOp( else => unreachable, }, else => unreachable, - })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }), mask_reg, @@ -7228,7 +7274,7 @@ fn genBinOp( Immediate.u(3), // unord ); try self.asmRegisterRegisterRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ .v_ps, .blendv }, 64 => .{ .v_pd, .blendv }, @@ -7251,7 +7297,7 @@ fn genBinOp( else => unreachable, }, else => unreachable, - })) 
|tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }), dst_reg, @@ -7262,7 +7308,7 @@ fn genBinOp( } else { const has_blend = self.hasFeature(.sse4_1); try self.asmRegisterRegisterImmediate( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ss, .cmp }, 64 => .{ ._sd, .cmp }, @@ -7287,7 +7333,7 @@ fn genBinOp( else => unreachable, }, else => unreachable, - })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }), mask_reg, @@ -7295,7 +7341,7 @@ fn genBinOp( Immediate.u(if (has_blend) 3 else 7), // unord, ord ); if (has_blend) try self.asmRegisterRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .blendv }, 64 => .{ ._pd, .blendv }, @@ -7318,7 +7364,7 @@ fn genBinOp( else => unreachable, }, else => unreachable, - })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }), dst_reg, @@ -7326,7 +7372,7 @@ fn genBinOp( mask_reg, ) else { try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"and" }, 64 => .{ ._pd, .@"and" }, @@ -7349,14 +7395,14 @@ fn genBinOp( else => unreachable, }, else => unreachable, - })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }), dst_reg, mask_reg, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .andn }, 64 => .{ ._pd, .andn }, @@ -7379,14 +7425,14 @@ fn genBinOp( else => unreachable, }, else => unreachable, - })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }), mask_reg, lhs_copy_reg.?, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"or" }, 64 => .{ ._pd, .@"or" }, @@ -7409,7 +7455,7 @@ fn genBinOp( else => unreachable, }, else => unreachable, - })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + }) orelse return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }), dst_reg, @@ -7431,7 +7477,7 @@ fn genBinOpMir( src_mcv: MCValue, ) !void { const mod = self.bin_file.options.module.?; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); switch 
(dst_mcv) { .none, .unreach, @@ -7679,10 +7725,10 @@ fn genBinOpMir( else => unreachable, }, .immediate => |src_imm| { - const imm = switch (off) { + const imm: u64 = switch (off) { 0 => src_imm, else => switch (ty_signedness) { - .signed => @as(u64, @bitCast(@as(i64, @bitCast(src_imm)) >> 63)), + .signed => @bitCast(@as(i64, @bitCast(src_imm)) >> 63), .unsigned => 0, }, }; @@ -7755,7 +7801,7 @@ fn genBinOpMir( 0 => src_mcv, else => .{ .immediate = 0 }, }, - .memory => |addr| .{ .memory = @as(u64, @bitCast(@as(i64, @bitCast(addr)) + off)) }, + .memory => |addr| .{ .memory = @bitCast(@as(i64, @bitCast(addr)) + off) }, .indirect => |reg_off| .{ .indirect = .{ .reg = reg_off.reg, .off = reg_off.off + off, @@ -7782,7 +7828,7 @@ fn genBinOpMir( /// Does not support byte-size operands. fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @as(u32, @intCast(dst_ty.abiSize(mod))); + const abi_size: u32 = @intCast(dst_ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -8016,7 +8062,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); - const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); + const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); const ty = self.typeOf(callee); const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -8109,7 +8155,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const got_addr = atom.getOffsetTableAddress(elf_file); try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, - .disp = @as(i32, @intCast(got_addr)), + .disp = @intCast(got_addr), })); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom = try coff_file.getOrCreateAtomForDecl(owner_decl); @@ -8126,7 +8172,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = p9.getAtom(atom_index); try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, - .disp = @as(i32, @intCast(atom.getOffsetTableAddress(p9))), + .disp = @intCast(atom.getOffsetTableAddress(p9)), })); } else unreachable; } else if (func_value.getExternFunc(mod)) |extern_func| { @@ -8246,7 +8292,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const result = MCValue{ .eflags = switch (ty.zigTypeTag(mod)) { else => result: { - const abi_size = @as(u16, @intCast(ty.abiSize(mod))); + const abi_size: u16 = @intCast(ty.abiSize(mod)); const may_flip: enum { may_flip, must_flip, @@ -8443,7 +8489,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { self.eflags_inst = inst; const op_ty = self.typeOf(un_op); - const op_abi_size = @as(u32, @intCast(op_ty.abiSize(mod))); + const op_abi_size: u32 = @intCast(op_ty.abiSize(mod)); const op_mcv = try self.resolveInst(un_op); const dst_reg = switch (op_mcv) { .register => |reg| reg, @@ -8652,7 +8698,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool }; + .{ .off = 
@intCast(pl_ty.abiSize(mod)), .ty = Type.bool }; switch (opt_mcv) { .none, @@ -8672,14 +8718,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register => |opt_reg| { if (some_info.off == 0) { - const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); + const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); const alias_reg = registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } assert(some_info.ty.ip_index == .bool_type); - const opt_abi_size = @as(u32, @intCast(opt_ty.abiSize(mod))); + const opt_abi_size: u32 = @intCast(opt_ty.abiSize(mod)); try self.asmRegisterImmediate( .{ ._, .bt }, registerAlias(opt_reg, opt_abi_size), @@ -8698,7 +8744,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, opt_mcv.address()); - const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); + const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8711,7 +8757,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC }, .indirect, .load_frame => { - const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); + const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { @@ -8743,7 +8789,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else - .{ .off = @as(i32, @intCast(pl_ty.abiSize(mod))), .ty = Type.bool }; + .{ .off = @intCast(pl_ty.abiSize(mod)), .ty = Type.bool }; const ptr_reg = switch (ptr_mcv) { .register => |reg| reg, @@ -8752,7 +8798,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const some_abi_size = @as(u32, @intCast(some_info.ty.abiSize(mod))); + const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8785,12 +8831,11 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! 
const tmp_reg = try self.copyToTmpRegister(ty, operand); if (err_off > 0) { - const shift = @as(u6, @intCast(err_off * 8)); try self.genShiftBinOpMir( .{ ._r, .sh }, ty, .{ .register = tmp_reg }, - .{ .immediate = shift }, + .{ .immediate = @as(u6, @intCast(err_off * 8)) }, ); } else { try self.truncateRegister(Type.anyerror, tmp_reg); @@ -8945,7 +8990,7 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; - const jmp_target = @as(u32, @intCast(self.mir_instructions.len)); + const jmp_target: u32 = @intCast(self.mir_instructions.len); self.scope_generation += 1; const state = try self.saveState(); @@ -9017,10 +9062,8 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { while (case_i < switch_br.data.cases_len) : (case_i += 1) { const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @as( - []const Air.Inst.Ref, - @ptrCast(self.air.extra[case.end..][0..case.data.items_len]), - ); + const items: []const Air.Inst.Ref = + @ptrCast(self.air.extra[case.end..][0..case.data.items_len]); const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + items.len + case_body.len; @@ -9068,7 +9111,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { } fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void { - const next_inst = @as(u32, @intCast(self.mir_instructions.len)); + const next_inst: u32 = @intCast(self.mir_instructions.len); switch (self.mir_instructions.items(.tag)[reloc]) { .j, .jmp => {}, .pseudo => switch (self.mir_instructions.items(.ops)[reloc]) { @@ -9143,11 +9186,12 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); - const clobbers_len = @as(u31, @truncate(extra.data.flags)); + const clobbers_len: u31 = @truncate(extra.data.flags); var extra_i: usize = extra.end; - const outputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len])); + const outputs: []const Air.Inst.Ref = + @ptrCast(self.air.extra[extra_i..][0..extra.data.outputs_len]); extra_i += outputs.len; - const inputs = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len])); + const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; var result: MCValue = .none; @@ -9282,16 +9326,14 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } else if (mem.startsWith(u8, op_str, "$")) { if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| { if (mnem_size) |size| { - const max = @as(u64, math.maxInt(u64)) >> - @as(u6, @intCast(64 - (size.bitSize() - 1))); + const max = @as(u64, math.maxInt(u64)) >> @intCast(64 - (size.bitSize() - 1)); if ((if (s < 0) ~s else s) > max) return self.fail("Invalid immediate size: '{s}'", .{op_str}); } op.* = .{ .imm = Immediate.s(s) }; } else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| { if (mnem_size) |size| { - const max = @as(u64, math.maxInt(u64)) >> - @as(u6, @intCast(64 - size.bitSize())); + const max = @as(u64, math.maxInt(u64)) >> @intCast(64 - size.bitSize()); if (u > max) return self.fail("Invalid immediate size: '{s}'", .{op_str}); } @@ -9643,7 +9685,7 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: 
MCValue, src_mcv: MCValue) InnerError fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); if (abi_size * 8 > dst_reg.bitSize()) return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { @@ -9668,7 +9710,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr try self.asmRegisterImmediate( .{ ._, .mov }, registerAlias(dst_reg, abi_size), - Immediate.s(@as(i32, @intCast(@as(i64, @bitCast(imm))))), + Immediate.s(@intCast(@as(i64, @bitCast(imm)))), ); } else { try self.asmRegisterImmediate( @@ -9726,7 +9768,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .{ .register = try self.copyToTmpRegister(ty, src_mcv) }, ), .sse => try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) { else => switch (abi_size) { 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9750,7 +9792,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr 80 => null, else => unreachable, }, - })) |tag| tag else return self.fail("TODO implement genSetReg for {}", .{ + }) orelse return self.fail("TODO implement genSetReg for {}", .{ ty.fmt(self.bin_file.options.module.?), }), registerAlias(dst_reg, abi_size), @@ -9930,9 +9972,9 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); const dst_ptr_mcv: MCValue = switch (base) { - .none => .{ .immediate = @as(u64, @bitCast(@as(i64, disp))) }, + .none => .{ .immediate = @bitCast(@as(i64, disp)) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, .frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } }, }; @@ -9943,7 +9985,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .immediate => |imm| switch (abi_size) { 1, 2, 4 => { const immediate = if (ty.isSignedInt(mod)) - Immediate.s(@as(i32, @truncate(@as(i64, @bitCast(imm))))) + Immediate.s(@truncate(@as(i64, @bitCast(imm)))) else Immediate.u(@as(u32, @intCast(imm))); try self.asmMemoryImmediate( @@ -9965,10 +10007,9 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .{ ._, .mov }, Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), if (ty.isSignedInt(mod)) - Immediate.s(@as( - i32, + Immediate.s( @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)), - )) + ) else Immediate.u(@as( u32, @@ -10079,7 +10120,7 @@ fn genLazySymbolRef( _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); const got_mem = - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) }); + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(got_addr) }); switch (tag) { .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), .call => try self.asmMemory(.{ ._, .call }, got_mem), 
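Aside (not part of the patch): most hunks in this file are a mechanical cleanup enabled by result-location semantics. A cast such as `@as(u32, @intCast(x))` collapses to a bare `@intCast(x)` wherever the destination already pins the result type, either through a typed declaration or a typed parameter, and the `if (@as(?T, expr)) |tag| tag else fallback` construction tightens to `@as(?T, expr) orelse fallback`. A minimal standalone sketch of both spellings, assuming a compiler with result-location casts (Zig 0.11 or later):

    const std = @import("std");

    fn pickTag(have_avx: bool) !u8 {
        // `orelse` replaces the `if (optional) |tag| tag else ...` dance.
        return @as(?u8, if (have_avx) 1 else null) orelse error.Unsupported;
    }

    test "the destination provides the result type" {
        const wide: u64 = 1341;
        const old = @as(u32, @intCast(wide)); // result type named twice
        const new: u32 = @intCast(wide); // the typed decl names it once
        try std.testing.expectEqual(old, new);
        try std.testing.expectEqual(@as(u8, 1), try pickTag(true));
        try std.testing.expectError(error.Unsupported, pickTag(false));
    }
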
@@ -10101,7 +10142,7 @@ fn genLazySymbolRef( _ = atom.getOrCreateOffsetTableEntry(p9_file); const got_addr = atom.getOffsetTableAddress(p9_file); const got_mem = - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @as(i32, @intCast(got_addr)) }); + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(got_addr) }); switch (tag) { .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), .call => try self.asmMemory(.{ ._, .call }, got_mem), @@ -10197,8 +10238,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; if (dst_signedness == src_signedness) break :result dst_mcv; - const abi_size = @as(u16, @intCast(dst_ty.abiSize(mod))); - const bit_size = @as(u16, @intCast(dst_ty.bitSize(mod))); + const abi_size: u16 = @intCast(dst_ty.abiSize(mod)); + const bit_size: u16 = @intCast(dst_ty.bitSize(mod)); if (abi_size * 8 <= bit_size) break :result dst_mcv; const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable; @@ -10239,7 +10280,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @as(i32, @intCast(ptr_ty.abiSize(mod))), + @intCast(ptr_ty.abiSize(mod)), Type.usize, .{ .immediate = array_len }, ); @@ -10253,7 +10294,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.typeOf(ty_op.operand); - const src_bits = @as(u32, @intCast(src_ty.bitSize(mod))); + const src_bits: u32 = @intCast(src_ty.bitSize(mod)); const src_signedness = if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; const dst_ty = self.typeOfIndex(inst); @@ -10281,7 +10322,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) { + const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) { .Float => switch (dst_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 }, @@ -10289,7 +10330,7 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }, else => null, - })) |tag| tag else return self.fail("TODO implement airFloatFromInt from {} to {}", .{ + }) orelse return self.fail("TODO implement airFloatFromInt from {} to {}", .{ src_ty.fmt(mod), dst_ty.fmt(mod), }); const dst_alias = dst_reg.to128(); @@ -10308,7 +10349,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { const src_ty = self.typeOf(ty_op.operand); const dst_ty = self.typeOfIndex(inst); - const dst_bits = @as(u32, @intCast(dst_ty.bitSize(mod))); + const dst_bits: u32 = @intCast(dst_ty.bitSize(mod)); const dst_signedness = if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; @@ -10334,7 +10375,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(dst_lock); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) { + @as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) { .Float => switch (src_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si }, 64 => if 
(self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, .cvttsd2si }, @@ -10342,7 +10383,7 @@ fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }, else => null, - })) |tag| tag else return self.fail("TODO implement airIntFromFloat from {} to {}", .{ + }) orelse return self.fail("TODO implement airIntFromFloat from {} to {}", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), registerAlias(dst_reg, dst_size), @@ -10361,7 +10402,7 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(extra.ptr); const val_ty = self.typeOf(extra.expected_value); - const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); + const val_abi_size: u32 = @intCast(val_ty.abiSize(mod)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); @@ -10463,7 +10504,7 @@ fn atomicOp( }; defer if (val_lock) |lock| self.register_manager.unlockReg(lock); - const val_abi_size = @as(u32, @intCast(val_ty.abiSize(mod))); + const val_abi_size: u32 = @intCast(val_ty.abiSize(mod)); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), @@ -10541,7 +10582,7 @@ fn atomicOp( defer self.register_manager.unlockReg(tmp_lock); try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(.rax, val_abi_size), ptr_mem); - const loop = @as(u32, @intCast(self.mir_instructions.len)); + const loop: u32 = @intCast(self.mir_instructions.len); if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { try self.genSetReg(tmp_reg, val_ty, .{ .register = .rax }); } @@ -10615,7 +10656,7 @@ fn atomicOp( .scale_index = ptr_mem.scaleIndex(), .disp = ptr_mem.sib.disp + 8, })); - const loop = @as(u32, @intCast(self.mir_instructions.len)); + const loop: u32 = @intCast(self.mir_instructions.len); const val_mem_mcv: MCValue = switch (val_mcv) { .memory, .indirect, .load_frame => val_mcv, else => .{ .indirect = .{ @@ -10771,7 +10812,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { }; defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); - const elem_abi_size = @as(u31, @intCast(elem_ty.abiSize(mod))); + const elem_abi_size: u31 = @intCast(elem_ty.abiSize(mod)); if (elem_abi_size == 1) { const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { @@ -11251,9 +11292,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result_ty = self.typeOfIndex(inst); - const len = @as(usize, @intCast(result_ty.arrayLen(mod))); + const len: usize = @intCast(result_ty.arrayLen(mod)); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const elements = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[ty_pl.payload..][0..len])); + const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { switch (result_ty.zigTypeTag(mod)) { .Struct => { @@ -11270,17 +11311,17 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, mod); - const elem_bit_size = @as(u32, @intCast(elem_ty.bitSize(mod))); + const elem_bit_size: u32 = @intCast(elem_ty.bitSize(mod)); if (elem_bit_size > 64) { return self.fail( "TODO 
airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size = @as(u32, @intCast(elem_ty.abiSize(mod))); + const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod)); const elem_abi_bits = elem_abi_size * 8; const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i); - const elem_byte_off = @as(i32, @intCast(elem_off / elem_abi_bits * elem_abi_size)); + const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { @@ -11342,7 +11383,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i, mod); - const elem_off = @as(i32, @intCast(result_ty.structFieldOffset(elem_i, mod))); + const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, mod)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -11356,7 +11397,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); const elem_ty = result_ty.childType(mod); - const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod))); + const elem_size: u32 = @intCast(elem_ty.abiSize(mod)); for (elements, 0..) |elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -11364,12 +11405,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, else => elem_mcv, }; - const elem_off = @as(i32, @intCast(elem_size * elem_i)); + const elem_off: i32 = @intCast(elem_size * elem_i); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem( .{ .frame = frame_index }, - @as(i32, @intCast(elem_size * elements.len)), + @intCast(elem_size * elements.len), elem_ty, try self.genTypedValue(.{ .ty = elem_ty, .val = sentinel }), ); @@ -11417,16 +11458,16 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); const tag_int_val = try tag_val.intFromEnum(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); - const tag_off = if (layout.tag_align < layout.payload_align) - @as(i32, @intCast(layout.payload_size)) + const tag_off: i32 = if (layout.tag_align < layout.payload_align) + @intCast(layout.payload_size) else 0; try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int }); - const pl_off = if (layout.tag_align < layout.payload_align) + const pl_off: i32 = if (layout.tag_align < layout.payload_align) 0 else - @as(i32, @intCast(layout.tag_size)); + @intCast(layout.tag_size); try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv); break :result dst_mcv; @@ -11456,7 +11497,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var order = [1]u2{0} ** 3; var unused = std.StaticBitSet(3).initFull(); for (ops, &mcvs, &locks, 0..) 
|op, *mcv, *lock, op_i| { - const op_index = @as(u2, @intCast(op_i)); + const op_index: u2 = @intCast(op_i); mcv.* = try self.resolveInst(op); if (unused.isSet(0) and mcv.isRegister() and self.reuseOperand(inst, op, op_index, mcv.*)) { order[op_index] = 1; @@ -11480,99 +11521,97 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { lock.* = self.register_manager.lockRegAssumeUnused(reg); } - const mir_tag = if (@as( - ?Mir.Inst.FixedTag, - if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) - switch (ty.zigTypeTag(mod)) { - .Float => switch (ty.floatBits(self.target.*)) { - 32 => .{ .v_ss, .fmadd132 }, - 64 => .{ .v_sd, .fmadd132 }, + const mir_tag = @as(?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or + mem.eql(u2, &order, &.{ 3, 1, 2 })) + switch (ty.zigTypeTag(mod)) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .{ .v_ss, .fmadd132 }, + 64 => .{ .v_sd, .fmadd132 }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { + 1 => .{ .v_ss, .fmadd132 }, + 2...8 => .{ .v_ps, .fmadd132 }, + else => null, + }, + 64 => switch (ty.vectorLen(mod)) { + 1 => .{ .v_sd, .fmadd132 }, + 2...4 => .{ .v_pd, .fmadd132 }, + else => null, + }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { - .Float => switch (ty.childType(mod).floatBits(self.target.*)) { - 32 => switch (ty.vectorLen(mod)) { - 1 => .{ .v_ss, .fmadd132 }, - 2...8 => .{ .v_ps, .fmadd132 }, - else => null, - }, - 64 => switch (ty.vectorLen(mod)) { - 1 => .{ .v_sd, .fmadd132 }, - 2...4 => .{ .v_pd, .fmadd132 }, - else => null, - }, - 16, 80, 128 => null, - else => unreachable, - }, - else => unreachable, - }, else => unreachable, - } - else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) - switch (ty.zigTypeTag(mod)) { - .Float => switch (ty.floatBits(self.target.*)) { - 32 => .{ .v_ss, .fmadd213 }, - 64 => .{ .v_sd, .fmadd213 }, + }, + else => unreachable, + } + else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) + switch (ty.zigTypeTag(mod)) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .{ .v_ss, .fmadd213 }, + 64 => .{ .v_sd, .fmadd213 }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { + 1 => .{ .v_ss, .fmadd213 }, + 2...8 => .{ .v_ps, .fmadd213 }, + else => null, + }, + 64 => switch (ty.vectorLen(mod)) { + 1 => .{ .v_sd, .fmadd213 }, + 2...4 => .{ .v_pd, .fmadd213 }, + else => null, + }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { - .Float => switch (ty.childType(mod).floatBits(self.target.*)) { - 32 => switch (ty.vectorLen(mod)) { - 1 => .{ .v_ss, .fmadd213 }, - 2...8 => .{ .v_ps, .fmadd213 }, - else => null, - }, - 64 => switch (ty.vectorLen(mod)) { - 1 => .{ .v_sd, .fmadd213 }, - 2...4 => .{ .v_pd, .fmadd213 }, - else => null, - }, - 16, 80, 128 => null, - else => unreachable, - }, - else => unreachable, - }, else => unreachable, - } - else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) - switch (ty.zigTypeTag(mod)) { - .Float => switch (ty.floatBits(self.target.*)) { - 32 => .{ .v_ss, .fmadd231 }, - 64 => .{ .v_sd, .fmadd231 }, + }, + else => 
unreachable, + } + else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) + switch (ty.zigTypeTag(mod)) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .{ .v_ss, .fmadd231 }, + 64 => .{ .v_sd, .fmadd231 }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { + 1 => .{ .v_ss, .fmadd231 }, + 2...8 => .{ .v_ps, .fmadd231 }, + else => null, + }, + 64 => switch (ty.vectorLen(mod)) { + 1 => .{ .v_sd, .fmadd231 }, + 2...4 => .{ .v_pd, .fmadd231 }, + else => null, + }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { - .Float => switch (ty.childType(mod).floatBits(self.target.*)) { - 32 => switch (ty.vectorLen(mod)) { - 1 => .{ .v_ss, .fmadd231 }, - 2...8 => .{ .v_ps, .fmadd231 }, - else => null, - }, - 64 => switch (ty.vectorLen(mod)) { - 1 => .{ .v_sd, .fmadd231 }, - 2...4 => .{ .v_pd, .fmadd231 }, - else => null, - }, - 16, 80, 128 => null, - else => unreachable, - }, - else => unreachable, - }, else => unreachable, - } - else - unreachable, - )) |tag| tag else return self.fail("TODO implement airMulAdd for {}", .{ + }, + else => unreachable, + } + else + unreachable) orelse return self.fail("TODO implement airMulAdd for {}", .{ ty.fmt(self.bin_file.options.module.?), }); var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size = @as(u32, @intCast(ty.abiSize(mod))); + const abi_size: u32 = @intCast(ty.abiSize(mod)); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -11725,7 +11764,7 @@ fn resolveCallingConventionValues( switch (self.target.os.tag) { .windows => { // Align the stack to 16bytes before allocating shadow stack space (if any). 
- result.stack_byte_count += @as(u31, @intCast(4 * Type.usize.abiSize(mod))); + result.stack_byte_count += @intCast(4 * Type.usize.abiSize(mod)); }, else => {}, } @@ -11748,7 +11787,7 @@ fn resolveCallingConventionValues( result.return_value = switch (classes[0]) { .integer => InstTracking.init(.{ .register = registerAlias( ret_reg, - @as(u32, @intCast(ret_ty.abiSize(mod))), + @intCast(ret_ty.abiSize(mod)), ) }), .float, .sse => InstTracking.init(.{ .register = .xmm0 }), .memory => ret: { @@ -11784,18 +11823,16 @@ fn resolveCallingConventionValues( }, .float, .sse => switch (self.target.os.tag) { .windows => if (param_reg_i < 4) { - arg.* = .{ .register = @as( - Register, - @enumFromInt(@intFromEnum(Register.xmm0) + param_reg_i), - ) }; + arg.* = .{ + .register = @enumFromInt(@intFromEnum(Register.xmm0) + param_reg_i), + }; param_reg_i += 1; continue; }, else => if (param_sse_reg_i < 8) { - arg.* = .{ .register = @as( - Register, - @enumFromInt(@intFromEnum(Register.xmm0) + param_sse_reg_i), - ) }; + arg.* = .{ + .register = @enumFromInt(@intFromEnum(Register.xmm0) + param_sse_reg_i), + }; param_sse_reg_i += 1; continue; }, @@ -11806,8 +11843,8 @@ fn resolveCallingConventionValues( }), } - const param_size = @as(u31, @intCast(ty.abiSize(mod))); - const param_align = @as(u31, @intCast(ty.abiAlignment(mod))); + const param_size: u31 = @intCast(ty.abiSize(mod)); + const param_align: u31 = @intCast(ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11827,7 +11864,7 @@ fn resolveCallingConventionValues( result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @as(u31, @intCast(ret_ty.abiSize(mod))); + const ret_ty_size: u31 = @intCast(ret_ty.abiSize(mod)); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; @@ -11846,8 +11883,8 @@ fn resolveCallingConventionValues( arg.* = .none; continue; } - const param_size = @as(u31, @intCast(ty.abiSize(mod))); - const param_align = @as(u31, @intCast(ty.abiAlignment(mod))); + const param_size: u31 = @intCast(ty.abiSize(mod)); + const param_align: u31 = @intCast(ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11934,12 +11971,12 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { const mod = self.bin_file.options.module.?; const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @as(u16, @intCast(ty.bitSize(mod))), + .bits = @intCast(ty.bitSize(mod)), }; const max_reg_bit_width = Register.rax.bitSize(); switch (int_info.signedness) { .signed => { - const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits)); + const shift: u6 = @intCast(max_reg_bit_width - int_info.bits); try self.genShiftBinOpMir( .{ ._l, .sa }, Type.isize, @@ -11954,8 +11991,8 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { ); }, .unsigned => { - const shift = @as(u6, @intCast(max_reg_bit_width - int_info.bits)); - const mask = (~@as(u64, 0)) >> shift; + const shift: u6 = @intCast(max_reg_bit_width - int_info.bits); + const mask = ~@as(u64, 0) >> shift; if (int_info.bits <= 32) { try self.genBinOpMir( .{ ._, .@"and" }, diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig index 
04b21b9e21..a640853d5d 100644 --- a/src/arch/x86_64/bits.zig +++ b/src/arch/x86_64/bits.zig @@ -384,7 +384,7 @@ test "Register id - different classes" { try expect(Register.xmm0.id() != Register.mm0.id()); try expect(Register.mm0.id() != Register.st0.id()); - try expect(Register.es.id() == 0b100000); + try expect(Register.es.id() == 0b110000); } test "Register enc - different classes" { diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index a0cd1af0a7..3518d03d4a 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -369,11 +369,11 @@ pub const table = [_]Entry{ .{ .mov, .rm, &.{ .sreg, .r32_m16 }, &.{ 0x8e }, 0, .none, .none }, .{ .mov, .rm, &.{ .sreg, .r64_m16 }, &.{ 0x8e }, 0, .long, .none }, .{ .mov, .fd, &.{ .al, .moffs }, &.{ 0xa0 }, 0, .none, .none }, - .{ .mov, .fd, &.{ .ax, .moffs }, &.{ 0xa1 }, 0, .none, .none }, + .{ .mov, .fd, &.{ .ax, .moffs }, &.{ 0xa1 }, 0, .short, .none }, .{ .mov, .fd, &.{ .eax, .moffs }, &.{ 0xa1 }, 0, .none, .none }, .{ .mov, .fd, &.{ .rax, .moffs }, &.{ 0xa1 }, 0, .long, .none }, .{ .mov, .td, &.{ .moffs, .al }, &.{ 0xa2 }, 0, .none, .none }, - .{ .mov, .td, &.{ .moffs, .ax }, &.{ 0xa3 }, 0, .none, .none }, + .{ .mov, .td, &.{ .moffs, .ax }, &.{ 0xa3 }, 0, .short, .none }, .{ .mov, .td, &.{ .moffs, .eax }, &.{ 0xa3 }, 0, .none, .none }, .{ .mov, .td, &.{ .moffs, .rax }, &.{ 0xa3 }, 0, .long, .none }, .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .none, .none }, diff --git a/src/codegen.zig b/src/codegen.zig index 9e5ae11a63..69499fb1ad 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -558,7 +558,7 @@ pub fn generateSymbol( } // Check if we should store the tag first. - if (layout.tag_align >= layout.payload_align) { + if (layout.tag_size > 0 and layout.tag_align >= layout.payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = typed_value.ty.unionTagType(mod).?, .val = un.tag.toValue(), @@ -589,7 +589,7 @@ pub fn generateSymbol( } } - if (layout.tag_size > 0) { + if (layout.tag_size > 0 and layout.tag_align < layout.payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = union_ty.tag_ty, .val = un.tag.toValue(), @@ -597,10 +597,10 @@ pub fn generateSymbol( .ok => {}, .fail => |em| return Result{ .fail = em }, } - } - if (layout.padding > 0) { - try code.writer().writeByteNTimes(0, layout.padding); + if (layout.padding > 0) { + try code.writer().writeByteNTimes(0, layout.padding); + } } }, .memoized_call => unreachable, @@ -684,10 +684,22 @@ fn lowerParentPtr( .struct_type, .anon_struct_type, .union_type, - => @as(u32, @intCast(base_type.toType().structFieldOffset( - @as(u32, @intCast(field.index)), - mod, - ))), + => switch (base_type.toType().containerLayout(mod)) { + .Auto, .Extern => @intCast(base_type.toType().structFieldOffset( + @intCast(field.index), + mod, + )), + .Packed => if (mod.typeToStruct(base_type.toType())) |struct_obj| + math.divExact(u16, struct_obj.packedFieldBitOffset( + mod, + @intCast(field.index), + ), 8) catch |err| switch (err) { + error.UnexpectedRemainder => 0, + error.DivisionByZero => unreachable, + } + else + 0, + }, else => unreachable, }), ); diff --git a/test/behavior/bugs/13664.zig b/test/behavior/bugs/13664.zig index b0ea3f70af..828408510d 100644 --- a/test/behavior/bugs/13664.zig +++ b/test/behavior/bugs/13664.zig @@ -13,7 +13,6 @@ fn value() i64 { return 1341; } test { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/1381.zig b/test/behavior/bugs/1381.zig index 90941de341..cd0b289e5f 100644 --- a/test/behavior/bugs/1381.zig +++ b/test/behavior/bugs/1381.zig @@ -12,7 +12,6 @@ const A = union(enum) { }; test "union that needs padding bytes inside an array" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig index 12cc027ef4..e2970c3603 100644 --- a/test/behavior/packed-struct.zig +++ b/test/behavior/packed-struct.zig @@ -376,7 +376,6 @@ test "load pointer from packed struct" { } test "@intFromPtr on a packed struct field" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -608,7 +607,6 @@ test "pointer to container level packed struct field" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 08c4c37eee..1335498ca4 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -427,7 +427,6 @@ test "packed struct 24bits" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.cpu.arch == .wasm32) return error.SkipZigTest; // TODO if (builtin.cpu.arch == .arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -531,7 +530,6 @@ test "packed struct fields are ordered from LSB to MSB" { test "implicit cast packed struct field to const ptr" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -1079,7 +1077,6 @@ test "packed struct with undefined initializers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git 
a/test/behavior/threadlocal.zig b/test/behavior/threadlocal.zig index 763c87605c..9a6e0542e8 100644 --- a/test/behavior/threadlocal.zig +++ b/test/behavior/threadlocal.zig @@ -12,6 +12,11 @@ test "thread local variable" { }; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) { + // Fails due to register hazards. + return error.SkipZigTest; + } + const S = struct { threadlocal var t: i32 = 1234; };
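
Aside (not part of the patch): the src/codegen.zig hunks fix how `generateSymbol` serializes a tagged union constant. Tag bytes are now emitted only when `layout.tag_size > 0`, the tag precedes the payload only when `tag_align >= payload_align`, and the trailing padding write moves inside the payload-first branch; a companion hunk teaches `lowerParentPtr` to derive a packed-struct field's byte offset from `packedFieldBitOffset`. The previously skipped union and packed-struct behavior tests re-enabled above appear to exercise exactly these paths. A toy mirror of just the tag-placement rule, with illustrative names rather than the compiler's actual types:

    const std = @import("std");

    const Layout = struct {
        tag_size: u64,
        tag_align: u32,
        payload_align: u32,

        fn tagFirst(l: @This()) bool {
            return l.tag_size > 0 and l.tag_align >= l.payload_align;
        }

        fn tagAfterPayload(l: @This()) bool {
            return l.tag_size > 0 and l.tag_align < l.payload_align;
        }
    };

    test "tag placement follows relative alignment" {
        // A 1-byte tag beside an 8-byte-aligned payload: payload first.
        const a: Layout = .{ .tag_size = 1, .tag_align = 1, .payload_align = 8 };
        try std.testing.expect(!a.tagFirst() and a.tagAfterPayload());

        // An untagged union writes no tag bytes at all.
        const b: Layout = .{ .tag_size = 0, .tag_align = 1, .payload_align = 4 };
        try std.testing.expect(!b.tagFirst() and !b.tagAfterPayload());
    }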
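
Aside (not part of the patch): one non-mechanical detail worth spelling out is in the genBinOpMir hunk near the top of this diff, which derives the upper word of a signed immediate as `@bitCast(@as(i64, @bitCast(src_imm)) >> 63)`. An arithmetic shift by 63 smears the sign bit through the whole word, giving all-ones for a negative value and zero otherwise, which is exactly the sign-extension word the wider operation needs. A quick standalone check of that identity:

    const std = @import("std");

    fn signWord(imm: u64) u64 {
        // Arithmetic shift carries the sign bit into all 64 positions.
        return @bitCast(@as(i64, @bitCast(imm)) >> 63);
    }

    test "shift by 63 yields the sign-extension word" {
        try std.testing.expectEqual(@as(u64, std.math.maxInt(u64)), signWord(@bitCast(@as(i64, -2))));
        try std.testing.expectEqual(@as(u64, 0), signWord(2));
    }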