diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 36e9813c2a..d3e874a211 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -1832,7 +1832,6 @@ pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: Endian) v
 pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value: anytype, endian: std.builtin.Endian) void {
     const T = @TypeOf(value);
     const uN = std.meta.Int(.unsigned, @bitSizeOf(T));
-    const Log2N = std.math.Log2Int(T);
 
     const bit_shift = @as(u3, @intCast(bit_offset % 8));
     const write_size = (bit_count + bit_shift + 7) / 8;
@@ -1861,9 +1860,9 @@ pub fn writeVarPackedInt(bytes: []u8, bit_offset: usize, bit_count: usize, value
 
     // Write first byte, using a mask to protects bits preceding bit_offset
     const head_mask = @as(u8, 0xff) >> bit_shift;
-    write_bytes[@as(usize, @intCast(i))] &= ~(head_mask << bit_shift);
-    write_bytes[@as(usize, @intCast(i))] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & head_mask)) << bit_shift;
-    remaining >>= @as(Log2N, @intCast(@as(u4, 8) - bit_shift));
+    write_bytes[@intCast(i)] &= ~(head_mask << bit_shift);
+    write_bytes[@intCast(i)] |= @as(u8, @intCast(@as(uN, @bitCast(remaining)) & head_mask)) << bit_shift;
+    remaining = math.shr(T, remaining, @as(u4, 8) - bit_shift);
     i += delta;
 
     // Write bytes[1..bytes.len - 1]
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 1b8a9fbd7f..b1e5265815 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -4341,6 +4341,9 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
     const result = result: {
         const dst_ty = self.typeOfIndex(inst);
         const src_ty = self.typeOf(ty_op.operand);
+        if (src_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement airClz for {}", .{
+            src_ty.fmt(mod),
+        });
         const src_mcv = try self.resolveInst(ty_op.operand);
 
         const mat_src_mcv = switch (src_mcv) {
@@ -4691,7 +4694,9 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m
     defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
     switch (src_bits) {
-        else => unreachable,
+        else => return self.fail("TODO implement byteSwap for {}", .{
+            src_ty.fmt(mod),
+        }),
         8 => return if ((mem_ok or src_mcv.isRegister()) and
             self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
             src_mcv
@@ -6093,16 +6098,16 @@ fn genShiftBinOp(
     rhs_ty: Type,
 ) !MCValue {
     const mod = self.bin_file.options.module.?;
-    if (lhs_ty.zigTypeTag(mod) == .Vector) {
-        return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
-    }
+    if (lhs_ty.zigTypeTag(mod) == .Vector) return self.fail("TODO implement genShiftBinOp for {}", .{
+        lhs_ty.fmt(mod),
+    });
 
     assert(rhs_ty.abiSize(mod) == 1);
 
     const lhs_abi_size = lhs_ty.abiSize(mod);
-    if (lhs_abi_size > 16) {
-        return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
-    }
+    if (lhs_abi_size > 16) return self.fail("TODO implement genShiftBinOp for {}", .{
+        lhs_ty.fmt(mod),
+    });
 
     try self.register_manager.getReg(.rcx, null);
     const rcx_lock = self.register_manager.lockRegAssumeUnused(.rcx);
@@ -6158,11 +6163,12 @@ fn genMulDivBinOp(
     rhs: MCValue,
 ) !MCValue {
     const mod = self.bin_file.options.module.?;
-    if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) {
-        return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()});
-    }
-    const dst_abi_size = @as(u32, @intCast(dst_ty.abiSize(mod)));
-    const src_abi_size = @as(u32, @intCast(src_ty.abiSize(mod)));
+    if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) return self.fail(
+        "TODO implement genMulDivBinOp for {}",
+        .{dst_ty.fmt(mod)},
+    );
+    const dst_abi_size: u32 = @intCast(dst_ty.abiSize(mod));
+    const src_abi_size: u32 = @intCast(src_ty.abiSize(mod));
     if (switch (tag) {
         else => unreachable,
         .mul, .mul_wrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,