mirror of https://github.com/ziglang/zig.git (synced 2026-02-20 00:08:56 +00:00)
std: replace usage of std.meta.bitCount() with @bitSizeOf()
parent 6c0719fd5f
commit 6f4343b61a
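For integer types, std.meta.bitCount(T) and the @bitSizeOf(T) builtin report the same value, the width of T in bits, so every hunk below is a mechanical one-for-one substitution. A minimal sketch of the equivalence (not part of the commit):

    const std = @import("std");

    comptime {
        // @bitSizeOf reports a type's width in bits, the same number the
        // removed std.meta.bitCount helper computed for integer types.
        std.debug.assert(@bitSizeOf(u7) == 7);
        std.debug.assert(@bitSizeOf(u64) == 64);
        std.debug.assert(@bitSizeOf(usize) == @sizeOf(usize) * 8);
    }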
@@ -167,10 +167,10 @@ pub const DefaultRwLock = struct {

     const IS_WRITING: usize = 1;
     const WRITER: usize = 1 << 1;
-    const READER: usize = 1 << (1 + std.meta.bitCount(Count));
+    const READER: usize = 1 << (1 + @bitSizeOf(Count));
     const WRITER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, WRITER);
     const READER_MASK: usize = std.math.maxInt(Count) << @ctz(usize, READER);
-    const Count = std.meta.Int(.unsigned, @divFloor(std.meta.bitCount(usize) - 1, 2));
+    const Count = std.meta.Int(.unsigned, @divFloor(@bitSizeOf(usize) - 1, 2));

     pub fn tryLock(rwl: *DefaultRwLock) bool {
         if (rwl.mutex.tryLock()) {
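The two changed constants in this hunk derive the lock layout from the word size: the state word keeps IS_WRITING in bit 0, a Count-wide writer field starting at the WRITER bit, and a Count-wide reader field starting at READER, which is why Count gets @divFloor(@bitSizeOf(usize) - 1, 2) bits. A worked check of that arithmetic, assuming a 64-bit target (illustration only, not part of the commit):

    const std = @import("std");

    comptime {
        const Count = std.meta.Int(.unsigned, @divFloor(@bitSizeOf(usize) - 1, 2));
        if (@bitSizeOf(usize) == 64) {
            // 64-bit usize: Count is u31, so READER = 1 << (1 + 31) = 1 << 32.
            std.debug.assert(@bitSizeOf(Count) == 31);
            std.debug.assert(1 << (1 + @bitSizeOf(Count)) == 1 << 32);
        }
    }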
@@ -542,7 +542,7 @@ test "Atomic.bitSet" {
     inline for (atomicIntTypes()) |Int| {
         inline for (atomic_rmw_orderings) |ordering| {
             var x = Atomic(Int).init(0);
-            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+            const bit_array = @as([@bitSizeOf(Int)]void, undefined);

             for (bit_array) |_, bit_index| {
                 const bit = @intCast(std.math.Log2Int(Int), bit_index);
@@ -572,7 +572,7 @@ test "Atomic.bitReset" {
     inline for (atomicIntTypes()) |Int| {
         inline for (atomic_rmw_orderings) |ordering| {
             var x = Atomic(Int).init(0);
-            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+            const bit_array = @as([@bitSizeOf(Int)]void, undefined);

             for (bit_array) |_, bit_index| {
                 const bit = @intCast(std.math.Log2Int(Int), bit_index);
@@ -603,7 +603,7 @@ test "Atomic.bitToggle" {
     inline for (atomicIntTypes()) |Int| {
         inline for (atomic_rmw_orderings) |ordering| {
             var x = Atomic(Int).init(0);
-            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+            const bit_array = @as([@bitSizeOf(Int)]void, undefined);

             for (bit_array) |_, bit_index| {
                 const bit = @intCast(std.math.Log2Int(Int), bit_index);
@@ -235,7 +235,7 @@ pub fn Field(comptime params: FieldParams) type {
         }
         var v_opp: Limbs = undefined;
         fiat.opp(&v_opp, v);
-        fiat.selectznz(&v, @truncate(u1, f[f.len - 1] >> (meta.bitCount(Word) - 1)), v, v_opp);
+        fiat.selectznz(&v, @truncate(u1, f[f.len - 1] >> (@bitSizeOf(Word) - 1)), v, v_opp);
         var fe: Fe = undefined;
         fiat.mul(&fe.limbs, v, precomp);
         return fe;
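Illustrative aside (not from the commit): shifting a limb right by @bitSizeOf(Word) - 1 leaves only its most significant bit, which @truncate(u1, ...) then turns into the 0/1 selector that fiat.selectznz expects. A standalone check, assuming 64-bit limbs:

    const std = @import("std");

    comptime {
        const Word = u64; // assumed limb type, for illustration only
        const top_set: Word = 1 << 63;
        std.debug.assert(@truncate(u1, top_set >> (@bitSizeOf(Word) - 1)) == 1);
        std.debug.assert(@truncate(u1, (top_set - 1) >> (@bitSizeOf(Word) - 1)) == 0);
    }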
@@ -348,7 +348,7 @@ const crypt_format = struct {
         }
     }

-    fn intDecode(comptime T: type, src: *const [(meta.bitCount(T) + 5) / 6]u8) !T {
+    fn intDecode(comptime T: type, src: *const [(@bitSizeOf(T) + 5) / 6]u8) !T {
         var v: T = 0;
         for (src) |x, i| {
             const vi = mem.indexOfScalar(u8, &map64, x) orelse return EncodingError.InvalidEncoding;
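The array length in intDecode's signature follows from the encoding: each crypt-style base64 character carries 6 bits, so a T needs ceil(@bitSizeOf(T) / 6) characters, written here as (@bitSizeOf(T) + 5) / 6. A quick check of that arithmetic (not part of the commit):

    const std = @import("std");

    comptime {
        std.debug.assert((@bitSizeOf(u32) + 5) / 6 == 6); // 32 bits -> 6 characters
        std.debug.assert((@bitSizeOf(u64) + 5) / 6 == 11); // 64 bits -> 11 characters
    }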
@@ -14,7 +14,7 @@ const Loop = std.event.Loop;
 /// `begin` will return error.Overflow when the limit is reached, even
 /// if the integer type has not overflowed.
 /// By default `max_value` is set to std.math.maxInt(CounterType).
-pub const WaitGroup = WaitGroupGeneric(std.meta.bitCount(usize));
+pub const WaitGroup = WaitGroupGeneric(@bitSizeOf(usize));

 pub fn WaitGroupGeneric(comptime counter_size: u16) type {
     const CounterType = std.meta.Int(.unsigned, counter_size);
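With the new spelling the default WaitGroup still sizes its counter to the machine word: WaitGroupGeneric(@bitSizeOf(usize)) builds a CounterType of std.meta.Int(.unsigned, counter_size). A small sketch of that relationship, assuming a 64-bit target:

    const std = @import("std");

    comptime {
        const CounterType = std.meta.Int(.unsigned, @bitSizeOf(usize));
        if (@bitSizeOf(usize) == 64) {
            std.debug.assert(CounterType == u64); // default counter is a full machine word
        }
    }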
@@ -1121,7 +1121,7 @@ pub fn formatFloatHexadecimal(
     }

     const T = @TypeOf(value);
-    const TU = std.meta.Int(.unsigned, std.meta.bitCount(T));
+    const TU = std.meta.Int(.unsigned, @bitSizeOf(T));

     const mantissa_bits = math.floatMantissaBits(T);
     const fractional_bits = math.floatFractionalBits(T);
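TU is the unsigned integer with the same bit width as the float being formatted, so the float's raw bit pattern can be inspected directly. A brief illustration (not from the commit):

    const std = @import("std");

    comptime {
        // e.g. f32 maps to u32 and f64 maps to u64.
        std.debug.assert(std.meta.Int(.unsigned, @bitSizeOf(f32)) == u32);
        std.debug.assert(std.meta.Int(.unsigned, @bitSizeOf(f64)) == u64);
    }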
@@ -1181,7 +1181,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
     // very near usize?
     if (mem.page_size << 2 > maxInt(usize)) return;

-    const USizeShift = std.meta.Int(.unsigned, std.math.log2(std.meta.bitCount(usize)));
+    const USizeShift = std.meta.Int(.unsigned, std.math.log2(@bitSizeOf(usize)));
     const large_align = @as(u29, mem.page_size << 2);

     var align_mask: usize = undefined;
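USizeShift is the smallest unsigned type that can hold a shift amount for usize, i.e. log2 of the word's bit width. A worked example, assuming a 64-bit target:

    const std = @import("std");

    comptime {
        if (@bitSizeOf(usize) == 64) {
            const USizeShift = std.meta.Int(.unsigned, std.math.log2(@bitSizeOf(usize)));
            std.debug.assert(USizeShift == u6); // shifts of a 64-bit value need 6 bits
        }
    }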
@@ -17,9 +17,9 @@ pub fn BitReader(endian: std.builtin.Endian, comptime ReaderType: type) type {
         pub const Reader = io.Reader(*Self, Error, read);

         const Self = @This();
-        const u8_bit_count = meta.bitCount(u8);
-        const u7_bit_count = meta.bitCount(u7);
-        const u4_bit_count = meta.bitCount(u4);
+        const u8_bit_count = @bitSizeOf(u8);
+        const u7_bit_count = @bitSizeOf(u7);
+        const u4_bit_count = @bitSizeOf(u4);

         pub fn init(forward_reader: ReaderType) Self {
             return Self{
@@ -47,7 +47,7 @@ pub fn BitReader(endian: std.builtin.Endian, comptime ReaderType: type) type {

             //by extending the buffer to a minimum of u8 we can cover a number of edge cases
             // related to shifting and casting.
-            const u_bit_count = comptime meta.bitCount(U);
+            const u_bit_count = @bitSizeOf(U);
             const buf_bit_count = bc: {
                 assert(u_bit_count >= bits);
                 break :bc if (u_bit_count <= u8_bit_count) u8_bit_count else u_bit_count;
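The buf_bit_count block widens the working bit count to at least one byte, since reads from the underlying reader happen at u8 granularity; types wider than u8 keep their own width. A hypothetical check of that selection (names local to this sketch):

    const std = @import("std");

    comptime {
        const u8_bit_count = @bitSizeOf(u8);
        const narrow = if (@bitSizeOf(u3) <= u8_bit_count) u8_bit_count else @bitSizeOf(u3);
        const wide = if (@bitSizeOf(u19) <= u8_bit_count) u8_bit_count else @bitSizeOf(u19);
        std.debug.assert(narrow == 8 and wide == 19);
    }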
@@ -17,8 +17,8 @@ pub fn BitWriter(endian: std.builtin.Endian, comptime WriterType: type) type {
         pub const Writer = io.Writer(*Self, Error, write);

         const Self = @This();
-        const u8_bit_count = meta.bitCount(u8);
-        const u4_bit_count = meta.bitCount(u4);
+        const u8_bit_count = @bitSizeOf(u8);
+        const u4_bit_count = @bitSizeOf(u4);

         pub fn init(forward_writer: WriterType) Self {
             return Self{
@@ -39,7 +39,7 @@ pub fn BitWriter(endian: std.builtin.Endian, comptime WriterType: type) type {

             //by extending the buffer to a minimum of u8 we can cover a number of edge cases
             // related to shifting and casting.
-            const u_bit_count = comptime meta.bitCount(U);
+            const u_bit_count = @bitSizeOf(U);
             const buf_bit_count = bc: {
                 assert(u_bit_count >= bits);
                 break :bc if (u_bit_count <= u8_bit_count) u8_bit_count else u_bit_count;
@@ -947,10 +947,10 @@ test "absCast" {

 /// Returns the negation of the integer parameter.
 /// Result is a signed integer.
-pub fn negateCast(x: anytype) !std.meta.Int(.signed, std.meta.bitCount(@TypeOf(x))) {
+pub fn negateCast(x: anytype) !std.meta.Int(.signed, @bitSizeOf(@TypeOf(x))) {
     if (@typeInfo(@TypeOf(x)).Int.signedness == .signed) return negate(x);

-    const int = std.meta.Int(.signed, std.meta.bitCount(@TypeOf(x)));
+    const int = std.meta.Int(.signed, @bitSizeOf(@TypeOf(x)));
     if (x > -minInt(int)) return error.Overflow;

     if (x == -minInt(int)) return minInt(int);
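negateCast pairs an unsigned input with a signed result of the same bit width, and the overflow checks fall out of that pairing: only values up to -minInt of the result type can be negated. A loose illustration of the width relationship (not the commit's own test):

    const std = @import("std");

    comptime {
        // A u32 argument produces an i32 result type...
        std.debug.assert(std.meta.Int(.signed, @bitSizeOf(u32)) == i32);
        // ...so the largest negatable input is -minInt(i32) = 2147483648.
        std.debug.assert(-std.math.minInt(i32) == 2147483648);
    }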
@@ -5561,7 +5561,7 @@ pub fn res_mkquery(
     // Make a reasonably unpredictable id
     var ts: timespec = undefined;
     clock_gettime(CLOCK.REALTIME, &ts) catch {};
-    const UInt = std.meta.Int(.unsigned, std.meta.bitCount(@TypeOf(ts.tv_nsec)));
+    const UInt = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(ts.tv_nsec)));
     const unsec = @bitCast(UInt, ts.tv_nsec);
     const id = @truncate(u32, unsec + unsec / 65536);
     q[0] = @truncate(u8, id / 256);
@@ -341,7 +341,7 @@ pub const DeclGen = struct {
         // We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any
         // SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this
         // might need to be updated.
-        assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
+        assert(self.largestSupportedIntBits() <= @bitSizeOf(u64));

         // Note, value is required to be sign-extended, so we don't need to mask off the upper bits.
         // See https://www.khronos.org/registry/SPIR-V/specs/unified1/SPIRV.html#Literal
@@ -170,7 +170,7 @@ fn writeString(section: *Section, str: []const u8) void {

         var j: usize = 0;
         while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) {
-            word |= @as(Word, str[i + j]) << @intCast(Log2Word, j * std.meta.bitCount(u8));
+            word |= @as(Word, str[i + j]) << @intCast(Log2Word, j * @bitSizeOf(u8));
         }

         section.instructions.appendAssumeCapacity(word);
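writeString packs string bytes into SPIR-V words by shifting each successive byte 8 bits (one @bitSizeOf(u8)) higher. A standalone sketch of the packing, with Word standing in for the codegen's word type:

    const std = @import("std");

    test "pack four bytes into one 32-bit word" {
        const Word = u32;
        const str = "abcd";
        var word: Word = 0;
        var j: usize = 0;
        while (j < @sizeOf(Word) and j < str.len) : (j += 1) {
            word |= @as(Word, str[j]) << @intCast(u5, j * @bitSizeOf(u8));
        }
        // little-endian packing: 'a' lands in the low byte, 'd' in the high byte
        try std.testing.expectEqual(@as(Word, 0x64636261), word);
    }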