Merge pull request #17256 from ziglang/packed-bit-offsets

compiler: packed structs cache bit offsets
Andrew Kelley 2023-09-24 19:42:06 -07:00 committed by GitHub
commit eb072fa528
4 changed files with 17 additions and 19 deletions

src/InternPool.zig

@@ -4195,7 +4195,7 @@ fn extraPackedStructType(ip: *const InternPool, extra_index: u32, inits: bool) K
             .len = fields_len,
         },
         .field_inits = if (inits) .{
-            .start = type_struct_packed.end + fields_len + fields_len,
+            .start = type_struct_packed.end + fields_len * 2,
             .len = fields_len,
         } else .{
             .start = 0,
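
The change above is a readability fix: `fields_len * 2` replaces the equivalent `fields_len + fields_len`. The expression encodes how a packed struct's trailing data is indexed: two arrays of `fields_len` items each (presumably the field types and field names) sit between the fixed-size header and the optional field inits. Below is a minimal sketch of that indexing arithmetic; all names (`trailingStarts`, `header_end`) are invented here for illustration and are not taken from InternPool.

const std = @import("std");

/// Hypothetical layout, not the real InternPool encoding: trailing data
/// after a packed struct header is [types][names][inits], each array
/// `fields_len` items long.
fn trailingStarts(header_end: u32, fields_len: u32) struct { types: u32, names: u32, inits: u32 } {
    return .{
        .types = header_end,
        .names = header_end + fields_len,
        // The same quantity the diff computes: end + fields_len * 2.
        .inits = header_end + fields_len * 2,
    };
}

test "inits start two arrays past the header" {
    const s = trailingStarts(100, 3);
    try std.testing.expectEqual(@as(u32, 103), s.names);
    try std.testing.expectEqual(@as(u32, 106), s.inits);
}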

src/Module.zig

@@ -6650,10 +6650,10 @@ pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
     return ty_abi_align;
 }
 
-/// TODO: avoid linear search by storing these in trailing data of packed struct types
-/// then packedStructFieldByteOffset can be expressed in terms of bits / 8, fixing
-/// that one too.
-/// https://github.com/ziglang/zig/issues/17178
+/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
+/// into the packed struct InternPool data rather than computing this on the
+/// fly, however it was found to perform worse when measured on real world
+/// projects.
 pub fn structPackedFieldBitOffset(
     mod: *Module,
     struct_type: InternPool.Key.StructType,
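
The deleted TODO proposed caching these offsets in the packed struct's trailing data; the rewritten comment records what actually happened: caching was tried and measured slower on real-world projects, so the linear scan stays. Here is a self-contained sketch of the scan the comment refers to, using a plain slice of bit sizes in place of the compiler's `Type` machinery; the names are illustrative, not the compiler's API.

const std = @import("std");

/// Stand-in for structPackedFieldBitOffset: a field's bit offset in a
/// packed struct is the sum of the bit sizes of all fields before it.
/// O(n) per query, yet cheaper in practice than caching the offsets.
fn packedBitOffset(field_bit_sizes: []const u16, field_index: usize) u32 {
    var running_bits: u32 = 0;
    for (field_bit_sizes[0..field_index]) |bits| running_bits += bits;
    return running_bits;
}

test "linear scan over preceding fields" {
    // packed struct { a: u3, b: u9, c: u4 }: c starts at bit 3 + 9 = 12.
    try std.testing.expectEqual(@as(u32, 12), packedBitOffset(&.{ 3, 9, 4 }, 2));
}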

src/Sema.zig

@@ -21377,8 +21377,9 @@ fn reifyStruct(
     }
 
     var fields_bit_sum: u64 = 0;
-    for (struct_type.field_types.get(ip)) |field_ty| {
-        fields_bit_sum += field_ty.toType().bitSize(mod);
+    for (0..struct_type.field_types.len) |i| {
+        const field_ty = struct_type.field_types.get(ip)[i].toType();
+        fields_bit_sum += field_ty.bitSize(mod);
     }
 
     if (backing_int_val.optionalValue(mod)) |backing_int_ty_val| {
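
For context, this loop runs during `@Type` reification: Sema sums the bit sizes of all fields so it can verify that the requested backing integer is exactly that wide. The same invariant is observable from ordinary user code, as in this small test (standard Zig, nothing compiler-internal):

const std = @import("std");

// A packed struct's fields must exactly fill its backing integer:
// 5 + 27 == 32 bits, so u32 is the only legal backing type here.
const Header = packed struct(u32) {
    version: u5,
    payload_len: u27,
};

test "field bits sum to the backing integer width" {
    try std.testing.expectEqual(32, @bitSizeOf(Header));
}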

src/type.zig

@@ -3021,27 +3021,24 @@ pub const Type = struct {
         };
     }
 
-    pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
+    pub fn packedStructFieldBitOffset(ty: Type, field_index: usize, mod: *Module) u32 {
         const ip = &mod.intern_pool;
         const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
         assert(struct_type.layout == .Packed);
         comptime assert(Type.packed_struct_layout_version == 2);
-        var bit_offset: u16 = undefined;
-        var elem_size_bits: u16 = undefined;
-        var running_bits: u16 = 0;
+        var running_bits: u32 = 0;
        for (struct_type.field_types.get(ip), 0..) |field_ty, i| {
+            if (i == field_index) break;
            if (!field_ty.toType().hasRuntimeBits(mod)) continue;
-            const field_bits: u16 = @intCast(field_ty.toType().bitSize(mod));
-            if (i == field_index) {
-                bit_offset = running_bits;
-                elem_size_bits = field_bits;
-            }
+            const field_bits: u32 = @intCast(field_ty.toType().bitSize(mod));
            running_bits += field_bits;
        }
-        const byte_offset = bit_offset / 8;
-        return byte_offset;
+        return running_bits;
     }
 
+    pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
+        return packedStructFieldBitOffset(ty, field_index, mod) / 8;
+    }
+
     pub const FieldOffset = struct {
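
The refactor splits the old function in two: `packedStructFieldBitOffset` accumulates the bit sizes of the fields before `field_index` (in a `u32` now, so packed structs wider than 65535 bits no longer overflow the counter), and `packedStructFieldByteOffset` becomes a thin `/ 8` wrapper over it. The now-unneeded `elem_size_bits` bookkeeping goes away, and breaking before the `hasRuntimeBits` check means a zero-bit field at `field_index` gets a well-defined offset, where the old code left `bit_offset` undefined. At the language level the same two quantities show up as `@bitOffsetOf` and its quotient by 8, as this small check illustrates (ordinary Zig, independent of the compiler code above):

const std = @import("std");

const S = packed struct(u16) {
    a: u3,
    b: u9,
    c: u4,
};

test "bit offset and its byte-offset quotient" {
    try std.testing.expectEqual(3, @bitOffsetOf(S, "b")); // 3 bits of `a` precede it
    try std.testing.expectEqual(12, @bitOffsetOf(S, "c")); // 3 + 9 preceding bits
    // The byte offset is the bit offset divided by 8, exactly what the
    // new wrapper computes.
    try std.testing.expectEqual(1, @bitOffsetOf(S, "c") / 8);
}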