diff --git a/src/InternPool.zig b/src/InternPool.zig
index 368ef51826..c5f6c3b141 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -4195,7 +4195,7 @@ fn extraPackedStructType(ip: *const InternPool, extra_index: u32, inits: bool) K
             .len = fields_len,
         },
         .field_inits = if (inits) .{
-            .start = type_struct_packed.end + fields_len + fields_len,
+            .start = type_struct_packed.end + fields_len * 2,
             .len = fields_len,
         } else .{
             .start = 0,
diff --git a/src/Module.zig b/src/Module.zig
index 17a97e6c6d..bdf8d3a768 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -6650,10 +6650,10 @@ pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
     return ty_abi_align;
 }
 
-/// TODO: avoid linear search by storing these in trailing data of packed struct types
-/// then packedStructFieldByteOffset can be expressed in terms of bits / 8, fixing
-/// that one too.
-/// https://github.com/ziglang/zig/issues/17178
+/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
+/// into the packed struct InternPool data rather than computing this on the
+/// fly; however, it was found to perform worse when measured on real-world
+/// projects.
 pub fn structPackedFieldBitOffset(
     mod: *Module,
     struct_type: InternPool.Key.StructType,
diff --git a/src/Sema.zig b/src/Sema.zig
index c33a1a7603..b49a8a9997 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -21377,8 +21377,9 @@ fn reifyStruct(
     }
 
     var fields_bit_sum: u64 = 0;
-    for (struct_type.field_types.get(ip)) |field_ty| {
-        fields_bit_sum += field_ty.toType().bitSize(mod);
+    for (0..struct_type.field_types.len) |i| {
+        const field_ty = struct_type.field_types.get(ip)[i].toType();
+        fields_bit_sum += field_ty.bitSize(mod);
     }
 
     if (backing_int_val.optionalValue(mod)) |backing_int_ty_val| {
diff --git a/src/type.zig b/src/type.zig
index a4f85ae946..ee0b19d099 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -3021,27 +3021,24 @@ pub const Type = struct {
         };
     }
 
-    pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
+    pub fn packedStructFieldBitOffset(ty: Type, field_index: usize, mod: *Module) u32 {
         const ip = &mod.intern_pool;
         const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
         assert(struct_type.layout == .Packed);
         comptime assert(Type.packed_struct_layout_version == 2);
 
-        var bit_offset: u16 = undefined;
-        var elem_size_bits: u16 = undefined;
-        var running_bits: u16 = 0;
+        var running_bits: u32 = 0;
         for (struct_type.field_types.get(ip), 0..) |field_ty, i| {
+            if (i == field_index) break;
             if (!field_ty.toType().hasRuntimeBits(mod)) continue;
-
-            const field_bits: u16 = @intCast(field_ty.toType().bitSize(mod));
-            if (i == field_index) {
-                bit_offset = running_bits;
-                elem_size_bits = field_bits;
-            }
+            const field_bits: u32 = @intCast(field_ty.toType().bitSize(mod));
             running_bits += field_bits;
         }
-        const byte_offset = bit_offset / 8;
-        return byte_offset;
-    }
+        return running_bits;
+    }
+
+    pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
+        return packedStructFieldBitOffset(ty, field_index, mod) / 8;
+    }
 
     pub const FieldOffset = struct {
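
The InternPool.zig hunk rewrites the `field_inits` start as `end + fields_len * 2`, reflecting that a packed struct's trailing data is laid out as consecutive parallel arrays of `fields_len` entries each. A minimal sketch of that indexing, assuming the arrays are ordered field types, then field names, then field inits (the helper name and the ordering here are illustrative, not the compiler's authoritative layout):

```zig
const std = @import("std");

/// Hypothetical stand-in for the trailing-data indexing in
/// extraPackedStructType: after the fixed-size record ends at `end`,
/// parallel arrays of `fields_len` entries each follow.
fn fieldInitsStart(end: u32, fields_len: u32) u32 {
    // field_types: end + fields_len * 0
    // field_names: end + fields_len * 1
    // field_inits: end + fields_len * 2
    return end + fields_len * 2;
}

test "field_inits starts past two trailing arrays" {
    // 3 field types + 3 field names precede the inits: 10 + 3 * 2 = 16.
    try std.testing.expectEqual(@as(u32, 16), fieldInitsStart(10, 3));
}
```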
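The Sema.zig hunk stops holding the slice from `struct_type.field_types.get(ip)` across calls to `bitSize` and re-fetches the element on each iteration. A plausible motivation (an assumption; the diff itself does not say) is that `bitSize` can intern new types, and growing the pool's backing storage may reallocate it, invalidating a slice taken before the call. The same hazard in miniature, using `std.ArrayList` as the growable storage:

```zig
const std = @import("std");

test "do not hold a slice into a growable array across growth" {
    var list = std.ArrayList(u32).init(std.testing.allocator);
    defer list.deinit();
    try list.appendSlice(&.{ 1, 2, 3 });

    // BAD: taking `list.items` here and using it after the append
    // below would be a use-after-free if the backing memory moves.
    try list.append(4); // may grow and reallocate

    // GOOD: re-fetch after any operation that can grow the storage.
    try std.testing.expectEqual(@as(u32, 4), list.items[3]);
}
```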
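The type.zig hunk makes `packedStructFieldByteOffset` a thin wrapper over the new `packedStructFieldBitOffset`: a packed field's byte offset is its bit offset divided by 8. A small check of that relationship using the language's own builtins; `Example` is an invented type, and `@bitOffsetOf`/`@bitSizeOf` stand in for the compiler-internal functions:

```zig
const std = @import("std");

const Example = packed struct {
    a: u3,
    b: u7,
    c: u6, // starts at bit 10, i.e. byte 1 of the u16 backing integer
};

test "packed struct byte offset is bit offset / 8" {
    try std.testing.expectEqual(@as(u16, 0), @bitOffsetOf(Example, "a"));
    try std.testing.expectEqual(@as(u16, 3), @bitOffsetOf(Example, "b"));
    try std.testing.expectEqual(@as(u16, 10), @bitOffsetOf(Example, "c"));
    try std.testing.expectEqual(@as(u16, 1), @bitOffsetOf(Example, "c") / 8);
    // Analogous to the fields_bit_sum check in reifyStruct: the total
    // field bits must match the backing integer's width.
    try std.testing.expectEqual(@as(u16, 16), @bitSizeOf(Example));
}
```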