sema: clean-up {union,struct}FieldAlignment and friends
My main gripes with this design were that it was incorrectly namespaced, and that the naming was inconsistent and a bit wrong (`fooAlign` vs `fooAlignment`). This commit moves all the logic out of `PerThread.zig` and onto the zcu + tid system that the previous couple of commits introduced. I've organized and merged the functions so that each is more specific to its own purpose:

- `fieldAlignment` takes a struct or union type, a field index, and a Zcu (or, in the Sema variant, a Pt), and gives you the alignment of the field at that index.
- `structFieldAlignment` takes the field type itself, and provides the logic to handle special cases, such as extern layout.

A design goal I had in mind was to avoid using the word 'struct' in a function name when the function also works on things that aren't structs, such as unions.
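To make the new shapes concrete, here is a minimal sketch of the resulting call sites, based on the signatures in this diff; `agg_ty`, `field_ty`, `field_index`, `explicit_align`, `layout`, `zcu`, and `pt` are illustrative stand-ins, not names from the commit:

    // Sketch only; assumes `agg_ty` is an already-resolved struct or union
    // type, and that `field_index`, `zcu` (*Zcu), and `pt` (Zcu.PerThread)
    // are in scope.

    // Outside of Sema, when all required type resolution already happened:
    const align_a = agg_ty.fieldAlignment(field_index, zcu);

    // Inside Sema, where querying the field may still trigger type resolution:
    const align_b = try agg_ty.fieldAlignmentSema(field_index, pt);

    // When you already hold the field type itself, plus the container's layout
    // and the field's explicit alignment, the struct-field helper applies the
    // special-case logic (explicit align, extern layout):
    const align_c = field_ty.structFieldAlignment(explicit_align, layout, zcu);

Note that `fieldAlignment` supports both structs and unions, which is the point of dropping 'struct' from the name.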
parent b4bb64ce78
commit 80cd53d3bb

src/Sema.zig (110 changed lines)
@@ -4887,7 +4887,7 @@ fn validateStructInit(
 const i: u32 = @intCast(i_usize);
 if (opt_field_ptr.unwrap()) |field_ptr| {
 // Determine whether the value stored to this pointer is comptime-known.
-const field_ty = struct_ty.structFieldType(i, zcu);
+const field_ty = struct_ty.fieldType(i, zcu);
 if (try sema.typeHasOnePossibleValue(field_ty)) |opv| {
 field_values[i] = opv.toIntern();
 continue;
@@ -4999,7 +4999,7 @@ fn validateStructInit(
 var block_index = first_block_index;
 for (block.instructions.items[first_block_index..]) |cur_inst| {
 while (field_ptr_ref == .none and init_index < instrs.len) : (init_index += 1) {
-const field_ty = struct_ty.structFieldType(field_indices[init_index], zcu);
+const field_ty = struct_ty.fieldType(field_indices[init_index], zcu);
 if (try field_ty.onePossibleValue(pt)) |_| continue;
 field_ptr_ref = sema.inst_map.get(instrs[init_index]).?;
 }
@@ -8430,7 +8430,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
 try indexable_ty.resolveFields(pt);
 assert(indexable_ty.isIndexable(zcu)); // validated by a previous instruction
 if (indexable_ty.zigTypeTag(zcu) == .Struct) {
-const elem_type = indexable_ty.structFieldType(@intFromEnum(bin.rhs), zcu);
+const elem_type = indexable_ty.fieldType(@intFromEnum(bin.rhs), zcu);
 return Air.internedToRef(elem_type.toIntern());
 } else {
 const elem_type = indexable_ty.elemType2(zcu);
@@ -14419,7 +14419,7 @@ fn analyzeTupleCat(
 var runtime_src: ?LazySrcLoc = null;
 var i: u32 = 0;
 while (i < lhs_len) : (i += 1) {
-types[i] = lhs_ty.structFieldType(i, zcu).toIntern();
+types[i] = lhs_ty.fieldType(i, zcu).toIntern();
 const default_val = lhs_ty.structFieldDefaultValue(i, zcu);
 values[i] = default_val.toIntern();
 const operand_src = block.src(.{ .array_cat_lhs = .{
@@ -14433,7 +14433,7 @@ fn analyzeTupleCat(
 }
 i = 0;
 while (i < rhs_len) : (i += 1) {
-types[i + lhs_len] = rhs_ty.structFieldType(i, zcu).toIntern();
+types[i + lhs_len] = rhs_ty.fieldType(i, zcu).toIntern();
 const default_val = rhs_ty.structFieldDefaultValue(i, zcu);
 values[i + lhs_len] = default_val.toIntern();
 const operand_src = block.src(.{ .array_cat_rhs = .{
@@ -14791,7 +14791,7 @@ fn analyzeTupleMul(
 const opt_runtime_src = rs: {
 var runtime_src: ?LazySrcLoc = null;
 for (0..tuple_len) |i| {
-types[i] = operand_ty.structFieldType(i, zcu).toIntern();
+types[i] = operand_ty.fieldType(i, zcu).toIntern();
 values[i] = operand_ty.structFieldDefaultValue(i, zcu).toIntern();
 const operand_src = block.src(.{ .array_cat_lhs = .{
 .array_cat_offset = src_node,
@@ -18466,13 +18466,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 };

 const alignment = switch (layout) {
-.auto, .@"extern" => try Type.unionFieldNormalAlignmentAdvanced(
-union_obj,
-@intCast(field_index),
-.sema,
-pt.zcu,
-pt.tid,
-),
+.auto, .@"extern" => try ty.fieldAlignmentSema(field_index, pt),
 .@"packed" => .none,
 };

@@ -18691,12 +18685,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 const default_val_ptr = try sema.optRefValue(opt_default_val);
 const alignment = switch (struct_type.layout) {
 .@"packed" => .none,
-else => try field_ty.structFieldAlignmentAdvanced(
+else => try field_ty.structFieldAlignmentSema(
 struct_type.fieldAlign(ip, field_index),
 struct_type.layout,
-.sema,
-pt.zcu,
-pt.tid,
+pt,
 ),
 };

@@ -20327,7 +20319,7 @@ fn zirStructInit(
 assert(field_inits[field_index] == .none);
 found_fields[field_index] = item.data.field_type;
 const uncoerced_init = try sema.resolveInst(item.data.init);
-const field_ty = resolved_ty.structFieldType(field_index, zcu);
+const field_ty = resolved_ty.fieldType(field_index, zcu);
 field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src);
 if (!is_packed) {
 try resolved_ty.resolveStructFieldInits(pt);
@@ -20338,7 +20330,7 @@ fn zirStructInit(
 });
 };

-if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, zcu), zcu)) {
+if (!init_val.eql(default_value, resolved_ty.fieldType(field_index, zcu), zcu)) {
 return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index);
 }
 }
@@ -20799,7 +20791,7 @@ fn zirArrayInit(
 const arg = args[i + 1];
 const resolved_arg = try sema.resolveInst(arg);
 const elem_ty = if (is_tuple)
-array_ty.structFieldType(i, zcu)
+array_ty.fieldType(i, zcu)
 else
 array_ty.elemType2(zcu);
 dest.* = try sema.coerce(block, elem_ty, resolved_arg, elem_src);
@@ -20862,7 +20854,7 @@ fn zirArrayInit(
 if (is_tuple) {
 for (resolved_args, 0..) |arg, i| {
 const elem_ptr_ty = try pt.ptrTypeSema(.{
-.child = array_ty.structFieldType(i, zcu).toIntern(),
+.child = array_ty.fieldType(i, zcu).toIntern(),
 .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
 });
 const elem_ptr_ty_ref = Air.internedToRef(elem_ptr_ty.toIntern());
@@ -25234,7 +25226,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
 },
 .packed_offset = parent_ptr_info.packed_offset,
 };
-const field_ty = parent_ty.structFieldType(field_index, zcu);
+const field_ty = parent_ty.fieldType(field_index, zcu);
 var actual_field_ptr_info: InternPool.Key.PtrType = .{
 .child = field_ty.toIntern(),
 .flags = .{
@@ -25249,19 +25241,17 @@
 switch (parent_ty.containerLayout(zcu)) {
 .auto => {
 actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(
-if (zcu.typeToStruct(parent_ty)) |struct_obj| try field_ty.structFieldAlignmentAdvanced(
-struct_obj.fieldAlign(ip, field_index),
-struct_obj.layout,
-.sema,
-pt.zcu,
-pt.tid,
-) else if (zcu.typeToUnion(parent_ty)) |union_obj|
-try Type.unionFieldNormalAlignmentAdvanced(
-union_obj,
-field_index,
-.sema,
-pt.zcu,
-pt.tid,
-)
+if (zcu.typeToStruct(parent_ty)) |struct_obj|
+try field_ty.structFieldAlignmentSema(
+struct_obj.fieldAlign(ip, field_index),
+struct_obj.layout,
+pt,
+)
+else if (zcu.typeToUnion(parent_ty)) |union_obj|
+try field_ty.unionFieldAlignmentSema(
+union_obj.fieldAlign(ip, field_index),
+union_obj.flagsUnordered(ip).layout,
+pt,
+)
 else
 actual_field_ptr_info.flags.alignment,
@@ -28035,14 +28025,14 @@ fn fieldCallBind(
 }
 if (field_name.toUnsigned(ip)) |field_index| {
 if (field_index >= concrete_ty.structFieldCount(zcu)) break :find_field;
-return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(field_index, zcu), field_index, object_ptr);
+return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.fieldType(field_index, zcu), field_index, object_ptr);
 }
 } else {
 const max = concrete_ty.structFieldCount(zcu);
 for (0..max) |i_usize| {
 const i: u32 = @intCast(i_usize);
 if (field_name == concrete_ty.structFieldName(i, zcu).unwrap().?) {
-return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, zcu), i, object_ptr);
+return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.fieldType(i, zcu), i, object_ptr);
 }
 }
 }
@@ -28340,12 +28330,10 @@ fn structFieldPtrByIndex(
 @enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset)));
 } else {
 // Our alignment is capped at the field alignment.
-const field_align = try Type.fromInterned(field_ty).structFieldAlignmentAdvanced(
+const field_align = try Type.fromInterned(field_ty).structFieldAlignmentSema(
 struct_type.fieldAlign(ip, field_index),
 struct_type.layout,
-.sema,
-pt.zcu,
-pt.tid,
+pt,
 );
 ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none)
 field_align
@@ -28477,7 +28465,7 @@ fn tupleFieldValByIndex(
 ) CompileError!Air.Inst.Ref {
 const pt = sema.pt;
 const zcu = pt.zcu;
-const field_ty = tuple_ty.structFieldType(field_index, zcu);
+const field_ty = tuple_ty.fieldType(field_index, zcu);

 if (tuple_ty.structFieldIsComptime(field_index, zcu))
 try tuple_ty.resolveStructFieldInits(pt);
@@ -28538,13 +28526,7 @@ fn unionFieldPtr(
 union_ptr_info.flags.alignment
 else
 try union_ty.abiAlignmentSema(pt);
-const field_align = try Type.unionFieldNormalAlignmentAdvanced(
-union_obj,
-field_index,
-.sema,
-pt.zcu,
-pt.tid,
-);
+const field_align = try union_ty.fieldAlignmentSema(field_index, pt);
 break :blk union_align.min(field_align);
 } else union_ptr_info.flags.alignment,
 },
@@ -28921,7 +28903,7 @@ fn tupleFieldPtr(
 });
 }

-const field_ty = tuple_ty.structFieldType(field_index, zcu);
+const field_ty = tuple_ty.fieldType(field_index, zcu);
 const ptr_field_ty = try pt.ptrTypeSema(.{
 .child = field_ty.toIntern(),
 .flags = .{
@@ -28979,7 +28961,7 @@ fn tupleField(
 });
 }

-const field_ty = tuple_ty.structFieldType(field_index, zcu);
+const field_ty = tuple_ty.fieldType(field_index, zcu);

 if (tuple_ty.structFieldIsComptime(field_index, zcu))
 try tuple_ty.resolveStructFieldInits(pt);
@@ -30615,9 +30597,9 @@ pub fn coerceInMemoryAllowed(
 const field_count = dest_ty.structFieldCount(zcu);
 for (0..field_count) |field_idx| {
 if (dest_ty.structFieldIsComptime(field_idx, zcu) != src_ty.structFieldIsComptime(field_idx, zcu)) break :tuple;
-if (dest_ty.structFieldAlign(field_idx, zcu) != src_ty.structFieldAlign(field_idx, zcu)) break :tuple;
-const dest_field_ty = dest_ty.structFieldType(field_idx, zcu);
-const src_field_ty = src_ty.structFieldType(field_idx, zcu);
+if (dest_ty.fieldAlignment(field_idx, zcu) != src_ty.fieldAlignment(field_idx, zcu)) break :tuple;
+const dest_field_ty = dest_ty.fieldType(field_idx, zcu);
+const src_field_ty = src_ty.fieldType(field_idx, zcu);
 const field = try sema.coerceInMemoryAllowed(block, dest_field_ty, src_field_ty, dest_is_mut, target, dest_src, src_src, null);
 if (field != .ok) break :tuple;
 }
@@ -35073,7 +35055,7 @@ fn resolvePeerTypesInner(
 peer_field_val.* = null;
 continue;
 };
-peer_field_ty.* = ty.structFieldType(field_index, zcu);
+peer_field_ty.* = ty.fieldType(field_index, zcu);
 peer_field_val.* = if (opt_val) |val| try val.fieldValue(pt, field_index) else null;
 }

@@ -35095,7 +35077,7 @@ fn resolvePeerTypesInner(
 // Already-resolved types won't be referenced by the error so it's fine
 // to leave them undefined.
 const ty = opt_ty orelse continue;
-peer_field_ty.* = ty.structFieldType(field_index, zcu);
+peer_field_ty.* = ty.fieldType(field_index, zcu);
 }

 return .{ .field_error = .{
@@ -35220,9 +35202,9 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
 .elem_ty = Type.noreturn,
 };
 if (!ty.isTuple(zcu)) return null;
-const elem_ty = ty.structFieldType(0, zcu);
+const elem_ty = ty.fieldType(0, zcu);
 for (1..field_count) |i| {
-if (!ty.structFieldType(i, zcu).eql(elem_ty, zcu)) {
+if (!ty.fieldType(i, zcu).eql(elem_ty, zcu)) {
 return null;
 }
 }
@@ -35309,12 +35291,10 @@ pub fn resolveStructAlignment(
 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
 if (struct_type.fieldIsComptime(ip, i) or try field_ty.comptimeOnlySema(pt))
 continue;
-const field_align = try field_ty.structFieldAlignmentAdvanced(
+const field_align = try field_ty.structFieldAlignmentSema(
 struct_type.fieldAlign(ip, i),
 struct_type.layout,
-.sema,
-pt.zcu,
-pt.tid,
+pt,
 );
 alignment = alignment.maxStrict(field_align);
 }
@@ -35375,12 +35355,10 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
 },
 else => return err,
 };
-field_align.* = try field_ty.structFieldAlignmentAdvanced(
+field_align.* = try field_ty.structFieldAlignmentSema(
 struct_type.fieldAlign(ip, i),
 struct_type.layout,
-.sema,
-pt.zcu,
-pt.tid,
+pt,
 );
 big_align = big_align.maxStrict(field_align.*);
 }
@@ -542,7 +542,7 @@ const PackValueBits = struct {
 while (it.next()) |field_idx| {
 const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8;
 try pack.padding(want_bit_off - cur_bit_off);
-const field_ty = ty.structFieldType(field_idx, zcu);
+const field_ty = ty.fieldType(field_idx, zcu);
 elems[field_idx] = (try pack.get(field_ty)).toIntern();
 cur_bit_off = want_bit_off + field_ty.bitSize(zcu);
 }
@@ -552,7 +552,7 @@ const PackValueBits = struct {
 var cur_bit_off: u64 = ty.bitSize(zcu);
 var it = zcu.typeToStruct(ty).?.iterateRuntimeOrderReverse(ip);
 while (it.next()) |field_idx| {
-const field_ty = ty.structFieldType(field_idx, zcu);
+const field_ty = ty.fieldType(field_idx, zcu);
 const want_bit_off = ty.structFieldOffset(field_idx, zcu) * 8 + field_ty.bitSize(zcu);
 try pack.padding(cur_bit_off - want_bit_off);
 elems[field_idx] = (try pack.get(field_ty)).toIntern();
@@ -578,7 +578,7 @@ const PackValueBits = struct {
 // This is identical between LE and BE targets.
 const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
 for (elems, 0..) |*elem, i| {
-const field_ty = ty.structFieldType(i, zcu);
+const field_ty = ty.fieldType(i, zcu);
 elem.* = (try pack.get(field_ty)).toIntern();
 }
 return Value.fromInterned(try pt.intern(.{ .aggregate = .{
@@ -451,7 +451,7 @@ fn loadComptimePtrInner(
 .@"packed" => break, // let the bitcast logic handle this
 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
 const start_off = cur_ty.structFieldOffset(field_idx, zcu);
-const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt);
+const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt);
 if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
 cur_val = try cur_val.getElem(sema.pt, field_idx);
 cur_offset -= start_off;
@@ -873,7 +873,7 @@ fn prepareComptimePtrStore(
 .@"packed" => break, // let the bitcast logic handle this
 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
 const start_off = cur_ty.structFieldOffset(field_idx, zcu);
-const end_off = start_off + try cur_ty.structFieldType(field_idx, zcu).abiSizeSema(pt);
+const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt);
 if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
 cur_val = try cur_val.elem(pt, sema.arena, field_idx);
 cur_offset -= start_off;

src/Type.zig (127 changed lines)

@@ -3191,8 +3191,8 @@ pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 {
 };
 }

-/// Supports structs and unions.
-pub fn structFieldType(ty: Type, index: usize, zcu: *const Zcu) Type {
+/// Returns the field type. Supports structs and unions.
+pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type {
 const ip = &zcu.intern_pool;
 return switch (ip.indexToKey(ty.toIntern())) {
 .struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]),
@@ -3205,17 +3205,26 @@
 };
 }

-pub fn structFieldAlign(ty: Type, index: usize, zcu: *Zcu) Alignment {
-return ty.structFieldAlignAdvanced(index, .normal, zcu, {}) catch unreachable;
+pub fn fieldAlignment(ty: Type, index: usize, zcu: *Zcu) Alignment {
+return ty.fieldAlignmentInner(index, .normal, zcu, {}) catch unreachable;
 }

-pub fn structFieldAlignAdvanced(
+pub fn fieldAlignmentSema(ty: Type, index: usize, pt: Zcu.PerThread) SemaError!Alignment {
+return try ty.fieldAlignmentInner(index, .sema, pt.zcu, pt.tid);
+}
+
+/// Returns the field alignment. Supports structs and unions.
+/// If `strat` is `.sema`, may perform type resolution.
+/// Asserts the layout is not packed.
+///
+/// Provide the struct field as the `ty`.
+pub fn fieldAlignmentInner(
 ty: Type,
 index: usize,
 comptime strat: ResolveStrat,
 zcu: *Zcu,
 tid: strat.Tid(),
-) !Alignment {
+) SemaError!Alignment {
 const ip = &zcu.intern_pool;
 switch (ip.indexToKey(ty.toIntern())) {
 .struct_type => {
@@ -3223,13 +3232,7 @@ pub fn structFieldAlignAdvanced(
 assert(struct_type.layout != .@"packed");
 const explicit_align = struct_type.fieldAlign(ip, index);
 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
-return field_ty.structFieldAlignmentAdvanced(
-explicit_align,
-struct_type.layout,
-strat,
-zcu,
-tid,
-);
+return field_ty.structFieldAlignmentInner(explicit_align, struct_type.layout, strat, zcu, tid);
 },
 .anon_struct_type => |anon_struct| {
 return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentInner(
@@ -3240,28 +3243,62 @@
 },
 .union_type => {
 const union_obj = ip.loadUnionType(ty.toIntern());
-return unionFieldNormalAlignmentAdvanced(
-union_obj,
-@intCast(index),
-strat,
-zcu,
-tid,
-);
+const layout = union_obj.flagsUnordered(ip).layout;
+assert(layout != .@"packed");
+const explicit_align = union_obj.fieldAlign(ip, index);
+const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[index]);
+return field_ty.unionFieldAlignmentInner(explicit_align, layout, strat, zcu, tid);
 },
 else => unreachable,
 }
 }

-/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn structFieldAlignmentAdvanced(
+/// Returns the alignment of a non-packed struct field. Assert the layout is not packed.
+///
+/// Asserts that all resolution needed was done.
+pub fn structFieldAlignment(
 field_ty: Type,
 explicit_alignment: InternPool.Alignment,
 layout: std.builtin.Type.ContainerLayout,
+zcu: *Zcu,
+) Alignment {
+return field_ty.structFieldAlignmentInner(
+explicit_alignment,
+layout,
+.normal,
+zcu,
+{},
+) catch unreachable;
+}
+
+/// Returns the alignment of a non-packed struct field. Assert the layout is not packed.
+/// May do type resolution when needed.
+/// Asserts that all resolution needed was done.
+pub fn structFieldAlignmentSema(
+field_ty: Type,
+explicit_alignment: InternPool.Alignment,
+layout: std.builtin.Type.ContainerLayout,
+pt: Zcu.PerThread,
+) SemaError!Alignment {
+return try field_ty.structFieldAlignmentInner(
+explicit_alignment,
+layout,
+.sema,
+pt.zcu,
+pt.tid,
+);
+}
+
+/// Returns the alignment of a non-packed struct field. Asserts the layout is not packed.
+/// If `strat` is `.sema`, may perform type resolution.
+pub fn structFieldAlignmentInner(
+field_ty: Type,
+explicit_alignment: Alignment,
+layout: std.builtin.Type.ContainerLayout,
 comptime strat: Type.ResolveStrat,
 zcu: *Zcu,
 tid: strat.Tid(),
-) Zcu.SemaError!InternPool.Alignment {
+) SemaError!Alignment {
 assert(layout != .@"packed");
 if (explicit_alignment != .none) return explicit_alignment;
 const ty_abi_align = (try field_ty.abiAlignmentInner(
@@ -3281,29 +3318,31 @@ pub fn structFieldAlignmentAdvanced(
 return ty_abi_align;
 }

-/// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-pub fn unionFieldNormalAlignment(
-loaded_union: InternPool.LoadedUnionType,
-field_index: u32,
-zcu: *Zcu,
-) InternPool.Alignment {
-return unionFieldNormalAlignmentAdvanced(loaded_union, field_index, .normal, zcu, {}) catch unreachable;
+pub fn unionFieldAlignmentSema(
+field_ty: Type,
+explicit_alignment: Alignment,
+layout: std.builtin.Type.ContainerLayout,
+pt: Zcu.PerThread,
+) SemaError!Alignment {
+return field_ty.unionFieldAlignmentInner(
+explicit_alignment,
+layout,
+.sema,
+pt.zcu,
+pt.tid,
+);
 }

 /// Returns the field alignment of a non-packed union. Asserts the layout is not packed.
-/// If `strat` is `.sema`, may perform type resolution.
-pub fn unionFieldNormalAlignmentAdvanced(
-loaded_union: InternPool.LoadedUnionType,
-field_index: u32,
+pub fn unionFieldAlignmentInner(
+field_ty: Type,
+explicit_alignment: Alignment,
+layout: std.builtin.Type.ContainerLayout,
 comptime strat: Type.ResolveStrat,
 zcu: *Zcu,
 tid: strat.Tid(),
-) Zcu.SemaError!InternPool.Alignment {
-const ip = &zcu.intern_pool;
-assert(loaded_union.flagsUnordered(ip).layout != .@"packed");
-const field_align = loaded_union.fieldAlign(ip, field_index);
-if (field_align != .none) return field_align;
-const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
+) SemaError!Alignment {
+assert(layout != .@"packed");
+if (explicit_alignment != .none) return explicit_alignment;
+if (field_ty.isNoReturn(zcu)) return .none;
 return (try field_ty.abiAlignmentInner(strat.toLazy(), zcu, tid)).scalar;
 }
@@ -3608,12 +3647,12 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:

 const zcu = pt.zcu;
 const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
-const field_ty = struct_ty.structFieldType(field_idx, zcu);
+const field_ty = struct_ty.fieldType(field_idx, zcu);

 var bit_offset: u16 = 0;
 var running_bits: u16 = 0;
 for (0..struct_ty.structFieldCount(zcu)) |i| {
-const f_ty = struct_ty.structFieldType(i, zcu);
+const f_ty = struct_ty.fieldType(i, zcu);
 if (i == field_idx) {
 bit_offset = running_bits;
 }
@@ -1414,7 +1414,7 @@ pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value {
 const zcu = pt.zcu;
 return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
 .undef => |ty| Value.fromInterned(try pt.intern(.{
-.undef = Type.fromInterned(ty).structFieldType(index, zcu).toIntern(),
+.undef = Type.fromInterned(ty).fieldType(index, zcu).toIntern(),
 })),
 .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) {
 .bytes => |bytes| try pt.intern(.{ .int = .{
@@ -3810,9 +3810,9 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
 // `field_align` may be `.none` to represent the natural alignment of `field_ty`, but is not necessarily.
 const field_ty: Type, const field_align: InternPool.Alignment = switch (aggregate_ty.zigTypeTag(zcu)) {
 .Struct => field: {
-const field_ty = aggregate_ty.structFieldType(field_idx, zcu);
+const field_ty = aggregate_ty.fieldType(field_idx, zcu);
 switch (aggregate_ty.containerLayout(zcu)) {
-.auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), .sema, zcu, pt.tid) },
+.auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) },
 .@"extern" => {
 // Well-defined layout, so just offset the pointer appropriately.
 const byte_off = aggregate_ty.structFieldOffset(field_idx, zcu);
@@ -3863,7 +3863,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
 const union_obj = zcu.typeToUnion(aggregate_ty).?;
 const field_ty = Type.fromInterned(union_obj.field_types.get(&zcu.intern_pool)[field_idx]);
 switch (aggregate_ty.containerLayout(zcu)) {
-.auto => break :field .{ field_ty, try aggregate_ty.structFieldAlignAdvanced(@intCast(field_idx), .sema, zcu, pt.tid) },
+.auto => break :field .{ field_ty, try aggregate_ty.fieldAlignmentSema(field_idx, pt) },
 .@"extern" => {
 // Point to the same address.
 const result_ty = try pt.ptrTypeSema(info: {
@@ -4198,14 +4198,14 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
 const base_ptr_ty = base_ptr.typeOf(zcu);
 const agg_ty = base_ptr_ty.childType(zcu);
 const field_ty, const field_align = switch (agg_ty.zigTypeTag(zcu)) {
-.Struct => .{ agg_ty.structFieldType(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(
-@intCast(field.index),
+.Struct => .{ agg_ty.fieldType(field.index, zcu), try agg_ty.fieldAlignmentInner(
+field.index,
 if (have_sema) .sema else .normal,
 pt.zcu,
 if (have_sema) pt.tid else {},
 ) },
-.Union => .{ agg_ty.unionFieldTypeByIndex(@intCast(field.index), zcu), try agg_ty.structFieldAlignAdvanced(
-@intCast(field.index),
+.Union => .{ agg_ty.unionFieldTypeByIndex(field.index, zcu), try agg_ty.fieldAlignmentInner(
+field.index,
 if (have_sema) .sema else .normal,
 pt.zcu,
 if (have_sema) pt.tid else {},
@@ -4344,7 +4344,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
 .Struct => switch (cur_ty.containerLayout(zcu)) {
 .auto, .@"packed" => break,
 .@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
-const field_ty = cur_ty.structFieldType(field_idx, zcu);
+const field_ty = cur_ty.fieldType(field_idx, zcu);
 const start_off = cur_ty.structFieldOffset(field_idx, zcu);
 const end_off = start_off + field_ty.abiSize(zcu);
 if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
@@ -4401,7 +4401,7 @@ pub fn resolveLazy(
 .u64, .i64, .big_int => return val,
 .lazy_align, .lazy_size => return pt.intValue(
 Type.fromInterned(int.ty),
-(try val.getUnsignedIntInner(.sema, pt.zcu, pt.tid)).?,
+try val.toUnsignedIntSema(pt),
 ),
 },
 .slice => |slice| {
@@ -3040,38 +3040,6 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
 }
 }

-/// Returns 0 if the union is represented with 0 bits at runtime.
-pub fn unionAbiAlignment(pt: Zcu.PerThread, loaded_union: InternPool.LoadedUnionType) InternPool.Alignment {
-const zcu = pt.zcu;
-const ip = &zcu.intern_pool;
-const have_tag = loaded_union.flagsPtr(ip).runtime_tag.hasTag();
-var max_align: InternPool.Alignment = .none;
-if (have_tag) max_align = Type.fromInterned(loaded_union.enum_tag_ty).abiAlignment(zcu);
-for (loaded_union.field_types.get(ip), 0..) |field_ty, field_index| {
-if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
-
-const field_align = zcu.unionFieldNormalAlignment(loaded_union, @intCast(field_index));
-max_align = max_align.max(field_align);
-}
-return max_align;
-}
-
-/// Returns the field alignment of a non-packed struct. Asserts the layout is not packed.
-pub fn structFieldAlignment(
-pt: Zcu.PerThread,
-explicit_alignment: InternPool.Alignment,
-field_ty: Type,
-layout: std.builtin.Type.ContainerLayout,
-) InternPool.Alignment {
-return field_ty.structFieldAlignmentAdvanced(
-explicit_alignment,
-layout,
-.normal,
-pt.zcu,
-{},
-) catch unreachable;
-}
-
 /// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
 /// into the packed struct InternPool data rather than computing this on the
 /// fly, however it was found to perform worse when measured on real world
@@ -4144,7 +4144,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 const zcu = pt.zcu;
 const mcv = try self.resolveInst(operand);
 const struct_ty = self.typeOf(operand);
-const struct_field_ty = struct_ty.structFieldType(index, zcu);
+const struct_field_ty = struct_ty.fieldType(index, zcu);
 const struct_field_offset = @as(u32, @intCast(struct_ty.structFieldOffset(index, zcu)));

 switch (mcv) {
@@ -5473,10 +5473,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 const reg_lock = self.register_manager.lockReg(rwo.reg);
 defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);

-const wrapped_ty = ty.structFieldType(0, zcu);
+const wrapped_ty = ty.fieldType(0, zcu);
 try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });

-const overflow_bit_ty = ty.structFieldType(1, zcu);
+const overflow_bit_ty = ty.fieldType(1, zcu);
 const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu)));
 const raw_cond_reg = try self.register_manager.allocReg(null, gp);
 const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
@@ -95,7 +95,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u8 {
 var count: u8 = 0;
 var i: u32 = 0;
 while (i < fields_len) : (i += 1) {
-const field_ty = ty.structFieldType(i, zcu);
+const field_ty = ty.fieldType(i, zcu);
 const field_count = countFloats(field_ty, zcu, maybe_float_bits);
 if (field_count == invalid) return invalid;
 count += field_count;
@@ -130,7 +130,7 @@ pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
 const fields_len = ty.structFieldCount(zcu);
 var i: u32 = 0;
 while (i < fields_len) : (i += 1) {
-const field_ty = ty.structFieldType(i, zcu);
+const field_ty = ty.fieldType(i, zcu);
 if (getFloatArrayType(field_ty, zcu)) |some| return some;
 }
 return null;
@@ -2926,7 +2926,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 const mcv = try self.resolveInst(operand);
 const struct_ty = self.typeOf(operand);
 const struct_field_offset: u32 = @intCast(struct_ty.structFieldOffset(index, zcu));
-const struct_field_ty = struct_ty.structFieldType(index, zcu);
+const struct_field_ty = struct_ty.fieldType(index, zcu);

 switch (mcv) {
 .dead, .unreach => unreachable,
@@ -5434,10 +5434,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 const reg_lock = self.register_manager.lockReg(reg);
 defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);

-const wrapped_ty = ty.structFieldType(0, zcu);
+const wrapped_ty = ty.fieldType(0, zcu);
 try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg });

-const overflow_bit_ty = ty.structFieldType(1, zcu);
+const overflow_bit_ty = ty.fieldType(1, zcu);
 const overflow_bit_offset: u32 = @intCast(ty.structFieldOffset(1, zcu));
 const cond_reg = try self.register_manager.allocReg(null, gp);
@@ -44,8 +44,8 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {
 const fields = ty.structFieldCount(zcu);
 var i: u32 = 0;
 while (i < fields) : (i += 1) {
-const field_ty = ty.structFieldType(i, zcu);
-const field_alignment = ty.structFieldAlign(i, zcu);
+const field_ty = ty.fieldType(i, zcu);
+const field_alignment = ty.fieldAlignment(i, zcu);
 const field_size = field_ty.bitSize(zcu);
 if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
 return Class.arrSize(bit_size, 64);
@@ -66,7 +66,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {

 for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
 if (Type.fromInterned(field_ty).bitSize(zcu) > 32 or
-Type.unionFieldNormalAlignment(union_obj, @intCast(field_index), zcu).compare(.gt, .@"32"))
+ty.fieldAlignment(field_index, zcu).compare(.gt, .@"32"))
 {
 return Class.arrSize(bit_size, 64);
 }
@@ -141,7 +141,7 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
 var count: u32 = 0;
 var i: u32 = 0;
 while (i < fields_len) : (i += 1) {
-const field_ty = ty.structFieldType(i, zcu);
+const field_ty = ty.fieldType(i, zcu);
 const field_count = countFloats(field_ty, zcu, maybe_float_bits);
 if (field_count == invalid) return invalid;
 count += field_count;
@@ -4576,7 +4576,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
 const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
 const src_mcv = try func.resolveInst(operand);
 const struct_ty = func.typeOf(operand);
-const field_ty = struct_ty.structFieldType(index, zcu);
+const field_ty = struct_ty.fieldType(index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;

 const field_off: u32 = switch (struct_ty.containerLayout(zcu)) {
@@ -7882,7 +7882,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
 const elem_i: u32 = @intCast(elem_i_usize);
 if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;

-const elem_ty = result_ty.structFieldType(elem_i, zcu);
+const elem_ty = result_ty.fieldType(elem_i, zcu);
 const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu));
 if (elem_bit_size > 64) {
 return func.fail(
@@ -7916,7 +7916,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
 } else for (elements, 0..) |elem, elem_i| {
 if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;

-const elem_ty = result_ty.structFieldType(elem_i, zcu);
+const elem_ty = result_ty.fieldType(elem_i, zcu);
 const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
 const elem_mcv = try func.resolveInst(elem);
 try func.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv);
@@ -26,7 +26,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
 var any_fp = false;
 var field_count: usize = 0;
 for (0..ty.structFieldCount(zcu)) |field_index| {
-const field_ty = ty.structFieldType(field_index, zcu);
+const field_ty = ty.fieldType(field_index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 if (field_ty.isRuntimeFloat())
 any_fp = true
@@ -3980,10 +3980,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 const reg_lock = self.register_manager.lockReg(rwo.reg);
 defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);

-const wrapped_ty = ty.structFieldType(0, zcu);
+const wrapped_ty = ty.fieldType(0, zcu);
 try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });

-const overflow_bit_ty = ty.structFieldType(1, zcu);
+const overflow_bit_ty = ty.fieldType(1, zcu);
 const overflow_bit_offset = @as(u32, @intCast(ty.structFieldOffset(1, zcu)));
 const cond_reg = try self.register_manager.allocReg(null, gp);
@@ -3954,7 +3954,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const struct_ty = func.typeOf(struct_field.struct_operand);
 const operand = try func.resolveInst(struct_field.struct_operand);
 const field_index = struct_field.field_index;
-const field_ty = struct_ty.structFieldType(field_index, zcu);
+const field_ty = struct_ty.fieldType(field_index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});

 const result: WValue = switch (struct_ty.containerLayout(zcu)) {
@@ -5378,7 +5378,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 for (elements, 0..) |elem, elem_index| {
 if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue;

-const elem_ty = result_ty.structFieldType(elem_index, zcu);
+const elem_ty = result_ty.fieldType(elem_index, zcu);
 const field_offset = result_ty.structFieldOffset(elem_index, zcu);
 _ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify);
 prev_field_offset = field_offset;
@@ -108,7 +108,7 @@ pub fn scalarType(ty: Type, zcu: *Zcu) Type {
 return scalarType(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), zcu);
 } else {
 assert(ty.structFieldCount(zcu) == 1);
-return scalarType(ty.structFieldType(0, zcu), zcu);
+return scalarType(ty.fieldType(0, zcu), zcu);
 }
 },
 .Union => {
@@ -4352,14 +4352,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 try self.genSetMem(
 .{ .frame = frame_index },
 @intCast(tuple_ty.structFieldOffset(1, zcu)),
-tuple_ty.structFieldType(1, zcu),
+tuple_ty.fieldType(1, zcu),
 .{ .eflags = cc },
 .{},
 );
 try self.genSetMem(
 .{ .frame = frame_index },
 @intCast(tuple_ty.structFieldOffset(0, zcu)),
-tuple_ty.structFieldType(0, zcu),
+tuple_ty.fieldType(0, zcu),
 partial_mcv,
 .{},
 );
@@ -4392,7 +4392,7 @@ fn genSetFrameTruncatedOverflowCompare(
 };
 defer if (src_lock) |lock| self.register_manager.unlockReg(lock);

-const ty = tuple_ty.structFieldType(0, zcu);
+const ty = tuple_ty.fieldType(0, zcu);
 const int_info = ty.intInfo(zcu);

 const hi_bits = (int_info.bits - 1) % 64 + 1;
@@ -4450,7 +4450,7 @@ fn genSetFrameTruncatedOverflowCompare(
 try self.genSetMem(
 .{ .frame = frame_index },
 @intCast(tuple_ty.structFieldOffset(1, zcu)),
-tuple_ty.structFieldType(1, zcu),
+tuple_ty.fieldType(1, zcu),
 if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne },
 .{},
 );
@@ -4637,7 +4637,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 try self.genSetMem(
 .{ .frame = dst_mcv.load_frame.index },
 @intCast(tuple_ty.structFieldOffset(0, zcu)),
-tuple_ty.structFieldType(0, zcu),
+tuple_ty.fieldType(0, zcu),
 result,
 .{},
 );
@@ -4649,7 +4649,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 try self.genSetMem(
 .{ .frame = dst_mcv.load_frame.index },
 @intCast(tuple_ty.structFieldOffset(1, zcu)),
-tuple_ty.structFieldType(1, zcu),
+tuple_ty.fieldType(1, zcu),
 .{ .eflags = .ne },
 .{},
 );
@@ -4761,14 +4761,14 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 try self.genSetMem(
 .{ .frame = dst_mcv.load_frame.index },
 @intCast(tuple_ty.structFieldOffset(0, zcu)),
-tuple_ty.structFieldType(0, zcu),
+tuple_ty.fieldType(0, zcu),
 .{ .register_pair = .{ .rax, .rdx } },
 .{},
 );
 try self.genSetMem(
 .{ .frame = dst_mcv.load_frame.index },
 @intCast(tuple_ty.structFieldOffset(1, zcu)),
-tuple_ty.structFieldType(1, zcu),
+tuple_ty.fieldType(1, zcu),
 .{ .register = tmp_regs[1] },
 .{},
 );
@@ -4816,14 +4816,14 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 try self.genSetMem(
 .{ .frame = frame_index },
 @intCast(tuple_ty.structFieldOffset(0, zcu)),
-tuple_ty.structFieldType(0, zcu),
+tuple_ty.fieldType(0, zcu),
 partial_mcv,
 .{},
 );
 try self.genSetMem(
 .{ .frame = frame_index },
 @intCast(tuple_ty.structFieldOffset(1, zcu)),
-tuple_ty.structFieldType(1, zcu),
+tuple_ty.fieldType(1, zcu),
 .{ .immediate = 0 }, // cc being set is impossible
 .{},
 );
@@ -8143,7 +8143,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {

 const container_ty = self.typeOf(operand);
 const container_rc = self.regClassForType(container_ty);
-const field_ty = container_ty.structFieldType(index, zcu);
+const field_ty = container_ty.fieldType(index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
 const field_rc = self.regClassForType(field_ty);
 const field_is_gp = field_rc.supersetOf(abi.RegisterClass.gp);
@@ -15273,14 +15273,14 @@ fn genSetMem(
 try self.genSetMem(
 base,
 disp + @as(i32, @intCast(ty.structFieldOffset(0, zcu))),
-ty.structFieldType(0, zcu),
+ty.fieldType(0, zcu),
 .{ .register = ro.reg },
 opts,
 );
 try self.genSetMem(
 base,
 disp + @as(i32, @intCast(ty.structFieldOffset(1, zcu))),
-ty.structFieldType(1, zcu),
+ty.fieldType(1, zcu),
 .{ .eflags = ro.eflags },
 opts,
 );
@@ -18150,7 +18150,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
 const elem_i: u32 = @intCast(elem_i_usize);
 if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;

-const elem_ty = result_ty.structFieldType(elem_i, zcu);
+const elem_ty = result_ty.fieldType(elem_i, zcu);
 const elem_bit_size: u32 = @intCast(elem_ty.bitSize(zcu));
 if (elem_bit_size > 64) {
 return self.fail(
@@ -18232,7 +18232,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
 } else for (elements, 0..) |elem, elem_i| {
 if ((try result_ty.structFieldValueComptime(pt, elem_i)) != null) continue;

-const elem_ty = result_ty.structFieldType(elem_i, zcu);
+const elem_ty = result_ty.fieldType(elem_i, zcu);
 const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
 const elem_mcv = try self.resolveInst(elem);
 const mat_elem_mcv = switch (elem_mcv) {
@@ -7206,7 +7206,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
 var empty = true;
 for (0..elements.len) |field_index| {
 if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-const field_ty = inst_ty.structFieldType(field_index, zcu);
+const field_ty = inst_ty.fieldType(field_index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;

 if (!empty) {
@@ -7219,7 +7219,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
 empty = true;
 for (resolved_elements, 0..) |element, field_index| {
 if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-const field_ty = inst_ty.structFieldType(field_index, zcu);
+const field_ty = inst_ty.fieldType(field_index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;

 if (!empty) try writer.writeAll(", ");
@@ -2496,16 +2496,10 @@ pub const Object = struct {
 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 const field_size = field_ty.abiSize(zcu);
-const field_align = pt.structFieldAlignment(
-struct_type.fieldAlign(ip, field_index),
-field_ty,
-struct_type.layout,
-);
+const field_align = ty.fieldAlignment(field_index, zcu);
 const field_offset = ty.structFieldOffset(field_index, zcu);

 const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
 try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);

 fields.appendAssumeCapacity(try o.builder.debugMemberType(
 try o.builder.metadataString(field_name.toSlice(ip)),
 .none, // File
@@ -2598,7 +2592,7 @@ pub const Object = struct {
 const field_size = Type.fromInterned(field_ty).abiSize(zcu);
 const field_align: InternPool.Alignment = switch (union_type.flagsUnordered(ip).layout) {
 .@"packed" => .none,
-.auto, .@"extern" => Type.unionFieldNormalAlignment(union_type, @intCast(field_index), zcu),
+.auto, .@"extern" => ty.fieldAlignment(field_index, zcu),
 };

 const field_name = tag_type.names.get(ip)[field_index];
@@ -3315,11 +3309,7 @@ pub const Object = struct {
 var it = struct_type.iterateRuntimeOrder(ip);
 while (it.next()) |field_index| {
 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-const field_align = pt.structFieldAlignment(
-struct_type.fieldAlign(ip, field_index),
-field_ty,
-struct_type.layout,
-);
+const field_align = t.fieldAlignment(field_index, zcu);
 const field_ty_align = field_ty.abiAlignment(zcu);
 if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed";
 big_align = big_align.max(field_align);
@@ -4127,11 +4117,7 @@ pub const Object = struct {
 var field_it = struct_type.iterateRuntimeOrder(ip);
 while (field_it.next()) |field_index| {
 const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-const field_align = pt.structFieldAlignment(
-struct_type.fieldAlign(ip, field_index),
-field_ty,
-struct_type.layout,
-);
+const field_align = ty.fieldAlignment(field_index, zcu);
 big_align = big_align.max(field_align);
 const prev_offset = offset;
 offset = field_align.forward(offset);
@@ -6528,7 +6514,7 @@ pub const FuncGen = struct {
 const struct_ty = self.typeOf(struct_field.struct_operand);
 const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
 const field_index = struct_field.field_index;
-const field_ty = struct_ty.structFieldType(field_index, zcu);
+const field_ty = struct_ty.fieldType(field_index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return .none;

 if (!isByRef(struct_ty, zcu)) {
@@ -6590,7 +6576,7 @@
 const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?;
 const field_ptr =
 try self.wip.gepStruct(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
-const alignment = struct_ty.structFieldAlign(field_index, zcu);
+const alignment = struct_ty.fieldAlignment(field_index, zcu);
 const field_ptr_ty = try pt.ptrType(.{
 .child = field_ty.toIntern(),
 .flags = .{ .alignment = alignment },
@@ -7471,8 +7457,8 @@
 assert(self.err_ret_trace != .none);
 const field_ptr =
 try self.wip.gepStruct(struct_llvm_ty, self.err_ret_trace, llvm_field_index, "");
-const field_alignment = struct_ty.structFieldAlign(field_index, zcu);
-const field_ty = struct_ty.structFieldType(field_index, zcu);
+const field_alignment = struct_ty.fieldAlignment(field_index, zcu);
+const field_ty = struct_ty.fieldType(field_index, zcu);
 const field_ptr_ty = try pt.ptrType(.{
 .child = field_ty.toIntern(),
 .flags = .{ .alignment = field_alignment },
@@ -10080,7 +10066,7 @@
 const field_ptr_ty = try pt.ptrType(.{
 .child = self.typeOf(elem).toIntern(),
 .flags = .{
-.alignment = result_ty.structFieldAlign(i, zcu),
+.alignment = result_ty.fieldAlignment(i, zcu),
 },
 });
 try self.store(field_ptr, field_ptr_ty, llvm_elem, .none);
@@ -10185,7 +10171,7 @@
 const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
 const field_llvm_ty = try o.lowerType(field_ty);
 const field_size = field_ty.abiSize(zcu);
-const field_align = Type.unionFieldNormalAlignment(union_obj, extra.field_index, zcu);
+const field_align = union_ty.fieldAlignment(extra.field_index, zcu);
 const llvm_usize = try o.lowerType(Type.usize);
 const usize_zero = try o.builder.intValue(llvm_usize, 0);
@@ -11188,7 +11174,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
 var types_len: usize = 0;
 var types: [8]Builder.Type = undefined;
 for (0..return_type.structFieldCount(zcu)) |field_index| {
-const field_ty = return_type.structFieldType(field_index, zcu);
+const field_ty = return_type.fieldType(field_index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 types[types_len] = try o.lowerType(field_ty);
 types_len += 1;
@@ -11444,7 +11430,7 @@ const ParamTypeIterator = struct {
 .fields => {
 it.types_len = 0;
 for (0..ty.structFieldCount(zcu)) |field_index| {
-const field_ty = ty.structFieldType(field_index, zcu);
+const field_ty = ty.fieldType(field_index, zcu);
 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 it.types_buffer[it.types_len] = try it.object.lowerType(field_ty);
 it.types_len += 1;
@@ -5148,7 +5148,7 @@ const NavGen = struct {
 const object_ty = self.typeOf(struct_field.struct_operand);
 const object_id = try self.resolve(struct_field.struct_operand);
 const field_index = struct_field.field_index;
-const field_ty = object_ty.structFieldType(field_index, zcu);
+const field_ty = object_ty.fieldType(field_index, zcu);

 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
@@ -223,7 +223,7 @@ pub const MutableValue = union(enum) {
 @memset(elems[0..@intCast(len_no_sent)], .{ .interned = undef_elem });
 },
 .Struct => for (elems[0..@intCast(len_no_sent)], 0..) |*mut_elem, i| {
-const field_ty = ty.structFieldType(i, zcu).toIntern();
+const field_ty = ty.fieldType(i, zcu).toIntern();
 mut_elem.* = .{ .interned = try pt.intern(.{ .undef = field_ty }) };
 },
 else => unreachable,