InternPool,Sema,type,llvm: alignment fixes

This changeset fixes the handling of alignment in several places. The
new rules are:
* `@alignOf(T)` where `T` is a runtime zero-bit type is at least 1,
  maybe greater.
* Zero-bit fields in `extern` structs *do* force alignment, potentially
  offsetting following fields.
* Zero-bit fields *do* have addresses within structs which can be
  observed and are consistent with `@offsetOf`.

These are not necessarily all implemented correctly yet (see disabled
test), but this commit fixes all regressions compared to master, and
makes one new test pass.
This commit is contained in:
mlugg 2023-09-21 02:00:52 +01:00 committed by Andrew Kelley
parent cd242b7440
commit 1b672e41c5
7 changed files with 127 additions and 44 deletions

View File

@ -34325,6 +34325,7 @@ pub fn resolveStructAlignment(
struct_type.flagsPtr(ip).alignment = result;
return result;
}
defer struct_type.clearAlignmentWip(ip);
var result: Alignment = .@"1";
@ -34337,7 +34338,7 @@ pub fn resolveStructAlignment(
field_ty,
struct_type.layout,
);
result = result.max(field_align);
result = result.maxStrict(field_align);
}
struct_type.flagsPtr(ip).alignment = result;
@ -34373,6 +34374,8 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
const aligns = try sema.arena.alloc(Alignment, struct_type.field_types.len);
const sizes = try sema.arena.alloc(u64, struct_type.field_types.len);
var big_align: Alignment = .@"1";
for (aligns, sizes, 0..) |*field_align, *field_size, i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
@ -34381,6 +34384,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
field_align.* = .none;
continue;
}
field_size.* = sema.typeAbiSize(field_ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
@ -34394,6 +34398,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
field_ty,
struct_type.layout,
);
big_align = big_align.maxStrict(field_align.*);
}
if (struct_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
@ -34409,8 +34414,13 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
if (struct_type.hasReorderedFields()) {
const runtime_order = struct_type.runtime_order.get(ip);
for (sizes, runtime_order, 0..) |size, *ro, i| {
ro.* = if (size != 0) @enumFromInt(i) else .omitted;
for (runtime_order, 0..) |*ro, i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
ro.* = .omitted;
} else {
ro.* = @enumFromInt(i);
}
}
const RuntimeOrder = InternPool.Key.StructType.RuntimeOrder;
@ -34454,10 +34464,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
const offsets = struct_type.offsets.get(ip);
var it = struct_type.iterateRuntimeOrder(ip);
var offset: u64 = 0;
var big_align: Alignment = .@"1";
while (it.next()) |i| {
if (aligns[i] == .none) continue;
big_align = big_align.maxStrict(aligns[i]);
offsets[i] = @intCast(aligns[i].forward(offset));
offset = offsets[i] + sizes[i];
}

View File

@ -833,7 +833,10 @@ pub const Object = struct {
/// When an LLVM struct type is created, an entry is inserted into this
/// table for every zig source field of the struct that has a corresponding
/// LLVM struct field. comptime fields and 0 bit fields are not included.
/// LLVM struct field. comptime fields are not included. Zero-bit fields are
/// mapped to a field at the correct byte, which may be a padding field, or
/// are not mapped, in which case they are semantically at the end of the
/// struct.
/// The value is the LLVM struct field index.
/// This is denormalized data.
struct_field_map: std.AutoHashMapUnmanaged(ZigStructField, c_uint),
@ -2500,7 +2503,6 @@ pub const Object = struct {
try di_fields.ensureUnusedCapacity(gpa, field_types.len);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = field_types[field_index].toType();
@ -2511,8 +2513,7 @@ pub const Object = struct {
field_ty,
struct_type.layout,
);
const field_offset = field_align.forward(offset);
offset = field_offset + field_size;
const field_offset = ty.structFieldOffset(field_index, mod);
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(gpa, "{d}", .{field_index});
@ -3304,10 +3305,10 @@ pub const Object = struct {
var offset: u64 = 0;
var big_align: InternPool.Alignment = .@"1";
var struct_kind: Builder.Type.Structure.Kind = .normal;
// When we encounter a zero-bit field, we place it here so we know to map it to the next non-zero-bit field (if any).
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = struct_type.field_types.get(ip)[field_index].toType();
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = mod.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
@ -3324,6 +3325,20 @@ pub const Object = struct {
o.gpa,
try o.builder.arrayType(padding_len, .i8),
);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists); otherwise, don't
// map the field, indicating it's at the end of the struct.
if (offset != struct_type.size(ip).*) {
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = field_index,
}, @intCast(llvm_field_types.items.len));
}
continue;
}
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = field_index,
@ -3360,12 +3375,14 @@ pub const Object = struct {
var offset: u64 = 0;
var big_align: InternPool.Alignment = .none;
const struct_size = t.abiSize(mod);
for (
anon_struct_type.types.get(ip),
anon_struct_type.values.get(ip),
0..,
) |field_ty, field_val, field_index| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
if (field_val != .none) continue;
const field_align = field_ty.toType().abiAlignment(mod);
big_align = big_align.max(field_align);
@ -3377,6 +3394,18 @@ pub const Object = struct {
o.gpa,
try o.builder.arrayType(padding_len, .i8),
);
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists); otherwise, don't
// map the field, indicating it's at the end of the struct.
if (offset != struct_size) {
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = @intCast(field_index),
}, @intCast(llvm_field_types.items.len));
}
continue;
}
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = @intCast(field_index),
@ -4019,7 +4048,6 @@ pub const Object = struct {
var field_it = struct_type.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
const field_ty = struct_type.field_types.get(ip)[field_index].toType();
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = mod.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
@ -4040,6 +4068,11 @@ pub const Object = struct {
llvm_index += 1;
}
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
}
vals[llvm_index] = try o.lowerValue(
(try val.fieldValue(mod, field_index)).toIntern(),
);
@ -6122,7 +6155,7 @@ pub const FuncGen = struct {
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType(mod);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return (try o.lowerPtrToVoid(ptr_ty)).toValue();
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.resolveInst(bin_op.lhs);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);

View File

@ -833,7 +833,7 @@ pub const Type = struct {
};
}
/// Returns `none` for 0-bit types.
/// Never returns `none`. Asserts that all necessary type resolution is already done.
pub fn abiAlignment(ty: Type, mod: *Module) Alignment {
return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
}
@ -878,10 +878,10 @@ pub const Type = struct {
};
switch (ty.toIntern()) {
.empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .none },
.empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .none };
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" };
return .{ .scalar = intAbiAlignment(int_type.bits, target) };
},
.ptr_type, .anyframe_type => {
@ -929,6 +929,7 @@ pub const Type = struct {
.isize,
.export_options,
.extern_options,
.type_info,
=> return .{
.scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
},
@ -974,8 +975,7 @@ pub const Type = struct {
.null,
.undefined,
.enum_literal,
.type_info,
=> return .{ .scalar = .none },
=> return .{ .scalar = .@"1" },
.noreturn => unreachable,
.generic_poison => unreachable,
@ -1010,11 +1010,9 @@ pub const Type = struct {
};
},
.anon_struct_type => |tuple| {
var big_align: Alignment = .none;
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
if (!(field_ty.toType().hasRuntimeBits(mod))) continue;
switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) {
.scalar => |field_align| big_align = big_align.max(field_align),
.val => switch (strat) {
@ -1059,7 +1057,7 @@ pub const Type = struct {
}
}
var max_align: Alignment = .none;
var max_align: Alignment = .@"1";
if (union_obj.hasTag(ip)) max_align = union_obj.enum_tag_ty.toType().abiAlignment(mod);
for (0..union_obj.field_names.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index].toType();
@ -1172,7 +1170,7 @@ pub const Type = struct {
.scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
},
.ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
.NoReturn => return .{ .scalar = .none },
.NoReturn => return .{ .scalar = .@"1" },
else => {},
}

View File

@ -1265,7 +1265,8 @@ pub const Value = struct {
.int => |int| switch (int.storage) {
.big_int => |big_int| big_int.orderAgainstScalar(0),
inline .u64, .i64 => |x| std.math.order(x, 0),
.lazy_align, .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
.lazy_align => .gt, // alignment is never 0
.lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
mod,
false,
if (opt_sema) |sema| .{ .sema = sema } else .eager,

View File

@ -619,3 +619,58 @@ test "sub-aligned pointer field access" {
.Little => try expect(x == 0x09080706),
}
}
// Verifies the new zero-bit alignment rules described in the commit message:
// `@alignOf(T)` for a runtime zero-bit type is at least 1 (maybe greater),
// and zero-bit fields have observable addresses consistent with their
// alignment, both at natural and at over-aligned (`align(32)`) placements.
test "alignment of zero-bit types is respected" {
if (true) return error.SkipZigTest; // TODO: enable once zero-bit alignment is fully implemented (see commit message)
// `[0]usize` is zero-bit but should carry the element type's alignment.
const S = struct { arr: [0]usize = .{} };
comptime assert(@alignOf(void) == 1);
comptime assert(@alignOf(u0) == 1);
comptime assert(@alignOf([0]usize) == @alignOf(usize));
comptime assert(@alignOf(S) == @alignOf(usize));
var s: S = .{};
var v32: void align(32) = {};
var x32: u0 align(32) = 0;
var s32: S align(32) = .{};
// Runtime-known zero so the slice below is not comptime-folded.
var zero: usize = 0;
// Natural alignment: the struct, its zero-bit field, and a pointer derived
// from a zero-length slice of that field must all be usize-aligned.
try expect(@intFromPtr(&s) % @alignOf(usize) == 0);
try expect(@intFromPtr(&s.arr) % @alignOf(usize) == 0);
try expect(@intFromPtr(s.arr[zero..zero].ptr) % @alignOf(usize) == 0);
// Explicit over-alignment must be honored even for zero-bit values.
try expect(@intFromPtr(&v32) % 32 == 0);
try expect(@intFromPtr(&x32) % 32 == 0);
try expect(@intFromPtr(&s32) % 32 == 0);
try expect(@intFromPtr(&s32.arr) % 32 == 0);
try expect(@intFromPtr(s32.arr[zero..zero].ptr) % 32 == 0);
}
// Verifies that a zero-bit field in an `extern` struct still contributes its
// alignment, raising the struct's alignment and offsetting following fields,
// and that the zero-bit field's address agrees with `@offsetOf`.
test "zero-bit fields in extern struct pad fields appropriately" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const S = extern struct {
x: u8,
a: [0]u16 = .{},
y: u8,
};
// `a` should give `S` alignment 2, and pad `y` out to offset 2 (after `a`).
comptime assert(@alignOf(S) == 2);
comptime assert(@sizeOf(S) == 4);
comptime assert(@offsetOf(S, "x") == 0);
comptime assert(@offsetOf(S, "a") == 2);
comptime assert(@offsetOf(S, "y") == 2);
var s: S = .{ .x = 100, .y = 200 };
try expect(@intFromPtr(&s) % 2 == 0);
// Runtime addresses must match the comptime offsets above: `y` sits 2 bytes
// past `x`, and the zero-bit `a` shares `y`'s address (both at offset 2).
try expect(@intFromPtr(&s.y) - @intFromPtr(&s.x) == 2);
try expect(@intFromPtr(&s.y) == @intFromPtr(&s.a));
// Round-tripping through the zero-bit field pointer recovers the parent.
try expect(@fieldParentPtr(S, "a", &s.a) == &s);
}

View File

@ -18,24 +18,13 @@ test "@alignOf(T) before referencing T" {
}
// NOTE(review): this span is a diff rendering with the +/- markers stripped:
// the two inner `{ ... }` blocks are the DELETED pre-change version of the
// test (its empty-struct block asserts the old `@alignOf == 0` behavior this
// commit removes), and the trailing statements are the ADDED replacement
// body, which only checks a nonzero-bit struct.
test "comparison of @alignOf(T) against zero" {
{
const T = struct { x: u32 };
try expect(!(@alignOf(T) == 0));
try expect(@alignOf(T) != 0);
try expect(!(@alignOf(T) < 0));
try expect(!(@alignOf(T) <= 0));
try expect(@alignOf(T) > 0);
try expect(@alignOf(T) >= 0);
}
{
// Old behavior: an empty struct had alignment 0. Deleted by this commit.
const T = struct {};
try expect(@alignOf(T) == 0);
try expect(!(@alignOf(T) != 0));
try expect(!(@alignOf(T) < 0));
try expect(@alignOf(T) <= 0);
try expect(!(@alignOf(T) > 0));
try expect(@alignOf(T) >= 0);
}
// New body: `@alignOf` of a struct with runtime bits is strictly positive.
const T = struct { x: u32 };
try expect(!(@alignOf(T) == 0));
try expect(@alignOf(T) != 0);
try expect(!(@alignOf(T) < 0));
try expect(!(@alignOf(T) <= 0));
try expect(@alignOf(T) > 0);
try expect(@alignOf(T) >= 0);
}
test "correct alignment for elements and slices of aligned array" {

View File

@ -37,7 +37,7 @@ test "switch on empty tagged union" {
test "empty union" {
const U = union {};
try expect(@sizeOf(U) == 0);
// NOTE(review): diff rendering — the `== 0` line below is the deleted
// pre-change assertion and the `== 1` line is its replacement: under the
// new zero-bit rules an empty union has alignment 1, not 0.
try expect(@alignOf(U) == 0);
try expect(@alignOf(U) == 1);
}
test "empty extern union" {