Mirror of https://github.com/ziglang/zig.git, synced 2026-01-05 21:13:24 +00:00.
stage2: better codegen for byte-aligned packed struct fields
* Sema: handle overaligned packed struct field pointers
* LLVM: handle byte-aligned packed struct field pointers
This commit is contained in:
parent
70dc910086
commit
3e30ba3f20
23
src/Sema.zig
23
src/Sema.zig
@ -18678,8 +18678,6 @@ fn structFieldPtrByIndex(
|
||||
|
||||
const target = sema.mod.getTarget();
|
||||
|
||||
// TODO handle when the struct pointer is overaligned, we should return a potentially
|
||||
// over-aligned field pointer too.
|
||||
if (struct_obj.layout == .Packed) {
|
||||
comptime assert(Type.packed_struct_layout_version == 2);
|
||||
|
||||
@ -18700,6 +18698,27 @@ fn structFieldPtrByIndex(
|
||||
ptr_ty_data.host_size = struct_ptr_ty_info.host_size;
|
||||
ptr_ty_data.bit_offset += struct_ptr_ty_info.bit_offset;
|
||||
}
|
||||
|
||||
const parent_align = if (struct_ptr_ty_info.@"align" != 0)
|
||||
struct_ptr_ty_info.@"align"
|
||||
else
|
||||
struct_ptr_ty_info.pointee_type.abiAlignment(target);
|
||||
ptr_ty_data.@"align" = parent_align;
|
||||
|
||||
// If the field happens to be byte-aligned, simplify the pointer type.
|
||||
// The pointee type bit size must match its ABI byte size so that loads and stores
|
||||
// do not interfere with the surrounding packed bits.
|
||||
if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0) {
|
||||
const byte_offset = ptr_ty_data.bit_offset / 8;
|
||||
const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(target);
|
||||
const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target);
|
||||
if (elem_size_bytes * 8 == elem_size_bits) {
|
||||
const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, byte_offset | parent_align));
|
||||
ptr_ty_data.bit_offset = 0;
|
||||
ptr_ty_data.host_size = 0;
|
||||
ptr_ty_data.@"align" = new_align;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ptr_ty_data.@"align" = field.abi_align;
|
||||
}
|
||||
|
||||
@ -8311,23 +8311,41 @@ pub const FuncGen = struct {
|
||||
field_index: u32,
|
||||
) !?*const llvm.Value {
|
||||
if (self.liveness.isUnused(inst)) return null;
|
||||
|
||||
const target = self.dg.object.target;
|
||||
const struct_ty = struct_ptr_ty.childType();
|
||||
switch (struct_ty.zigTypeTag()) {
|
||||
.Struct => switch (struct_ty.containerLayout()) {
|
||||
.Packed => {
|
||||
// From LLVM's perspective, a pointer to a packed struct and a pointer
|
||||
// to a field of a packed struct are the same. The difference is in the
|
||||
// Zig pointer type which provides information for how to mask and shift
|
||||
// out the relevant bits when accessing the pointee.
|
||||
// Here we perform a bitcast because we want to use the host_size
|
||||
// as the llvm pointer element type.
|
||||
const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
|
||||
// TODO this can be removed if we change host_size to be bits instead
|
||||
// of bytes.
|
||||
return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
|
||||
const result_ty = self.air.typeOfIndex(inst);
|
||||
const result_ty_info = result_ty.ptrInfo().data;
|
||||
const result_llvm_ty = try self.dg.lowerType(result_ty);
|
||||
|
||||
if (result_ty_info.host_size != 0) {
|
||||
// From LLVM's perspective, a pointer to a packed struct and a pointer
|
||||
// to a field of a packed struct are the same. The difference is in the
|
||||
// Zig pointer type which provides information for how to mask and shift
|
||||
// out the relevant bits when accessing the pointee.
|
||||
// Here we perform a bitcast because we want to use the host_size
|
||||
// as the llvm pointer element type.
|
||||
return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
|
||||
}
|
||||
|
||||
// We have a pointer to a packed struct field that happens to be byte-aligned.
|
||||
// Offset our operand pointer by the correct number of bytes.
|
||||
const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target);
|
||||
if (byte_offset == 0) {
|
||||
return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
|
||||
}
|
||||
const llvm_bytes_ptr_ty = self.context.intType(8).pointerType(0);
|
||||
const ptr_as_bytes = self.builder.buildBitCast(struct_ptr, llvm_bytes_ptr_ty, "");
|
||||
const llvm_usize = try self.dg.lowerType(Type.usize);
|
||||
const llvm_index = llvm_usize.constInt(byte_offset, .False);
|
||||
const indices: [1]*const llvm.Value = .{llvm_index};
|
||||
const new_ptr = self.builder.buildInBoundsGEP(ptr_as_bytes, &indices, indices.len, "");
|
||||
return self.builder.buildBitCast(new_ptr, result_llvm_ty, "");
|
||||
},
|
||||
else => {
|
||||
const target = self.dg.module.getTarget();
|
||||
var ty_buf: Type.Payload.Pointer = undefined;
|
||||
if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
|
||||
return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
|
||||
|
||||
21
src/type.zig
21
src/type.zig
@ -5591,6 +5591,27 @@ pub const Type = extern union {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the byte offset of the field at `field_index` within the packed
/// struct type `ty`, measured from the start of the struct's backing integer.
/// Asserts that `ty` is a struct with `.Packed` layout.
/// NOTE(review): if the selected field has no runtime bits (or `field_index`
/// is out of range), `bit_offset` is never assigned and reading it is
/// undefined behavior — presumably callers only pass runtime fields; verify.
pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, target: Target) u32 {
    const struct_obj = ty.castTag(.@"struct").?.data;
    assert(struct_obj.layout == .Packed);
    // Guard against silent breakage if the packed-struct layout algorithm changes.
    comptime assert(Type.packed_struct_layout_version == 2);

    // Accumulate bit offsets of runtime fields in declaration order; capture
    // the running total when we reach the requested field.
    var bit_offset: u16 = undefined;
    var running_bits: u16 = 0;
    for (struct_obj.fields.values()) |f, i| {
        if (!f.ty.hasRuntimeBits()) continue;

        if (i == field_index) {
            bit_offset = running_bits;
        }
        running_bits += @intCast(u16, f.ty.bitSize(target));
    }
    // Total backing-integer size in bytes, rounded up. Currently unused: on
    // big-endian targets the byte offset would need to be computed from the
    // opposite end of the host integer.
    const host_size = (running_bits + 7) / 8;
    _ = host_size; // TODO big-endian
    // Caller is expected to have ensured the field is byte-aligned; integer
    // division truncates any sub-byte remainder.
    const byte_offset = bit_offset / 8;
    return byte_offset;
}
|
||||
|
||||
pub const FieldOffset = struct {
|
||||
field: usize,
|
||||
offset: u64,
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const assert = std.debug.assert;
|
||||
const expect = std.testing.expect;
|
||||
const expectEqual = std.testing.expectEqual;
|
||||
const native_endian = builtin.cpu.arch.endian();
|
||||
@ -305,3 +306,67 @@ test "regular in irregular packed struct" {
|
||||
try expectEqual(@as(u16, 235), foo.bar.a);
|
||||
try expectEqual(@as(u8, 42), foo.bar.b);
|
||||
}
|
||||
|
||||
test "byte-aligned field pointer offsets" {
    // Only the LLVM-backed stage2 backend implements this codegen path so far.
    if (builtin.zig_backend == .stage1) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

    const S = struct {
        // Every field is byte-aligned, so `&a.x` should lower to a plain
        // byte-offset pointer rather than a host-integer bit-field pointer.
        const A = packed struct {
            a: u8,
            b: u8,
            c: u8,
            d: u8,
        };

        const B = packed struct {
            a: u16,
            b: u16,
        };

        fn doTheTest() !void {
            var a: A = .{
                .a = 1,
                .b = 2,
                .c = 3,
                .d = 4,
            };
            // Field-pointer alignment derives from the parent's (ABI) alignment
            // combined with the field's byte offset: offset 0 keeps align(4),
            // odd offsets drop to align(1) (the natural u8 default, unprinted).
            comptime assert(@TypeOf(&a.a) == *align(4) u8);
            comptime assert(@TypeOf(&a.b) == *u8);
            comptime assert(@TypeOf(&a.c) == *align(2) u8);
            comptime assert(@TypeOf(&a.d) == *u8);
            try expect(a.a == 1);
            try expect(a.b == 2);
            try expect(a.c == 3);
            try expect(a.d == 4);
            // Read-modify-write through each field pointer must not disturb
            // the neighboring packed bytes.
            a.a += 1;
            a.b += 1;
            a.c += 1;
            a.d += 1;
            try expect(a.a == 2);
            try expect(a.b == 3);
            try expect(a.c == 4);
            try expect(a.d == 5);

            var b: B = .{
                .a = 1,
                .b = 2,
            };
            // Same checks with u16 fields: offset 0 → parent align(4),
            // offset 2 → natural align(2) (unprinted default for *u16).
            comptime assert(@TypeOf(&b.a) == *align(4) u16);
            comptime assert(@TypeOf(&b.b) == *u16);
            try expect(b.a == 1);
            try expect(b.b == 2);
            b.a += 1;
            b.b += 1;
            try expect(b.a == 2);
            try expect(b.b == 3);
        }
    };

    // Exercise both runtime codegen and the comptime interpreter.
    try S.doTheTest();
    comptime try S.doTheTest();
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user