codegen: fix field offsets in packed structs

* add nested packed struct/union behavior tests
* use ptr_info.packed_offset rather than duplicating the logic from Sema.structFieldPtrByIndex()
* use the container_ptr_info.packed_offset to account for non-byte-aligned nested structs
* dedup type.packedStructFieldBitOffset() and module.structPackedFieldBitOffset()
Xavier Bouchoux, 2023-07-29 20:08:08 +02:00
parent 412d863ba5, commit 62d178e91a
7 changed files with 362 additions and 65 deletions
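The bullets boil down to one rule: for a pointer to a packed-struct field whose `packed_offset.host_size` is 0 (a byte-aligned field), the byte offset is `@divExact(field_bit_offset + parent_pointer_bit_offset, 8)`, with the field's bit offset coming from the shared Module.structPackedFieldBitOffset helper instead of per-backend bit counting. A minimal standalone sketch of that invariant (the Inner/Outer types are illustrative, not from this diff; assumes a little-endian target, as the new tests do):

const std = @import("std");
const assert = std.debug.assert;

const Inner = packed struct { a: u4, b: u4, c: u8 };
const Outer = packed struct { base: u8, inner: Inner, bit: u1 };

test "byte offset of a byte-aligned nested packed field" {
    const S = struct {
        var s: Outer = .{ .base = 1, .inner = .{ .a = 2, .b = 3, .c = 4 }, .bit = 0 };
    };
    // inner.c starts 8 bits into Inner and inner starts 8 bits into Outer,
    // so its byte-aligned pointer sits @divExact(8 + 8, 8) == 2 bytes past the base.
    comptime assert(@bitOffsetOf(Inner, "c") == 8);
    comptime assert(@bitOffsetOf(Outer, "inner") == 8);
    try std.testing.expectEqual(@intFromPtr(&S.s) + 2, @intFromPtr(&S.s.inner.c));
}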


@@ -3090,12 +3090,19 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
             return func.lowerParentPtr(elem.base.toValue(), @as(u32, @intCast(elem_offset + offset)));
         },
         .field => |field| {
-            const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod);
+            const parent_ptr_ty = mod.intern_pool.typeOf(field.base).toType();
+            const parent_ty = parent_ptr_ty.childType(mod);
+            const field_index: u32 = @intCast(field.index);
             const field_offset = switch (parent_ty.zigTypeTag(mod)) {
-                .Struct => switch (parent_ty.containerLayout(mod)) {
-                    .Packed => parent_ty.packedStructFieldByteOffset(@as(usize, @intCast(field.index)), mod),
-                    else => parent_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod),
+                .Struct => blk: {
+                    if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
+                        if (ptr.ty.toType().ptrInfo(mod).packed_offset.host_size == 0)
+                            break :blk @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + parent_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8)
+                        else
+                            break :blk 0;
+                    }
+                    break :blk parent_ty.structFieldOffset(field_index, mod);
                 },
                 .Union => switch (parent_ty.containerLayout(mod)) {
                     .Packed => 0,
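The fix here is folding the parent pointer's own `packed_offset.bit_offset` into the field's bit offset before dividing, which is what makes non-byte-aligned nested structs come out right. A worked example against the S3 test added below: for `&S3.v0.s.s.s.byte`, the field sits 1 bit into the innermost struct (after `bit0: u1`) and the pointer to that struct carries a bit offset of 15 (`pad: u8` + `v: u2` + `v: u3` + `v: u2`), so:

const byte_offset = @divExact(1 + 15, 8); // == 2, matching @intFromPtr(&S3.v0.s.s.s.byte) - @intFromPtr(&S3.v0)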


@@ -5774,14 +5774,15 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
     const ptr_container_ty_info = ptr_container_ty.ptrInfo(mod);
     const container_ty = ptr_container_ty.childType(mod);
 
-    const field_offset: i32 = @intCast(switch (container_ty.containerLayout(mod)) {
-        .Auto, .Extern => container_ty.structFieldOffset(index, mod),
-        .Packed => if (container_ty.zigTypeTag(mod) == .Struct and
-            ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0)
-            container_ty.packedStructFieldByteOffset(index, mod) + @divExact(ptr_container_ty_info.packed_offset.bit_offset, 8)
-        else
-            0,
-    });
+    const field_offset: i32 = blk: {
+        if (mod.typeToPackedStruct(container_ty)) |struct_type| {
+            break :blk if (ptr_field_ty.ptrInfo(mod).packed_offset.host_size == 0)
+                @divExact(mod.structPackedFieldBitOffset(struct_type, index) + ptr_container_ty_info.packed_offset.bit_offset, 8)
+            else
+                0;
+        }
+        break :blk @intCast(container_ty.structFieldOffset(index, mod));
+    };
 
     const src_mcv = try self.resolveInst(operand);
     const dst_mcv = if (switch (src_mcv) {
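Same rule as the wasm backend above: `host_size == 0` marks a byte-aligned field pointer, whose offset is the summed bit offset divided exactly by 8; otherwise the pointer addresses the same host integer as its container and the offset is 0. Checking the arithmetic against the S2 test added below, `p2.b` has a bit offset of 7 within `p2` and the container pointer carries bit offset 33:

const p2_b_offset = @divExact(7 + 33, 8); // == 5, matching @intFromPtr(&S2.s.p2.b) - @intFromPtr(&S2.s)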


@@ -5269,22 +5269,26 @@ fn fieldLocation(
     const ip = &mod.intern_pool;
     const container_ty = container_ptr_ty.childType(mod);
 
     return switch (container_ty.zigTypeTag(mod)) {
-        .Struct => switch (container_ty.containerLayout(mod)) {
-            .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index_usize| {
+        .Struct => blk: {
+            if (mod.typeToPackedStruct(container_ty)) |struct_type| {
+                if (field_ptr_ty.ptrInfo(mod).packed_offset.host_size == 0)
+                    break :blk .{ .byte_offset = @divExact(mod.structPackedFieldBitOffset(struct_type, field_index) + container_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8) }
+                else
+                    break :blk .begin;
+            }
+            for (field_index..container_ty.structFieldCount(mod)) |next_field_index_usize| {
                 const next_field_index: u32 = @intCast(next_field_index_usize);
                 if (container_ty.structFieldIsComptime(next_field_index, mod)) continue;
                 const field_ty = container_ty.structFieldType(next_field_index, mod);
                 if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-                break .{ .field = if (container_ty.isSimpleTuple(mod))
+                break :blk .{ .field = if (container_ty.isSimpleTuple(mod))
                     .{ .field = next_field_index }
                 else
                     .{ .identifier = ip.stringToSlice(container_ty.legacyStructFieldName(next_field_index, mod)) } };
-            } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
-            .Packed => if (field_ptr_ty.ptrInfo(mod).packed_offset.host_size == 0)
-                .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) + @divExact(container_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8) }
-            else
-                .begin,
+            }
+            break :blk if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin;
         },
         .Union => {
             const union_obj = mod.typeToUnion(container_ty).?;


@@ -4,7 +4,6 @@ const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
 const log = std.log.scoped(.codegen);
 const math = std.math;
-const native_endian = builtin.cpu.arch.endian();
 const DW = std.dwarf;
 const Builder = @import("llvm/Builder.zig");

@@ -3770,7 +3769,7 @@ pub const Object = struct {
             .opt_payload,
             .elem,
             .field,
-            => try o.lowerParentPtr(val, ty.ptrInfo(mod).packed_offset.bit_offset % 8 == 0),
+            => try o.lowerParentPtr(val),
             .comptime_field => unreachable,
         };
         switch (ptr.len) {

@@ -4230,15 +4229,16 @@ pub const Object = struct {
         return o.lowerDeclRefValue(ptr_ty, decl_index);
     }
 
-    fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Allocator.Error!Builder.Constant {
+    fn lowerParentPtr(o: *Object, ptr_val: Value) Allocator.Error!Builder.Constant {
         const mod = o.module;
         const ip = &mod.intern_pool;
-        return switch (ip.indexToKey(ptr_val.toIntern()).ptr.addr) {
+        const ptr = ip.indexToKey(ptr_val.toIntern()).ptr;
+        return switch (ptr.addr) {
             .decl => |decl| o.lowerParentPtrDecl(decl),
             .mut_decl => |mut_decl| o.lowerParentPtrDecl(mut_decl.decl),
             .int => |int| try o.lowerIntAsPtr(int),
             .eu_payload => |eu_ptr| {
-                const parent_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true);
+                const parent_ptr = try o.lowerParentPtr(eu_ptr.toValue());
                 const eu_ty = ip.typeOf(eu_ptr).toType().childType(mod);
                 const payload_ty = eu_ty.errorUnionPayload(mod);

@@ -4256,7 +4256,7 @@ pub const Object = struct {
                 });
             },
             .opt_payload => |opt_ptr| {
-                const parent_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true);
+                const parent_ptr = try o.lowerParentPtr(opt_ptr.toValue());
                 const opt_ty = ip.typeOf(opt_ptr).toType().childType(mod);
                 const payload_ty = opt_ty.optionalChild(mod);

@@ -4274,7 +4274,7 @@ pub const Object = struct {
             },
             .comptime_field => unreachable,
             .elem => |elem_ptr| {
-                const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true);
+                const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue());
                 const elem_ty = ip.typeOf(elem_ptr.base).toType().elemType2(mod);
 
                 return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{

@@ -4282,9 +4282,9 @@ pub const Object = struct {
                 });
             },
             .field => |field_ptr| {
-                const parent_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
-                const parent_ty = ip.typeOf(field_ptr.base).toType().childType(mod);
+                const parent_ptr = try o.lowerParentPtr(field_ptr.base.toValue());
+                const parent_ptr_ty = ip.typeOf(field_ptr.base).toType();
+                const parent_ty = parent_ptr_ty.childType(mod);
                 const field_index: u32 = @intCast(field_ptr.index);
                 switch (parent_ty.zigTypeTag(mod)) {
                     .Union => {

@@ -4309,22 +4309,14 @@ pub const Object = struct {
                     },
                     .Struct => {
                         if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
-                            if (!byte_aligned) return parent_ptr;
+                            const ptr_info = ptr.ty.toType().ptrInfo(mod);
+                            if (ptr_info.packed_offset.host_size != 0) return parent_ptr;
+                            const parent_ptr_info = parent_ptr_ty.ptrInfo(mod);
+                            const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index) + parent_ptr_info.packed_offset.bit_offset;
                             const llvm_usize = try o.lowerType(Type.usize);
-                            const base_addr =
-                                try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize);
-                            // count bits of fields before this one
-                            // TODO https://github.com/ziglang/zig/issues/17178
-                            const prev_bits = b: {
-                                var b: usize = 0;
-                                for (0..field_index) |i| {
-                                    const field_ty = struct_type.field_types.get(ip)[i].toType();
-                                    if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
-                                    b += @intCast(field_ty.bitSize(mod));
-                                }
-                                break :b b;
-                            };
-                            const byte_offset = try o.builder.intConst(llvm_usize, prev_bits / 8);
+                            const base_addr = try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize);
+                            const byte_offset = try o.builder.intConst(llvm_usize, @divExact(bit_offset, 8));
                             const field_addr = try o.builder.binConst(.add, base_addr, byte_offset);
                             return o.builder.castConst(.inttoptr, field_addr, .ptr);
                         }


@@ -3028,24 +3028,10 @@ pub const Type = struct {
         };
     }
 
-    pub fn packedStructFieldBitOffset(ty: Type, field_index: usize, mod: *Module) u32 {
+    pub fn packedStructFieldByteOffset(ty: Type, field_index: u32, mod: *Module) u32 {
         const ip = &mod.intern_pool;
         const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
         assert(struct_type.layout == .Packed);
-        comptime assert(Type.packed_struct_layout_version == 2);
-        var running_bits: u32 = 0;
-        for (struct_type.field_types.get(ip), 0..) |field_ty, i| {
-            if (i == field_index) break;
-            if (!field_ty.toType().hasRuntimeBits(mod)) continue;
-            const field_bits: u32 = @intCast(field_ty.toType().bitSize(mod));
-            running_bits += field_bits;
-        }
-        return running_bits;
-    }
-
-    pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
-        return packedStructFieldBitOffset(ty, field_index, mod) / 8;
+        return @divExact(mod.structPackedFieldBitOffset(struct_type, field_index), 8);
     }
 
     pub const FieldOffset = struct {
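The surviving logic, Module.structPackedFieldBitOffset, sums the bit sizes of the runtime fields before the requested one. A comptime re-statement of that rule for user code (my own sketch, not the compiler's implementation; ignores comptime fields, which the real helper skips):

const std = @import("std");

fn packedBitOffset(comptime T: type, comptime field_name: []const u8) comptime_int {
    var bits: comptime_int = 0;
    inline for (@typeInfo(T).Struct.fields) |f| {
        if (std.mem.eql(u8, f.name, field_name)) return bits;
        bits += @bitSizeOf(f.type); // packed struct fields occupy exactly their bit size, no padding
    }
    @compileError("no field named " ++ field_name);
}

comptime {
    const P = packed struct { a: u7, b: u8 };
    std.debug.assert(packedBitOffset(P, "b") == 7);
}

The builtin @bitOffsetOf reports the same quantity for packed structs.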


@@ -253,6 +253,79 @@ test "regular in irregular packed struct" {
     try expectEqual(@as(u8, 42), foo.bar.b);
 }
 
+test "nested packed struct unaligned" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (native_endian != .Little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
+
+    const S1 = packed struct {
+        a: u4,
+        b: u4,
+        c: u8,
+    };
+    const S2 = packed struct {
+        base: u8,
+        p0: S1,
+        bit0: u1,
+        p1: packed struct {
+            a: u8,
+        },
+        p2: packed struct {
+            a: u7,
+            b: u8,
+        },
+        p3: S1,
+
+        var s: @This() = .{
+            .base = 1,
+            .p0 = .{ .a = 2, .b = 3, .c = 4 },
+            .bit0 = 0,
+            .p1 = .{ .a = 5 },
+            .p2 = .{ .a = 6, .b = 7 },
+            .p3 = .{ .a = 8, .b = 9, .c = 10 },
+        };
+    };
+    try expect(S2.s.base == 1);
+    try expect(S2.s.p0.a == 2);
+    try expect(S2.s.p0.b == 3);
+    try expect(S2.s.p0.c == 4);
+    try expect(S2.s.bit0 == 0);
+    try expect(S2.s.p1.a == 5);
+    try expect(S2.s.p2.a == 6);
+    try expect(S2.s.p2.b == 7);
+    try expect(S2.s.p3.a == 8);
+    try expect(S2.s.p3.b == 9);
+    try expect(S2.s.p3.c == 10);
+
+    const S3 = packed struct {
+        pad: u8,
+        v: u2,
+        s: packed struct {
+            v: u3,
+            s: packed struct {
+                v: u2,
+                s: packed struct {
+                    bit0: u1,
+                    byte: u8,
+                    bit1: u1,
+                },
+            },
+        },
+
+        var v0: @This() = .{ .pad = 0, .v = 1, .s = .{ .v = 2, .s = .{ .v = 3, .s = .{ .bit0 = 0, .byte = 4, .bit1 = 1 } } } };
+    };
+    try expect(S3.v0.v == 1);
+    try expect(S3.v0.s.v == 2);
+    try expect(S3.v0.s.s.v == 3);
+    try expect(S3.v0.s.s.s.bit0 == 0);
+    try expect(S3.v0.s.s.s.byte == 4);
+    try expect(S3.v0.s.s.s.bit1 == 1);
+}
+
 test "byte-aligned field pointer offsets" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -354,6 +427,45 @@ test "byte-aligned field pointer offsets" {
     try comptime S.doTheTest();
 }
 
+test "nested packed struct field pointers" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // ubsan unaligned pointer access
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (native_endian != .Little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
+
+    const S2 = packed struct {
+        base: u8,
+        p0: packed struct {
+            a: u4,
+            b: u4,
+            c: u8,
+        },
+        bit: u1,
+        p1: packed struct {
+            a: u7,
+            b: u8,
+        },
+
+        var s: @This() = .{ .base = 1, .p0 = .{ .a = 2, .b = 3, .c = 4 }, .bit = 0, .p1 = .{ .a = 5, .b = 6 } };
+    };
+    const ptr_base = &S2.s.base;
+    const ptr_p0_a = &S2.s.p0.a;
+    const ptr_p0_b = &S2.s.p0.b;
+    const ptr_p0_c = &S2.s.p0.c;
+    const ptr_p1_a = &S2.s.p1.a;
+    const ptr_p1_b = &S2.s.p1.b;
+    try expectEqual(@as(u8, 1), ptr_base.*);
+    try expectEqual(@as(u4, 2), ptr_p0_a.*);
+    try expectEqual(@as(u4, 3), ptr_p0_b.*);
+    try expectEqual(@as(u8, 4), ptr_p0_c.*);
+    try expectEqual(@as(u7, 5), ptr_p1_a.*);
+    try expectEqual(@as(u8, 6), ptr_p1_b.*);
+}
+
 test "load pointer from packed struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -380,6 +492,7 @@ test "@intFromPtr on a packed struct field" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (native_endian != .Little) return error.SkipZigTest;
 
     const S = struct {
         const P = packed struct {
@@ -387,6 +500,7 @@ test "@intFromPtr on a packed struct field" {
             y: u8,
             z: u32,
         };
+
         var p0: P = P{
             .x = 1,
             .y = 2,
@@ -396,6 +510,138 @@ test "@intFromPtr on a packed struct field" {
     try expect(@intFromPtr(&S.p0.z) - @intFromPtr(&S.p0.x) == 2);
 }
 
+test "@intFromPtr on a packed struct field unaligned and nested" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (native_endian != .Little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
+
+    const S1 = packed struct {
+        a: u4,
+        b: u4,
+        c: u8,
+    };
+    const S2 = packed struct {
+        base: u8,
+        p0: S1,
+        bit0: u1,
+        p1: packed struct {
+            a: u8,
+        },
+        p2: packed struct {
+            a: u7,
+            b: u8,
+        },
+        p3: S1,
+
+        var s: @This() = .{
+            .base = 1,
+            .p0 = .{ .a = 2, .b = 3, .c = 4 },
+            .bit0 = 0,
+            .p1 = .{ .a = 5 },
+            .p2 = .{ .a = 6, .b = 7 },
+            .p3 = .{ .a = 8, .b = 9, .c = 10 },
+        };
+    };
+    switch (comptime @alignOf(S2)) {
+        4 => {
+            comptime assert(@TypeOf(&S2.s.base) == *align(4) u8);
+            comptime assert(@TypeOf(&S2.s.p0.a) == *align(1:0:2) u4);
+            comptime assert(@TypeOf(&S2.s.p0.b) == *align(1:4:2) u4);
+            comptime assert(@TypeOf(&S2.s.p0.c) == *u8);
+            comptime assert(@TypeOf(&S2.s.bit0) == *align(4:24:8) u1);
+            comptime assert(@TypeOf(&S2.s.p1.a) == *align(4:25:8) u8);
+            comptime assert(@TypeOf(&S2.s.p2.a) == *align(4:33:8) u7);
+            comptime assert(@TypeOf(&S2.s.p2.b) == *u8);
+            comptime assert(@TypeOf(&S2.s.p3.a) == *align(2:0:2) u4);
+            comptime assert(@TypeOf(&S2.s.p3.b) == *align(2:4:2) u4);
+            comptime assert(@TypeOf(&S2.s.p3.c) == *u8);
+        },
+        8 => {
+            comptime assert(@TypeOf(&S2.s.base) == *align(8) u8);
+            comptime assert(@TypeOf(&S2.s.p0.a) == *align(1:0:2) u4);
+            comptime assert(@TypeOf(&S2.s.p0.b) == *align(1:4:2) u4);
+            comptime assert(@TypeOf(&S2.s.p0.c) == *u8);
+            comptime assert(@TypeOf(&S2.s.bit0) == *align(8:24:8) u1);
+            comptime assert(@TypeOf(&S2.s.p1.a) == *align(8:25:8) u8);
+            comptime assert(@TypeOf(&S2.s.p2.a) == *align(8:33:8) u7);
+            comptime assert(@TypeOf(&S2.s.p2.b) == *u8);
+            comptime assert(@TypeOf(&S2.s.p3.a) == *align(2:0:2) u4);
+            comptime assert(@TypeOf(&S2.s.p3.b) == *align(2:4:2) u4);
+            comptime assert(@TypeOf(&S2.s.p3.c) == *u8);
+        },
+        else => {},
+    }
+    try expect(@intFromPtr(&S2.s.base) - @intFromPtr(&S2.s) == 0);
+    try expect(@intFromPtr(&S2.s.p0.a) - @intFromPtr(&S2.s) == 1);
+    try expect(@intFromPtr(&S2.s.p0.b) - @intFromPtr(&S2.s) == 1);
+    try expect(@intFromPtr(&S2.s.p0.c) - @intFromPtr(&S2.s) == 2);
+    try expect(@intFromPtr(&S2.s.bit0) - @intFromPtr(&S2.s) == 0);
+    try expect(@intFromPtr(&S2.s.p1.a) - @intFromPtr(&S2.s) == 0);
+    try expect(@intFromPtr(&S2.s.p2.a) - @intFromPtr(&S2.s) == 0);
+    try expect(@intFromPtr(&S2.s.p2.b) - @intFromPtr(&S2.s) == 5);
+    try expect(@intFromPtr(&S2.s.p3.a) - @intFromPtr(&S2.s) == 6);
+    try expect(@intFromPtr(&S2.s.p3.b) - @intFromPtr(&S2.s) == 6);
+    try expect(@intFromPtr(&S2.s.p3.c) - @intFromPtr(&S2.s) == 7);
+
+    const S3 = packed struct {
+        pad: u8,
+        v: u2,
+        s: packed struct {
+            v: u3,
+            s: packed struct {
+                v: u2,
+                s: packed struct {
+                    bit0: u1,
+                    byte: u8,
+                    bit1: u1,
+                },
+            },
+        },
+
+        var v0: @This() = .{ .pad = 0, .v = 1, .s = .{ .v = 2, .s = .{ .v = 3, .s = .{ .bit0 = 0, .byte = 4, .bit1 = 1 } } } };
+    };
+    comptime assert(@TypeOf(&S3.v0.v) == *align(4:8:4) u2);
+    comptime assert(@TypeOf(&S3.v0.s.v) == *align(4:10:4) u3);
+    comptime assert(@TypeOf(&S3.v0.s.s.v) == *align(4:13:4) u2);
+    comptime assert(@TypeOf(&S3.v0.s.s.s.bit0) == *align(4:15:4) u1);
+    comptime assert(@TypeOf(&S3.v0.s.s.s.byte) == *align(2) u8);
+    comptime assert(@TypeOf(&S3.v0.s.s.s.bit1) == *align(4:24:4) u1);
+    try expect(@intFromPtr(&S3.v0.v) - @intFromPtr(&S3.v0) == 0);
+    try expect(@intFromPtr(&S3.v0.s) - @intFromPtr(&S3.v0) == 0);
+    try expect(@intFromPtr(&S3.v0.s.v) - @intFromPtr(&S3.v0) == 0);
+    try expect(@intFromPtr(&S3.v0.s.s) - @intFromPtr(&S3.v0) == 0);
+    try expect(@intFromPtr(&S3.v0.s.s.v) - @intFromPtr(&S3.v0) == 0);
+    try expect(@intFromPtr(&S3.v0.s.s.s) - @intFromPtr(&S3.v0) == 0);
+    try expect(@intFromPtr(&S3.v0.s.s.s.bit0) - @intFromPtr(&S3.v0) == 0);
+    try expect(@intFromPtr(&S3.v0.s.s.s.byte) - @intFromPtr(&S3.v0) == 2);
+    try expect(@intFromPtr(&S3.v0.s.s.s.bit1) - @intFromPtr(&S3.v0) == 0);
+}
+
+test "packed struct fields modification" {
+    const Small = packed struct {
+        val: u8 = 0,
+        lo: u4 = 0,
+        hi: u4 = 0,
+
+        var p: @This() = undefined;
+    };
+    Small.p = .{
+        .val = 0x12,
+        .lo = 3,
+        .hi = 4,
+    };
+    try expect(@as(u16, @bitCast(Small.p)) == 0x4312);
+    Small.p.val -= Small.p.lo;
+    Small.p.val += Small.p.hi;
+    Small.p.hi -= Small.p.lo;
+    try expect(@as(u16, @bitCast(Small.p)) == 0x1313);
+}
+
 test "optional pointer in packed struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@@ -410,7 +656,7 @@ test "optional pointer in packed struct" {
 test "nested packed struct field access test" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO packed structs larger than 64 bits
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

@@ -562,7 +808,7 @@ test "nested packed struct at non-zero offset 2" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO packed structs larger than 64 bits
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
 
     const S = struct {

@@ -700,7 +946,6 @@ test "packed struct initialized in bitcast" {
 test "pointer to container level packed struct field" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

@@ -727,7 +972,6 @@ test "store undefined to packed result location" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 
     var x: u4 = 0;
     var s = packed struct { x: u4, y: u4 }{ .x = x, .y = if (x > 0) x else undefined };

@@ -743,3 +987,19 @@ test "bitcast back and forth" {
     try expect(s.one == s2.one);
     try expect(s.two == s2.two);
 }
 
+test "field access of packed struct smaller than its abi size inside struct initialized with rls" {
+    if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .arm) return error.SkipZigTest;
+
+    const S = struct {
+        ps: packed struct { x: i2, y: i2 },
+
+        fn init(cond: bool) @This() {
+            return .{ .ps = .{ .x = 0, .y = if (cond) 1 else 0 } };
+        }
+    };
+    var s = S.init(true);
+    // note: this bug is triggered by the == operator, expectEqual will hide it
+    try expect(@as(i2, 0) == s.ps.x);
+    try expect(@as(i2, 1) == s.ps.y);
+}


@@ -38,3 +38,50 @@ test "flags in packed union" {
     try expectEqual(true, test_bits.enable_1);
     try expectEqual(false, test_bits.other_flags.flags.enable_1);
 }
 
+test "flags in packed union at offset" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
+    const FlagBits = packed union {
+        base_flags: packed union {
+            flags: packed struct(u4) {
+                enable_1: bool = true,
+                enable_2: bool = false,
+                enable_3: bool = false,
+                enable_4: bool = false,
+            },
+            bits: u4,
+        },
+        adv_flags: packed struct(u12) {
+            pad: u8 = 0,
+            adv: packed union {
+                flags: packed struct(u4) {
+                    enable_1: bool = true,
+                    enable_2: bool = false,
+                    enable_3: bool = false,
+                    enable_4: bool = false,
+                },
+                bits: u4,
+            },
+        },
+    };
+    var test_bits: FlagBits = .{ .adv_flags = .{ .adv = .{ .flags = .{} } } };
+
+    try expectEqual(@as(u8, 0), test_bits.adv_flags.pad);
+    try expectEqual(true, test_bits.adv_flags.adv.flags.enable_1);
+    try expectEqual(false, test_bits.adv_flags.adv.flags.enable_2);
+
+    test_bits.adv_flags.adv.flags.enable_1 = false;
+    test_bits.adv_flags.adv.flags.enable_2 = true;
+
+    try expectEqual(@as(u8, 0), test_bits.adv_flags.pad);
+    try expectEqual(false, test_bits.adv_flags.adv.flags.enable_1);
+    try expectEqual(true, test_bits.adv_flags.adv.flags.enable_2);
+
+    test_bits.adv_flags.adv.bits = 12;
+
+    try expectEqual(@as(u8, 0), test_bits.adv_flags.pad);
+    try expectEqual(false, test_bits.adv_flags.adv.flags.enable_1);
+    try expectEqual(false, test_bits.adv_flags.adv.flags.enable_2);
+}