mirror of https://github.com/ziglang/zig.git
synced 2025-12-06 14:23:09 +00:00
stage2: integer-backed packed structs
This implements #10113 for the self-hosted compiler only. It removes the ability to override the alignment of packed struct fields, and removes the ability to put pointers and arrays inside packed structs. After this commit, nearly all of the packed-struct behavior tests pass for the stage2 LLVM backend. I didn't implement the compile errors or compile error tests yet; I'm waiting until stage2 can build itself, and then I want to rework the compile error test harness, taking inspiration from Vexu's arocc test harness. At that point it should be a much nicer dev experience to work on compile errors.
This commit is contained in:
parent 65c0475970, commit 6249a24e81
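For orientation, here is a minimal sketch of the semantics this commit implements (the `Flags` type and the test are hypothetical illustrations, not code from the commit): a packed struct is laid out as a single backing unsigned integer, fields occupy consecutive bits starting from bit 0, and `align(...)` on a packed struct field becomes a compile error, as the AstGen hunk below shows.

```zig
const std = @import("std");

// Hypothetical illustration of integer-backed packed structs (#10113):
// the whole struct is one backing integer, fields are packed LSB-first,
// and per-field align() overrides are no longer accepted.
const Flags = packed struct {
    dont_override: bool, // bit 0 (writing `align(4)` here would now be a compile error)
    enable_hot_plug: bool, // bit 1
    _pad: u30 = 0, // bits 2..31
};

test "packed struct is backed by an integer" {
    try std.testing.expect(@bitSizeOf(Flags) == 32);
    const f = Flags{ .dont_override = true, .enable_hot_plug = false };
    // The struct round-trips through its backing integer.
    try std.testing.expect(@bitCast(u32, f) == 1);
}
```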
@@ -356,6 +356,7 @@ pub const TypeInfo = union(enum) {
         alignment: comptime_int,
         is_generic: bool,
         is_var_args: bool,
+        /// TODO change the language spec to make this not optional.
         return_type: ?type,
         args: []const Param,
 
@@ -23,7 +23,7 @@ pub const EdidOverrideProtocol = extern struct {
 };
 
 pub const EdidOverrideProtocolAttributes = packed struct {
-    dont_override: bool align(4),
+    dont_override: bool,
     enable_hot_plug: bool,
     _pad: u30 = 0,
 };
@@ -4002,6 +4002,9 @@ fn structDeclInner(
         wip_members.nextField(bits_per_field, .{ have_align, have_value, is_comptime, unused });
 
         if (have_align) {
+            if (layout == .Packed) {
+                try astgen.appendErrorNode(member.ast.align_expr, "unable to override alignment of packed struct fields", .{});
+            }
             const align_inst = try expr(&block_scope, &namespace.base, align_rl, member.ast.align_expr);
             wip_members.appendToField(@enumToInt(align_inst));
         }
@@ -886,15 +886,6 @@ pub const Struct = struct {
         offset: u32,
         is_comptime: bool,
 
-        /// Returns the field alignment, assuming the struct is packed.
-        pub fn packedAlignment(field: Field) u32 {
-            if (field.abi_align.tag() == .abi_align_default) {
-                return 0;
-            } else {
-                return @intCast(u32, field.abi_align.toUnsignedInt());
-            }
-        }
-
         /// Returns the field alignment, assuming the struct is not packed.
         pub fn normalAlignment(field: Field, target: Target) u32 {
             if (field.abi_align.tag() == .abi_align_default) {
@@ -985,6 +976,31 @@ pub const Struct = struct {
             => true,
         };
     }
+
+    pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 {
+        assert(s.layout == .Packed);
+        assert(s.haveFieldTypes());
+        var bit_sum: u64 = 0;
+        for (s.fields.values()) |field, i| {
+            if (i == index) {
+                return @intCast(u16, bit_sum);
+            }
+            bit_sum += field.ty.bitSize(target);
+        }
+        return @intCast(u16, bit_sum);
+    }
+
+    pub fn packedIntegerBits(s: Struct, target: Target) u16 {
+        return s.packedFieldBitOffset(target, s.fields.count());
+    }
+
+    pub fn packedIntegerType(s: Struct, target: Target, buf: *Type.Payload.Bits) Type {
+        buf.* = .{
+            .base = .{ .tag = .int_unsigned },
+            .data = s.packedIntegerBits(target),
+        };
+        return Type.initPayload(&buf.base);
+    }
 };
 
 /// Represents the data that an enum declaration provides, when the fields
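`packedFieldBitOffset` above reduces packed struct layout to a running sum of field bit sizes. A userspace sketch of the same computation, assuming a hypothetical helper name and test type:

```zig
const std = @import("std");

// Sketch: the bit offset of field N in a packed struct is the sum of the
// bit sizes of fields 0..N-1; the total is the backing integer's width.
fn packedBitOffset(comptime T: type, comptime field_index: usize) u16 {
    var bit_sum: u16 = 0;
    inline for (@typeInfo(T).Struct.fields) |field, i| {
        if (i == field_index) return bit_sum;
        bit_sum += @bitSizeOf(field.field_type);
    }
    return bit_sum; // index == field count: total bits, as packedIntegerBits does
}

test "running-sum bit offsets" {
    const S = packed struct { a: u3, b: u5, c: u8 };
    try std.testing.expect(packedBitOffset(S, 1) == 3);
    try std.testing.expect(packedBitOffset(S, 2) == 8);
    try std.testing.expect(packedBitOffset(S, 3) == 16);
}
```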
src/Sema.zig (147 changed lines)
@@ -9701,7 +9701,8 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
-    const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
+    const unresolved_operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
+    const operand_ty = try sema.resolveTypeFields(block, operand_src, unresolved_operand_ty);
     const target = sema.mod.getTarget();
     const bit_size = operand_ty.bitSize(target);
     return sema.addIntUnsigned(Type.initTag(.comptime_int), bit_size);
@@ -9891,6 +9892,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .Fn => {
             // TODO: look into memoizing this result.
             const info = ty.fnInfo();
+
             var params_anon_decl = try block.startAnonDecl(src);
             defer params_anon_decl.deinit();
 
@@ -9948,19 +9950,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :v try Value.Tag.decl_ref.create(sema.arena, new_decl);
             };
 
-            const field_values = try sema.arena.alloc(Value, 6);
+            const field_values = try sema.arena.create([6]Value);
+            field_values.* = .{
                 // calling_convention: CallingConvention,
-            field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc));
+                try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)),
                 // alignment: comptime_int,
-            field_values[1] = try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target));
+                try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)),
                 // is_generic: bool,
-            field_values[2] = Value.makeBool(info.is_generic);
+                Value.makeBool(info.is_generic),
                 // is_var_args: bool,
-            field_values[3] = Value.makeBool(info.is_var_args);
+                Value.makeBool(info.is_var_args),
                 // return_type: ?type,
-            field_values[4] = try Value.Tag.ty.create(sema.arena, info.return_type);
+                try Value.Tag.opt_payload.create(
+                    sema.arena,
+                    try Value.Tag.ty.create(sema.arena, info.return_type),
+                ),
                 // args: []const Fn.Param,
-            field_values[5] = args_val;
+                args_val,
+            };
 
             return sema.addConstant(
                 type_info_ty,
@@ -10007,25 +10014,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const alignment = if (info.@"align" != 0)
                 info.@"align"
             else
-                info.pointee_type.abiAlignment(target);
+                try sema.typeAbiAlignment(block, src, info.pointee_type);
 
-            const field_values = try sema.arena.alloc(Value, 8);
+            const field_values = try sema.arena.create([8]Value);
+            field_values.* = .{
                 // size: Size,
-            field_values[0] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size));
+                try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size)),
                 // is_const: bool,
-            field_values[1] = Value.makeBool(!info.mutable);
+                Value.makeBool(!info.mutable),
                 // is_volatile: bool,
-            field_values[2] = Value.makeBool(info.@"volatile");
+                Value.makeBool(info.@"volatile"),
                 // alignment: comptime_int,
-            field_values[3] = try Value.Tag.int_u64.create(sema.arena, alignment);
+                try Value.Tag.int_u64.create(sema.arena, alignment),
                 // address_space: AddressSpace
-            field_values[4] = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace"));
+                try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace")),
                 // child: type,
-            field_values[5] = try Value.Tag.ty.create(sema.arena, info.pointee_type);
+                try Value.Tag.ty.create(sema.arena, info.pointee_type),
                 // is_allowzero: bool,
-            field_values[6] = Value.makeBool(info.@"allowzero");
+                Value.makeBool(info.@"allowzero"),
                 // sentinel: ?*const anyopaque,
-            field_values[7] = try sema.optRefValue(block, src, info.pointee_type, info.sentinel);
+                try sema.optRefValue(block, src, info.pointee_type, info.sentinel),
+            };
 
             return sema.addConstant(
                 type_info_ty,
@@ -10377,7 +10386,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const default_val_ptr = try sema.optRefValue(block, src, field.ty, opt_default_val);
             const alignment = switch (layout) {
                 .Auto, .Extern => field.normalAlignment(target),
-                .Packed => field.packedAlignment(),
+                .Packed => 0,
             };
 
             struct_field_fields.* = .{
@@ -12120,6 +12129,7 @@ fn zirBitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 
 fn zirOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const offset = try bitOffsetOf(sema, block, inst);
+    // TODO reminder to make this a compile error for packed structs
     return sema.addIntUnsigned(Type.comptime_int, offset / 8);
 }
 
@@ -12143,7 +12153,8 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
         );
     }
 
-    const index = ty.structFields().getIndex(field_name) orelse {
+    const fields = ty.structFields();
+    const index = fields.getIndex(field_name) orelse {
         return sema.fail(
             block,
             rhs_src,
@@ -12153,26 +12164,27 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
     };
 
     const target = sema.mod.getTarget();
-    const layout = ty.containerLayout();
-    if (layout == .Packed) {
-        var it = ty.iteratePackedStructOffsets(target);
-        while (it.next()) |field_offset| {
-            if (field_offset.field == index) {
-                return (field_offset.offset * 8) + field_offset.running_bits;
-            }
-        }
-    } else {
-        var it = ty.iterateStructOffsets(target);
-        while (it.next()) |field_offset| {
-            if (field_offset.field == index) {
-                return field_offset.offset * 8;
-            }
-        }
-    }
-
-    unreachable;
+    switch (ty.containerLayout()) {
+        .Packed => {
+            var bit_sum: u64 = 0;
+            for (fields.values()) |field, i| {
+                if (i == index) {
+                    return bit_sum;
+                }
+                bit_sum += field.ty.bitSize(target);
+            } else unreachable;
+        },
+        else => {
+            var it = ty.iterateStructOffsets(target);
+            while (it.next()) |field_offset| {
+                if (field_offset.field == index) {
+                    return field_offset.offset * 8;
+                }
+            } else unreachable;
+        },
+    }
 }
 
 /// Returns `true` if the type was a comptime_int.
 fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
     switch (ty.zigTypeTag()) {
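With the rework above, `@bitOffsetOf` on a packed struct is exactly that accumulated sum, with no padding or per-field alignment in between. A small test-style sketch (the struct `S` is a hypothetical illustration):

```zig
const std = @import("std");

test "bit offsets in a packed struct" {
    const S = packed struct { a: u3, b: u5, c: u8 };
    try std.testing.expect(@bitOffsetOf(S, "a") == 0);
    try std.testing.expect(@bitOffsetOf(S, "b") == 3);
    try std.testing.expect(@bitOffsetOf(S, "c") == 8);
    // @offsetOf reports bit_offset / 8; the hunk above leaves a TODO to make
    // byte offsets a compile error for packed structs.
}
```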
@@ -14199,61 +14211,44 @@ fn structFieldPtrByIndex(
     field_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
     const field = struct_obj.fields.values()[field_index];
 
     const struct_ptr_ty = sema.typeOf(struct_ptr);
+    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo().data;
 
     var ptr_ty_data: Type.Payload.Pointer.Data = .{
         .pointee_type = field.ty,
-        .mutable = struct_ptr_ty.ptrIsMutable(),
-        .@"addrspace" = struct_ptr_ty.ptrAddressSpace(),
+        .mutable = struct_ptr_ty_info.mutable,
+        .@"addrspace" = struct_ptr_ty_info.@"addrspace",
     };
 
     // TODO handle when the struct pointer is overaligned, we should return a potentially
     // over-aligned field pointer too.
-    if (struct_obj.layout == .Packed) p: {
+    if (struct_obj.layout == .Packed) {
         const target = sema.mod.getTarget();
-        comptime assert(Type.packed_struct_layout_version == 1);
+        comptime assert(Type.packed_struct_layout_version == 2);
 
-        var offset: u64 = 0;
         var running_bits: u16 = 0;
         for (struct_obj.fields.values()) |f, i| {
             if (!(try sema.typeHasRuntimeBits(block, field_src, f.ty))) continue;
 
-            const field_align = f.packedAlignment();
-            if (field_align == 0) {
             if (i == field_index) {
                 ptr_ty_data.bit_offset = running_bits;
             }
             running_bits += @intCast(u16, f.ty.bitSize(target));
+        }
+        ptr_ty_data.host_size = (running_bits + 7) / 8;
+
+        // If this is a packed struct embedded in another one, we need to offset
+        // the bits against each other.
+        if (struct_ptr_ty_info.host_size != 0) {
+            ptr_ty_data.host_size = struct_ptr_ty_info.host_size;
+            ptr_ty_data.bit_offset += struct_ptr_ty_info.bit_offset;
+        }
     } else {
-        if (running_bits != 0) {
-            var int_payload: Type.Payload.Bits = .{
-                .base = .{ .tag = .int_unsigned },
-                .data = running_bits,
-            };
-            const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-            if (i > field_index) {
-                ptr_ty_data.host_size = @intCast(u16, int_ty.abiSize(target));
-                break :p;
-            }
-            const int_align = int_ty.abiAlignment(target);
-            offset = std.mem.alignForwardGeneric(u64, offset, int_align);
-            offset += int_ty.abiSize(target);
-            running_bits = 0;
-        }
-        offset = std.mem.alignForwardGeneric(u64, offset, field_align);
-        if (i == field_index) {
-            break :p;
-        }
-        offset += f.ty.abiSize(target);
+        if (field.abi_align.tag() != .abi_align_default) {
+            ptr_ty_data.@"align" = @intCast(u32, field.abi_align.toUnsignedInt());
         }
     }
-    assert(running_bits != 0);
-    var int_payload: Type.Payload.Bits = .{
-        .base = .{ .tag = .int_unsigned },
-        .data = running_bits,
-    };
-    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-    ptr_ty_data.host_size = @intCast(u16, int_ty.abiSize(target));
-    }
     const ptr_field_ty = try Type.ptr(sema.arena, ptr_ty_data);
 
     if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
@@ -15849,13 +15844,18 @@ fn beginComptimePtrLoad(
 fn bitCast(
     sema: *Sema,
     block: *Block,
-    dest_ty: Type,
+    dest_ty_unresolved: Type,
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
+    const dest_ty = try sema.resolveTypeFields(block, inst_src, dest_ty_unresolved);
+    try sema.resolveTypeLayout(block, inst_src, dest_ty);
+
+    const old_ty = try sema.resolveTypeFields(block, inst_src, sema.typeOf(inst));
+    try sema.resolveTypeLayout(block, inst_src, old_ty);
+
     // TODO validate the type size and other compile errors
     if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
-        const old_ty = sema.typeOf(inst);
         const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty);
         return sema.addConstant(dest_ty, result_val);
     }
@@ -17506,6 +17506,9 @@ fn semaStructFields(
         // But only resolve the source location if we need to emit a compile error.
         try sema.resolveType(&block_scope, src, field_type_ref);
 
+        // TODO emit compile errors for invalid field types
+        // such as arrays and pointers inside packed structs.
+
         const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
         assert(!gop.found_existing);
         gop.value_ptr.* = .{
@@ -18690,6 +18693,12 @@ pub fn typeHasRuntimeBits(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type)
     return true;
 }
 
+fn typeAbiAlignment(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !u32 {
+    try sema.resolveTypeLayout(block, src, ty);
+    const target = sema.mod.getTarget();
+    return ty.abiAlignment(target);
+}
+
 /// Synchronize logic with `Type.isFnOrHasRuntimeBits`.
 pub fn fnHasRuntimeBits(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
     const fn_info = ty.fnInfo();
@@ -865,8 +865,12 @@ pub const DeclGen = struct {
             };
             return dg.context.structType(&fields, fields.len, .False);
         }
-        const llvm_addrspace = dg.llvmAddressSpace(t.ptrAddressSpace());
-        const elem_ty = t.childType();
+        const ptr_info = t.ptrInfo().data;
+        const llvm_addrspace = dg.llvmAddressSpace(ptr_info.@"addrspace");
+        if (ptr_info.host_size != 0) {
+            return dg.context.intType(ptr_info.host_size * 8).pointerType(llvm_addrspace);
+        }
+        const elem_ty = ptr_info.pointee_type;
         const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
             .Opaque, .Fn => true,
             .Array => elem_ty.childType().hasRuntimeBits(),
@@ -977,6 +981,14 @@ pub const DeclGen = struct {
 
                 const struct_obj = t.castTag(.@"struct").?.data;
 
+                if (struct_obj.layout == .Packed) {
+                    var buf: Type.Payload.Bits = undefined;
+                    const int_ty = struct_obj.packedIntegerType(target, &buf);
+                    const int_llvm_ty = try dg.llvmType(int_ty);
+                    gop.value_ptr.* = int_llvm_ty;
+                    return int_llvm_ty;
+                }
+
                 const name = try struct_obj.getFullyQualifiedName(gpa);
                 defer gpa.free(name);
 
@@ -988,84 +1000,10 @@ pub const DeclGen = struct {
                 var llvm_field_types = try std.ArrayListUnmanaged(*const llvm.Type).initCapacity(gpa, struct_obj.fields.count());
                 defer llvm_field_types.deinit(gpa);
 
-                if (struct_obj.layout == .Packed) {
-                    try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count() * 2);
-                    comptime assert(Type.packed_struct_layout_version == 1);
-                    var offset: u64 = 0;
-                    var big_align: u32 = 0;
-                    var running_bits: u16 = 0;
-                    for (struct_obj.fields.values()) |field| {
-                        if (!field.ty.hasRuntimeBits()) continue;
-
-                        const field_align = field.packedAlignment();
-                        if (field_align == 0) {
-                            running_bits += @intCast(u16, field.ty.bitSize(target));
-                        } else {
-                            if (running_bits != 0) {
-                                var int_payload: Type.Payload.Bits = .{
-                                    .base = .{ .tag = .int_unsigned },
-                                    .data = running_bits,
-                                };
-                                const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                                const int_align = int_ty.abiAlignment(target);
-                                big_align = @maximum(big_align, int_align);
-                                const llvm_int_ty = try dg.llvmType(int_ty);
-                                const prev_offset = offset;
-                                offset = std.mem.alignForwardGeneric(u64, offset, int_align);
-                                const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                                if (padding_bytes != 0) {
-                                    const padding = dg.context.intType(8).arrayType(padding_bytes);
-                                    llvm_field_types.appendAssumeCapacity(padding);
-                                }
-                                llvm_field_types.appendAssumeCapacity(llvm_int_ty);
-                                offset += int_ty.abiSize(target);
-                                running_bits = 0;
-                            }
-                            big_align = @maximum(big_align, field_align);
-                            const prev_offset = offset;
-                            offset = std.mem.alignForwardGeneric(u64, offset, field_align);
-                            const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                            if (padding_bytes != 0) {
-                                const padding = dg.context.intType(8).arrayType(padding_bytes);
-                                llvm_field_types.appendAssumeCapacity(padding);
-                            }
-                            llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty));
-                            offset += field.ty.abiSize(target);
-                        }
-                    }
-
-                    if (running_bits != 0) {
-                        var int_payload: Type.Payload.Bits = .{
-                            .base = .{ .tag = .int_unsigned },
-                            .data = running_bits,
-                        };
-                        const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                        const int_align = int_ty.abiAlignment(target);
-                        big_align = @maximum(big_align, int_align);
-                        const prev_offset = offset;
-                        offset = std.mem.alignForwardGeneric(u64, offset, int_align);
-                        const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                        if (padding_bytes != 0) {
-                            const padding = dg.context.intType(8).arrayType(padding_bytes);
-                            llvm_field_types.appendAssumeCapacity(padding);
-                        }
-                        const llvm_int_ty = try dg.llvmType(int_ty);
-                        llvm_field_types.appendAssumeCapacity(llvm_int_ty);
-                    }
-
-                    const prev_offset = offset;
-                    offset = std.mem.alignForwardGeneric(u64, offset, big_align);
-                    const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                    if (padding_bytes != 0) {
-                        const padding = dg.context.intType(8).arrayType(padding_bytes);
-                        llvm_field_types.appendAssumeCapacity(padding);
-                    }
-                } else {
                 for (struct_obj.fields.values()) |field| {
                     if (!field.ty.hasRuntimeBits()) continue;
                     llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty));
                 }
-                }
 
                 llvm_struct_ty.structSetBody(
                     llvm_field_types.items.ptr,
@@ -1521,27 +1459,21 @@ pub const DeclGen = struct {
             const llvm_struct_ty = try dg.llvmType(tv.ty);
             const field_vals = tv.val.castTag(.@"struct").?.data;
             const gpa = dg.gpa;
-            const llvm_field_count = llvm_struct_ty.countStructElementTypes();
-
-            var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, llvm_field_count);
-            defer llvm_fields.deinit(gpa);
-
-            var need_unnamed = false;
             const struct_obj = tv.ty.castTag(.@"struct").?.data;
 
             if (struct_obj.layout == .Packed) {
                 const target = dg.module.getTarget();
+                var int_ty_buf: Type.Payload.Bits = undefined;
+                const int_ty = struct_obj.packedIntegerType(target, &int_ty_buf);
+                const int_llvm_ty = try dg.llvmType(int_ty);
                 const fields = struct_obj.fields.values();
-                comptime assert(Type.packed_struct_layout_version == 1);
+                comptime assert(Type.packed_struct_layout_version == 2);
-                var offset: u64 = 0;
-                var big_align: u32 = 0;
+                var running_int: *const llvm.Value = int_llvm_ty.constNull();
                 var running_bits: u16 = 0;
-                var running_int: *const llvm.Value = llvm_struct_ty.structGetTypeAtIndex(0).constNull();
                 for (field_vals) |field_val, i| {
                     const field = fields[i];
                     if (!field.ty.hasRuntimeBits()) continue;
 
-                    const field_align = field.packedAlignment();
-                    if (field_align == 0) {
                     const non_int_val = try dg.genTypedValue(.{
                         .ty = field.ty,
                         .val = field_val,
@@ -1549,75 +1481,20 @@ pub const DeclGen = struct {
                     const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
                     const small_int_ty = dg.context.intType(ty_bit_size);
                     const small_int_val = non_int_val.constBitCast(small_int_ty);
-                    const big_int_ty = running_int.typeOf();
-                    const shift_rhs = big_int_ty.constInt(running_bits, .False);
-                    const extended_int_val = small_int_val.constZExt(big_int_ty);
+                    const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
+                    const extended_int_val = small_int_val.constZExt(int_llvm_ty);
                     const shifted = extended_int_val.constShl(shift_rhs);
                     running_int = running_int.constOr(shifted);
                     running_bits += ty_bit_size;
-                    } else {
-                        big_align = @maximum(big_align, field_align);
-                        if (running_bits != 0) {
-                            var int_payload: Type.Payload.Bits = .{
-                                .base = .{ .tag = .int_unsigned },
-                                .data = running_bits,
-                            };
-                            const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                            const int_align = int_ty.abiAlignment(target);
-                            big_align = @maximum(big_align, int_align);
-                            const prev_offset = offset;
-                            offset = std.mem.alignForwardGeneric(u64, offset, int_align);
-                            const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                            if (padding_bytes != 0) {
-                                const padding = dg.context.intType(8).arrayType(padding_bytes);
-                                llvm_fields.appendAssumeCapacity(padding.getUndef());
-                            }
                 }
-                            llvm_fields.appendAssumeCapacity(running_int);
-                            running_int = llvm_struct_ty.structGetTypeAtIndex(@intCast(c_uint, llvm_fields.items.len)).constNull();
-                            offset += int_ty.abiSize(target);
-                            running_bits = 0;
-                        }
-                        const prev_offset = offset;
-                        offset = std.mem.alignForwardGeneric(u64, offset, field_align);
-                        const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                        if (padding_bytes != 0) {
-                            const padding = dg.context.intType(8).arrayType(padding_bytes);
-                            llvm_fields.appendAssumeCapacity(padding.getUndef());
-                        }
-                        llvm_fields.appendAssumeCapacity(try dg.genTypedValue(.{
-                            .ty = field.ty,
-                            .val = field_val,
-                        }));
-                        offset += field.ty.abiSize(target);
-                    }
-                }
-                if (running_bits != 0) {
-                    var int_payload: Type.Payload.Bits = .{
-                        .base = .{ .tag = .int_unsigned },
-                        .data = running_bits,
-                    };
-                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                    const int_align = int_ty.abiAlignment(target);
-                    big_align = @maximum(big_align, int_align);
-                    const prev_offset = offset;
-                    offset = std.mem.alignForwardGeneric(u64, offset, int_align);
-                    const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                    if (padding_bytes != 0) {
-                        const padding = dg.context.intType(8).arrayType(padding_bytes);
-                        llvm_fields.appendAssumeCapacity(padding.getUndef());
-                    }
-                    llvm_fields.appendAssumeCapacity(running_int);
-                    offset += int_ty.abiSize(target);
+                return running_int;
                 }
 
-                const prev_offset = offset;
-                offset = std.mem.alignForwardGeneric(u64, offset, big_align);
-                const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                if (padding_bytes != 0) {
-                    const padding = dg.context.intType(8).arrayType(padding_bytes);
-                    llvm_fields.appendAssumeCapacity(padding.getUndef());
-                }
-            } else {
+            const llvm_field_count = llvm_struct_ty.countStructElementTypes();
+            var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, llvm_field_count);
+            defer llvm_fields.deinit(gpa);
+
+            var need_unnamed = false;
             for (field_vals) |field_val, i| {
                 const field_ty = tv.ty.structFieldType(i);
                 if (!field_ty.hasRuntimeBits()) continue;
@@ -1631,7 +1508,6 @@ pub const DeclGen = struct {
 
                 llvm_fields.appendAssumeCapacity(field_llvm_val);
             }
-            }
 
             if (need_unnamed) {
                 return dg.context.constStruct(
@@ -2923,11 +2799,28 @@ pub const FuncGen = struct {
         if (!isByRef(struct_ty)) {
             assert(!isByRef(field_ty));
             switch (struct_ty.zigTypeTag()) {
-                .Struct => {
+                .Struct => switch (struct_ty.containerLayout()) {
+                    .Packed => {
+                        const struct_obj = struct_ty.castTag(.@"struct").?.data;
+                        const bit_offset = struct_obj.packedFieldBitOffset(target, field_index);
+                        const containing_int = struct_llvm_val;
+                        const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
+                        const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
+                        const elem_llvm_ty = try self.dg.llvmType(field_ty);
+                        if (field_ty.zigTypeTag() == .Float) {
+                            const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                            const same_size_int = self.context.intType(elem_bits);
+                            const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
+                            return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
+                        }
+                        return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
+                    },
+                    else => {
                         var ptr_ty_buf: Type.Payload.Pointer = undefined;
                         const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
                         return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
                     },
+                },
                 .Union => {
                     return self.todo("airStructFieldVal byval union", .{});
                 },
@@ -2937,6 +2830,7 @@ pub const FuncGen = struct {
 
         switch (struct_ty.zigTypeTag()) {
             .Struct => {
+                assert(struct_ty.containerLayout() != .Packed);
                 var ptr_ty_buf: Type.Payload.Pointer = undefined;
                 const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
                 const field_ptr = self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
@@ -4928,7 +4822,20 @@ pub const FuncGen = struct {
     ) !?*const llvm.Value {
         const struct_ty = struct_ptr_ty.childType();
         switch (struct_ty.zigTypeTag()) {
-            .Struct => {
+            .Struct => switch (struct_ty.containerLayout()) {
+                .Packed => {
+                    // From LLVM's perspective, a pointer to a packed struct and a pointer
+                    // to a field of a packed struct are the same. The difference is in the
+                    // Zig pointer type which provides information for how to mask and shift
+                    // out the relevant bits when accessing the pointee.
+                    // Here we perform a bitcast because we want to use the host_size
+                    // as the llvm pointer element type.
+                    const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
+                    // TODO this can be removed if we change host_size to be bits instead
+                    // of bytes.
+                    return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
+                },
+                else => {
                     const target = self.dg.module.getTarget();
                     var ty_buf: Type.Payload.Pointer = undefined;
                     if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
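The comments in the hunk above describe the new access path for packed struct fields: the pointer really points at the whole host integer, and the Zig pointer type carries the bit offset used to shift and truncate on load. A hedged sketch of that read path in plain Zig, with hypothetical names:

```zig
const std = @import("std");

// Sketch of the shift-and-truncate field load the LLVM backend now emits:
// take the backing ("host") integer, shift the field's bits down to bit 0,
// then truncate to the field's width.
fn loadField(comptime FieldInt: type, host: u32, comptime bit_offset: u5) FieldInt {
    return @truncate(FieldInt, host >> bit_offset);
}

test "shift-and-truncate field load" {
    const S = packed struct { a: u3, b: u5, _pad: u24 = 0 };
    const host = @bitCast(u32, S{ .a = 6, .b = 21 });
    try std.testing.expect(loadField(u5, host, 3) == 21);
}
```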
@@ -4944,6 +4851,7 @@ pub const FuncGen = struct {
                     return self.builder.buildInBoundsGEP(struct_ptr, &indices, indices.len, "");
                 }
             },
+            },
             .Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty, field_index),
             else => unreachable,
         }
@@ -5373,7 +5281,8 @@ fn llvmFieldIndex(
         return null;
     }
     const struct_obj = ty.castTag(.@"struct").?.data;
-    if (struct_obj.layout != .Packed) {
+    assert(struct_obj.layout != .Packed);
+
     var llvm_field_index: c_uint = 0;
     for (struct_obj.fields.values()) |field, i| {
         if (!field.ty.hasRuntimeBits())
@@ -5397,80 +5306,6 @@ fn llvmFieldIndex(
         }
     }
 
-    // Our job here is to return the host integer field index.
-    comptime assert(Type.packed_struct_layout_version == 1);
-    var offset: u64 = 0;
-    var running_bits: u16 = 0;
-    var llvm_field_index: c_uint = 0;
-    for (struct_obj.fields.values()) |field, i| {
-        if (!field.ty.hasRuntimeBits())
-            continue;
-
-        const field_align = field.packedAlignment();
-        if (field_align == 0) {
-            if (i == field_index) {
-                ptr_pl_buf.* = .{
-                    .data = .{
-                        .pointee_type = field.ty,
-                        .bit_offset = running_bits,
-                        .@"addrspace" = .generic,
-                    },
-                };
-            }
-            running_bits += @intCast(u16, field.ty.bitSize(target));
-        } else {
-            if (running_bits != 0) {
-                var int_payload: Type.Payload.Bits = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = running_bits,
-                };
-                const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                if (i > field_index) {
-                    ptr_pl_buf.data.host_size = @intCast(u16, int_ty.abiSize(target));
-                    return llvm_field_index;
-                }
-
-                const int_align = int_ty.abiAlignment(target);
-                const prev_offset = offset;
-                offset = std.mem.alignForwardGeneric(u64, offset, int_align);
-                const padding_bytes = @intCast(c_uint, offset - prev_offset);
-                if (padding_bytes != 0) {
-                    llvm_field_index += 1;
-                }
-                llvm_field_index += 1;
-                offset += int_ty.abiSize(target);
-                running_bits = 0;
-            }
-            const prev_offset = offset;
-            offset = std.mem.alignForwardGeneric(u64, offset, field_align);
-            const padding_bytes = @intCast(c_uint, offset - prev_offset);
-            if (padding_bytes != 0) {
-                llvm_field_index += 1;
-            }
-            if (i == field_index) {
-                ptr_pl_buf.* = .{
-                    .data = .{
-                        .pointee_type = field.ty,
-                        .@"align" = field_align,
-                        .@"addrspace" = .generic,
-                    },
-                };
-                return llvm_field_index;
-            }
-            llvm_field_index += 1;
-            offset += field.ty.abiSize(target);
-        }
-    }
-    assert(running_bits != 0);
-    var int_payload: Type.Payload.Bits = .{
-        .base = .{ .tag = .int_unsigned },
-        .data = running_bits,
-    };
-    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-    ptr_pl_buf.data.host_size = @intCast(u16, int_ty.abiSize(target));
-    return llvm_field_index;
-}
 
 fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool {
     switch (fn_info.cc) {
         .Unspecified, .Inline => return isByRef(fn_info.return_type),
@@ -5518,6 +5353,9 @@ fn isByRef(ty: Type) bool {
 
         .Array, .Frame => return ty.hasRuntimeBits(),
         .Struct => {
+            // Packed structs are represented to LLVM as integers.
+            if (ty.containerLayout() == .Packed) return false;
+
            if (!ty.hasRuntimeBits()) return false;
            if (ty.castTag(.tuple)) |tuple| {
                var count: usize = 0;
src/type.zig (171 changed lines)
@@ -1952,13 +1952,16 @@ pub const Type = extern union {
 
             .@"struct" => {
                 const fields = self.structFields();
-                const is_packed = if (self.castTag(.@"struct")) |payload| p: {
+                if (self.castTag(.@"struct")) |payload| {
                     const struct_obj = payload.data;
                     assert(struct_obj.haveLayout());
-                    break :p struct_obj.layout == .Packed;
-                } else false;
+                    if (struct_obj.layout == .Packed) {
+                        var buf: Type.Payload.Bits = undefined;
+                        const int_ty = struct_obj.packedIntegerType(target, &buf);
+                        return int_ty.abiAlignment(target);
+                    }
+                }
 
-                if (!is_packed) {
                 var big_align: u32 = 0;
                 for (fields.values()) |field| {
                     if (!field.ty.hasRuntimeBits()) continue;
@@ -1967,44 +1970,6 @@ pub const Type = extern union {
                     big_align = @maximum(big_align, field_align);
                 }
                 return big_align;
-                }
-
-                // For packed structs, we take the maximum alignment of the backing integers.
-                comptime assert(Type.packed_struct_layout_version == 1);
-                var big_align: u32 = 0;
-                var running_bits: u16 = 0;
-
-                for (fields.values()) |field| {
-                    if (!field.ty.hasRuntimeBits()) continue;
-
-                    const field_align = field.packedAlignment();
-                    if (field_align == 0) {
-                        running_bits += @intCast(u16, field.ty.bitSize(target));
-                    } else {
-                        if (running_bits != 0) {
-                            var int_payload: Payload.Bits = .{
-                                .base = .{ .tag = .int_unsigned },
-                                .data = running_bits,
-                            };
-                            const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                            const int_align = int_ty.abiAlignment(target);
-                            big_align = @maximum(big_align, int_align);
-                            running_bits = 0;
-                        }
-                        big_align = @maximum(big_align, field_align);
-                    }
-                }
-
-                if (running_bits != 0) {
-                    var int_payload: Payload.Bits = .{
-                        .base = .{ .tag = .int_unsigned },
-                        .data = running_bits,
-                    };
-                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                    const int_align = int_ty.abiAlignment(target);
-                    big_align = @maximum(big_align, int_align);
-                }
-                return big_align;
             },
 
             .tuple => {
@@ -2090,13 +2055,21 @@ pub const Type = extern union {
             .void,
             => 0,
 
-            .@"struct", .tuple => {
+            .@"struct", .tuple => switch (self.containerLayout()) {
+                .Packed => {
+                    const struct_obj = self.castTag(.@"struct").?.data;
+                    var buf: Type.Payload.Bits = undefined;
+                    const int_ty = struct_obj.packedIntegerType(target, &buf);
+                    return int_ty.abiSize(target);
+                },
+                else => {
                     const field_count = self.structFieldCount();
                     if (field_count == 0) {
                         return 0;
                     }
                     return self.structFieldOffset(field_count, target);
                 },
+            },
 
             .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
                 var buffer: Payload.Bits = undefined;
@@ -2264,7 +2237,6 @@ pub const Type = extern union {
             .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer
             .function => unreachable, // represents machine code; not a pointer
             .anyopaque => unreachable,
-            .void => unreachable,
             .type => unreachable,
             .comptime_int => unreachable,
             .comptime_float => unreachable,
@@ -2282,19 +2254,26 @@ pub const Type = extern union {
             .generic_poison => unreachable,
             .bound_fn => unreachable,
 
+            .void => 0,
+
             .@"struct" => {
                 const field_count = ty.structFieldCount();
                 if (field_count == 0) return 0;
 
                 const struct_obj = ty.castTag(.@"struct").?.data;
-                assert(struct_obj.haveLayout());
+                assert(struct_obj.haveFieldTypes());
 
+                switch (struct_obj.layout) {
+                    .Auto, .Extern => {
                         var total: u64 = 0;
                         for (struct_obj.fields.values()) |field| {
                             total += field.ty.bitSize(target);
                         }
                         return total;
                     },
+                    .Packed => return struct_obj.packedIntegerBits(target),
+                }
+            },
 
             .tuple => {
                 @panic("TODO bitSize tuples");
@@ -4039,78 +4018,6 @@ pub const Type = extern union {
         }
     }
 
-    pub const PackedFieldOffset = struct {
-        field: usize,
-        offset: u64,
-        running_bits: u16,
-    };
-
-    pub const PackedStructOffsetIterator = struct {
-        field: usize = 0,
-        offset: u64 = 0,
-        big_align: u32 = 0,
-        running_bits: u16 = 0,
-        struct_obj: *Module.Struct,
-        target: Target,
-
-        pub fn next(it: *PackedStructOffsetIterator) ?PackedFieldOffset {
-            comptime assert(Type.packed_struct_layout_version == 1);
-            if (it.struct_obj.fields.count() <= it.field)
-                return null;
-
-            const field = it.struct_obj.fields.values()[it.field];
-            defer it.field += 1;
-            if (!field.ty.hasRuntimeBits()) {
-                return PackedFieldOffset{
-                    .field = it.field,
-                    .offset = it.offset,
-                    .running_bits = it.running_bits,
-                };
-            }
-
-            const field_align = field.packedAlignment();
-            if (field_align == 0) {
-                defer it.running_bits += @intCast(u16, field.ty.bitSize(it.target));
-                return PackedFieldOffset{
-                    .field = it.field,
-                    .offset = it.offset,
-                    .running_bits = it.running_bits,
-                };
-            } else {
-                it.big_align = @maximum(it.big_align, field_align);
-
-                if (it.running_bits != 0) {
-                    var int_payload: Payload.Bits = .{
-                        .base = .{ .tag = .int_unsigned },
-                        .data = it.running_bits,
-                    };
-                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                    const int_align = int_ty.abiAlignment(it.target);
-                    it.big_align = @maximum(it.big_align, int_align);
-                    it.offset = std.mem.alignForwardGeneric(u64, it.offset, int_align);
-                    it.offset += int_ty.abiSize(it.target);
-                    it.running_bits = 0;
-                }
-                it.offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
-                defer it.offset += field.ty.abiSize(it.target);
-                return PackedFieldOffset{
-                    .field = it.field,
-                    .offset = it.offset,
-                    .running_bits = it.running_bits,
-                };
-            }
-        }
-    };
-
-    /// Get an iterator that iterates over all the struct field, returning the field and
-    /// offset of that field. Asserts that the type is a none packed struct.
-    pub fn iteratePackedStructOffsets(ty: Type, target: Target) PackedStructOffsetIterator {
-        const struct_obj = ty.castTag(.@"struct").?.data;
-        assert(struct_obj.haveLayout());
-        assert(struct_obj.layout == .Packed);
-        return .{ .struct_obj = struct_obj, .target = target };
-    }
-
     pub const FieldOffset = struct {
         field: usize,
         offset: u64,
@@ -4150,14 +4057,12 @@ pub const Type = extern union {
     }
 
     /// Supports structs and unions.
-    /// For packed structs, it returns the byte offset of the containing integer.
     pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
         switch (ty.tag()) {
             .@"struct" => {
                 const struct_obj = ty.castTag(.@"struct").?.data;
                 assert(struct_obj.haveLayout());
-                const is_packed = struct_obj.layout == .Packed;
-                if (!is_packed) {
+                assert(struct_obj.layout != .Packed);
                 var it = ty.iterateStructOffsets(target);
                 while (it.next()) |field_offset| {
                     if (index == field_offset.field)
@@ -4165,27 +4070,6 @@ pub const Type = extern union {
                 }
 
                 return std.mem.alignForwardGeneric(u64, it.offset, it.big_align);
-                }
-
-                var it = ty.iteratePackedStructOffsets(target);
-                while (it.next()) |field_offset| {
-                    if (index == field_offset.field)
-                        return field_offset.offset;
-                }
-
-                if (it.running_bits != 0) {
-                    var int_payload: Payload.Bits = .{
-                        .base = .{ .tag = .int_unsigned },
-                        .data = it.running_bits,
-                    };
-                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
-                    const int_align = int_ty.abiAlignment(target);
-                    it.big_align = @maximum(it.big_align, int_align);
-                    it.offset = std.mem.alignForwardGeneric(u64, it.offset, int_align);
-                    it.offset += int_ty.abiSize(target);
-                }
-                it.offset = std.mem.alignForwardGeneric(u64, it.offset, it.big_align);
-                return it.offset;
             },
 
             .tuple => {
@@ -4734,6 +4618,9 @@ pub const Type = extern union {
         /// an appropriate value for this field.
         @"addrspace": std.builtin.AddressSpace,
         bit_offset: u16 = 0,
+        /// If this is non-zero it means the pointer points to a sub-byte
+        /// range of data, which is backed by a "host integer" with this
+        /// number of bytes.
         host_size: u16 = 0,
         @"allowzero": bool = false,
         mutable: bool = true, // TODO rename this to const, not mutable
@@ -4953,7 +4840,7 @@ pub const Type = extern union {
 
     /// This is only used for comptime asserts. Bump this number when you make a change
     /// to packed struct layout to find out all the places in the codebase you need to edit!
-    pub const packed_struct_layout_version = 1;
+    pub const packed_struct_layout_version = 2;
 };
 
 pub const CType = enum {
src/value.zig (136 changed lines)
@@ -1078,7 +1078,9 @@ pub const Value = extern union {
                     buf_off += elem_size;
                 }
             },
-            .Struct => {
+            .Struct => switch (ty.containerLayout()) {
+                .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+                .Extern => {
                     const fields = ty.structFields().values();
                     const field_vals = val.castTag(.@"struct").?.data;
                     for (fields) |field, i| {
@ -1086,10 +1088,62 @@ pub const Value = extern union {
|
|||||||
writeToMemory(field_vals[i], field.ty, target, buffer[off..]);
|
writeToMemory(field_vals[i], field.ty, target, buffer[off..]);
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
.Packed => {
|
||||||
|
// TODO allocate enough heap space instead of using this buffer
|
||||||
|
// on the stack.
|
||||||
|
var buf: [16]std.math.big.Limb = undefined;
|
||||||
|
const host_int = packedStructToInt(val, ty, target, &buf);
|
||||||
|
const abi_size = @intCast(usize, ty.abiSize(target));
|
||||||
|
const bit_size = @intCast(usize, ty.bitSize(target));
|
||||||
|
host_int.writeTwosComplement(buffer, bit_size, abi_size, target.cpu.arch.endian());
|
||||||
|
},
|
||||||
|
},
|
||||||
else => @panic("TODO implement writeToMemory for more types"),
|
else => @panic("TODO implement writeToMemory for more types"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn packedStructToInt(val: Value, ty: Type, target: Target, buf: []std.math.big.Limb) BigIntConst {
|
||||||
|
var bigint = BigIntMutable.init(buf, 0);
|
||||||
|
const fields = ty.structFields().values();
|
||||||
|
const field_vals = val.castTag(.@"struct").?.data;
|
||||||
|
var bits: u16 = 0;
|
||||||
|
// TODO allocate enough heap space instead of using this buffer
|
||||||
|
// on the stack.
|
||||||
|
var field_buf: [16]std.math.big.Limb = undefined;
|
||||||
|
var field_space: BigIntSpace = undefined;
|
||||||
|
var field_buf2: [16]std.math.big.Limb = undefined;
|
||||||
|
for (fields) |field, i| {
|
||||||
|
const field_val = field_vals[i];
|
||||||
|
const field_bigint_const = switch (field.ty.zigTypeTag()) {
|
||||||
|
.Float => switch (field.ty.floatBits(target)) {
|
||||||
|
16 => bitcastFloatToBigInt(f16, val.toFloat(f16), &field_buf),
|
||||||
|
32 => bitcastFloatToBigInt(f32, val.toFloat(f32), &field_buf),
|
||||||
|
64 => bitcastFloatToBigInt(f64, val.toFloat(f64), &field_buf),
|
||||||
|
80 => bitcastFloatToBigInt(f80, val.toFloat(f80), &field_buf),
|
||||||
|
128 => bitcastFloatToBigInt(f128, val.toFloat(f128), &field_buf),
|
||||||
|
else => unreachable,
|
||||||
|
},
|
||||||
|
.Int, .Bool => field_val.toBigInt(&field_space),
|
||||||
|
.Struct => packedStructToInt(field_val, field.ty, target, &field_buf),
|
||||||
|
else => unreachable,
|
||||||
|
};
|
||||||
|
var field_bigint = BigIntMutable.init(&field_buf2, 0);
|
||||||
|
field_bigint.shiftLeft(field_bigint_const, bits);
|
||||||
|
bits += @intCast(u16, field.ty.bitSize(target));
|
||||||
|
bigint.bitOr(bigint.toConst(), field_bigint.toConst());
|
||||||
|
}
|
||||||
|
return bigint.toConst();
|
||||||
|
}
|
||||||
|
|
||||||
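The `.Packed` branch and `packedStructToInt` above are the heart of the change: each field value is lowered to bits, shifted left by the running bit offset, and OR-ed into one backing integer, which is then written out in two's complement. The same technique on a plain `u32`, as a minimal sketch (the `P` and `packToInt` names are illustrative, not from this commit):

const std = @import("std");

const P = packed struct { a: u4, b: u12, c: u16 };

fn packToInt(p: P) u32 {
    var acc: u32 = 0;
    // The first field lands in the lowest bits, mirroring the
    // shiftLeft/bitOr loop in packedStructToInt.
    acc |= @as(u32, p.a); // bits [0, 4)
    acc |= @as(u32, p.b) << 4; // bits [4, 16)
    acc |= @as(u32, p.c) << 16; // bits [16, 32)
    return acc;
}

test "manual packing agrees with @bitCast" {
    const p = P{ .a = 0xF, .b = 0x123, .c = 0xBEEF };
    try std.testing.expect(packToInt(p) == @bitCast(u32, p));
}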
+fn bitcastFloatToBigInt(comptime F: type, f: F, buf: []std.math.big.Limb) BigIntConst {
+    const Int = @Type(.{ .Int = .{
+        .signedness = .unsigned,
+        .bits = @typeInfo(F).Float.bits,
+    } });
+    const int = @bitCast(Int, f);
+    return BigIntMutable.init(buf, int).toConst();
+}
+
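`bitcastFloatToBigInt` exists because float fields contribute their raw IEEE-754 bit pattern to the backing integer, not any numeric conversion. A well-known example of such a bit pattern (not taken from this diff):

const std = @import("std");

test "f32 1.0 is packed as its IEEE-754 bit pattern" {
    const bits = @bitCast(u32, @as(f32, 1.0));
    try std.testing.expect(bits == 0x3f800000);
}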
 pub fn readFromMemory(
     ty: Type,
     target: Target,
@@ -1127,10 +1181,90 @@ pub const Value = extern union {
             }
             return Tag.array.create(arena, elems);
         },
+        .Struct => switch (ty.containerLayout()) {
+            .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+            .Extern => {
+                const fields = ty.structFields().values();
+                const field_vals = try arena.alloc(Value, fields.len);
+                for (fields) |field, i| {
+                    const off = @intCast(usize, ty.structFieldOffset(i, target));
+                    field_vals[i] = try readFromMemory(field.ty, target, buffer[off..], arena);
+                }
+                return Tag.@"struct".create(arena, field_vals);
+            },
+            .Packed => {
+                const endian = target.cpu.arch.endian();
+                const Limb = std.math.big.Limb;
+                const abi_size = @intCast(usize, ty.abiSize(target));
+                const bit_size = @intCast(usize, ty.bitSize(target));
+                const limb_count = (buffer.len + @sizeOf(Limb) - 1) / @sizeOf(Limb);
+                const limbs_buffer = try arena.alloc(Limb, limb_count);
+                var bigint = BigIntMutable.init(limbs_buffer, 0);
+                bigint.readTwosComplement(buffer, bit_size, abi_size, endian, .unsigned);
+                return intToPackedStruct(ty, target, bigint.toConst(), arena);
+            },
+        },
         else => @panic("TODO implement readFromMemory for more types"),
     }
 }

+fn intToPackedStruct(
+    ty: Type,
+    target: Target,
+    bigint: BigIntConst,
+    arena: Allocator,
+) Allocator.Error!Value {
+    const limbs_buffer = try arena.alloc(std.math.big.Limb, bigint.limbs.len);
+    var bigint_mut = bigint.toMutable(limbs_buffer);
+    const fields = ty.structFields().values();
+    const field_vals = try arena.alloc(Value, fields.len);
+    var bits: u16 = 0;
+    for (fields) |field, i| {
+        const field_bits = @intCast(u16, field.ty.bitSize(target));
+        bigint_mut.shiftRight(bigint, bits);
+        bigint_mut.truncate(bigint_mut.toConst(), .unsigned, field_bits);
+        bits += field_bits;
+        const field_bigint = bigint_mut.toConst();
+
+        field_vals[i] = switch (field.ty.zigTypeTag()) {
+            .Float => switch (field.ty.floatBits(target)) {
+                16 => try bitCastBigIntToFloat(f16, .float_16, field_bigint, arena),
+                32 => try bitCastBigIntToFloat(f32, .float_32, field_bigint, arena),
+                64 => try bitCastBigIntToFloat(f64, .float_64, field_bigint, arena),
+                80 => try bitCastBigIntToFloat(f80, .float_80, field_bigint, arena),
+                128 => try bitCastBigIntToFloat(f128, .float_128, field_bigint, arena),
+                else => unreachable,
+            },
+            .Bool => makeBool(!field_bigint.eqZero()),
+            .Int => try Tag.int_big_positive.create(
+                arena,
+                try arena.dupe(std.math.big.Limb, field_bigint.limbs),
+            ),
+            .Struct => try intToPackedStruct(field.ty, target, field_bigint, arena),
+            else => unreachable,
+        };
+    }
+    return Tag.@"struct".create(arena, field_vals);
+}
+
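`intToPackedStruct` inverts the packing: shift the backing integer right by the field's running bit offset, then truncate to the field's bit width. The same recovery on the illustrative `P` from the sketch above:

fn unpackFromInt(host: u32) P {
    // Mirrors the shiftRight + truncate loop in intToPackedStruct.
    return .{
        .a = @truncate(u4, host),
        .b = @truncate(u12, host >> 4),
        .c = @truncate(u16, host >> 16),
    };
}

Composing the two sketches round-trips: `unpackFromInt(packToInt(p))` reproduces every field of `p`.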
+fn bitCastBigIntToFloat(
+    comptime F: type,
+    comptime float_tag: Tag,
+    bigint: BigIntConst,
+    arena: Allocator,
+) !Value {
+    const Int = @Type(.{ .Int = .{
+        .signedness = .unsigned,
+        .bits = @typeInfo(F).Float.bits,
+    } });
+    const int = bigint.to(Int) catch |err| switch (err) {
+        error.NegativeIntoUnsigned => unreachable,
+        error.TargetTooSmall => unreachable,
+    };
+    const f = @bitCast(F, int);
+    return float_tag.create(arena, f);
+}
+
 fn floatWriteToMemory(comptime F: type, f: F, target: Target, buffer: []u8) void {
     if (F == f80) {
         switch (target.cpu.arch) {
@@ -473,8 +473,8 @@ test "type deduction for array subscript expression" {
 }

 test "sentinel element count towards the ABI size calculation" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -482,7 +482,7 @@ test "sentinel element count towards the ABI size calculation" {

     const S = struct {
         fn doTheTest() !void {
-            const T = packed struct {
+            const T = extern struct {
                 fill_pre: u8 = 0x55,
                 data: [0:0]u8 = undefined,
                 fill_post: u8 = 0xAA,
@@ -500,7 +500,7 @@ test "sentinel element count towards the ABI size calculation" {
 }

 test "zero-sized array with recursive type definition" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -525,7 +525,7 @@ test "zero-sized array with recursive type definition" {
 }

 test "type coercion of anon struct literal to array" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -561,8 +561,8 @@ test "type coercion of anon struct literal to array" {
 }

 test "type coercion of pointer to anon struct literal to pointer to array" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -132,7 +132,6 @@ test "bitcast generates a temporary value" {
 }

 test "@bitCast packed structs at runtime and comptime" {
-    if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -170,7 +169,6 @@ test "@bitCast packed structs at runtime and comptime" {
 }

 test "@bitCast extern structs at runtime and comptime" {
-    if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -205,7 +203,6 @@ test "@bitCast extern structs at runtime and comptime" {
 }

 test "bitcast packed struct to integer and back" {
-    if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -246,7 +243,6 @@ test "implicit cast to error union by returning" {
 }

 test "bitcast packed struct literal to byte" {
-    if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -261,7 +257,6 @@ test "bitcast packed struct literal to byte" {
 }

 test "comptime bitcast used in expression has the correct type" {
-    if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -210,13 +210,13 @@ test "branching logic inside @TypeOf" {
 }

 test "@bitSizeOf" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage1) return error.SkipZigTest;

     try expect(@bitSizeOf(u2) == 2);
     try expect(@bitSizeOf(u8) == @sizeOf(u8) * 8);
     try expect(@bitSizeOf(struct {
         a: u2,
-    }) == 8);
+    }) == 2);
     try expect(@bitSizeOf(packed struct {
         a: u2,
     }) == 2);
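The flipped assertion shows `@bitSizeOf` reporting 2 rather than 8 for a plain struct holding one `u2`: bit size follows the bits the fields contribute, while ABI size still rounds up to whole bytes. A minimal sketch of that distinction:

const std = @import("std");

test "bit size vs ABI size" {
    try std.testing.expect(@bitSizeOf(u2) == 2);
    try std.testing.expect(@sizeOf(u2) == 1); // ABI size rounds up to a whole byte
}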
@@ -268,25 +268,6 @@ test "struct field init with catch" {
     comptime try S.doTheTest();
 }

-test "packed struct field alignment" {
-    if (builtin.object_format == .c) return error.SkipZigTest;
-
-    const Stage1 = struct {
-        var baz: packed struct {
-            a: u32,
-            b: u32,
-        } = undefined;
-    };
-    const Stage2 = struct {
-        var baz: packed struct {
-            a: u32,
-            b: u32 align(1),
-        } = undefined;
-    };
-    const S = if (builtin.zig_backend != .stage1) Stage2 else Stage1;
-    try expect(@TypeOf(&S.baz.b) == *align(1) u32);
-}
-
 const blah: packed struct {
     a: u3,
     b: u3,
@@ -687,48 +668,52 @@ test "default struct initialization fields" {
     try expect(1239 == x.a + x.b);
 }

-// TODO revisit this test when doing https://github.com/ziglang/zig/issues/1512
 test "packed array 24bits" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

     comptime {
         try expect(@sizeOf([9]Foo32Bits) == 9 * 4);
-        try expect(@sizeOf(FooArray24Bits) == 2 + 2 * 4 + 2);
+        try expect(@sizeOf(FooArray24Bits) == @sizeOf(u96));
     }

     var bytes = [_]u8{0} ** (@sizeOf(FooArray24Bits) + 1);
-    bytes[bytes.len - 1] = 0xaa;
+    bytes[bytes.len - 1] = 0xbb;
     const ptr = &std.mem.bytesAsSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
     try expect(ptr.a == 0);
-    try expect(ptr.b[0].field == 0);
-    try expect(ptr.b[1].field == 0);
+    try expect(ptr.b0.field == 0);
+    try expect(ptr.b1.field == 0);
     try expect(ptr.c == 0);

     ptr.a = maxInt(u16);
     try expect(ptr.a == maxInt(u16));
-    try expect(ptr.b[0].field == 0);
-    try expect(ptr.b[1].field == 0);
+    try expect(ptr.b0.field == 0);
+    try expect(ptr.b1.field == 0);
     try expect(ptr.c == 0);

-    ptr.b[0].field = maxInt(u24);
+    ptr.b0.field = maxInt(u24);
     try expect(ptr.a == maxInt(u16));
-    try expect(ptr.b[0].field == maxInt(u24));
-    try expect(ptr.b[1].field == 0);
+    try expect(ptr.b0.field == maxInt(u24));
+    try expect(ptr.b1.field == 0);
     try expect(ptr.c == 0);

-    ptr.b[1].field = maxInt(u24);
+    ptr.b1.field = maxInt(u24);
     try expect(ptr.a == maxInt(u16));
-    try expect(ptr.b[0].field == maxInt(u24));
-    try expect(ptr.b[1].field == maxInt(u24));
+    try expect(ptr.b0.field == maxInt(u24));
+    try expect(ptr.b1.field == maxInt(u24));
     try expect(ptr.c == 0);

     ptr.c = maxInt(u16);
     try expect(ptr.a == maxInt(u16));
-    try expect(ptr.b[0].field == maxInt(u24));
-    try expect(ptr.b[1].field == maxInt(u24));
+    try expect(ptr.b0.field == maxInt(u24));
+    try expect(ptr.b1.field == maxInt(u24));
     try expect(ptr.c == maxInt(u16));

-    try expect(bytes[bytes.len - 1] == 0xaa);
+    try expect(bytes[bytes.len - 1] == 0xbb);
 }

 const Foo32Bits = packed struct {
@@ -738,12 +723,16 @@ const Foo32Bits = packed struct {

 const FooArray24Bits = packed struct {
     a: u16,
-    b: [2]Foo32Bits,
+    b0: Foo32Bits,
+    b1: Foo32Bits,
     c: u16,
 };

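The new size assertion follows directly from the integer-backed model: assuming `Foo32Bits` is 32 bits wide (as the `@sizeOf([9]Foo32Bits) == 9 * 4` check implies), the fields pack 16 + 32 + 32 + 16 = 96 bits, so the struct takes on exactly the size of its backing integer. A sketch of that arithmetic:

const std = @import("std");

comptime {
    // 16 (a) + 32 (b0) + 32 (b1) + 16 (c) bits in the backing integer.
    std.debug.assert(@bitSizeOf(FooArray24Bits) == 96);
    std.debug.assert(@sizeOf(FooArray24Bits) == @sizeOf(u96));
}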
test "aligned array of packed struct" {
|
test "aligned array of packed struct" {
|
||||||
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
|
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||||
|
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
|
||||||
|
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||||
|
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||||
|
|
||||||
comptime {
|
comptime {
|
||||||
try expect(@sizeOf(FooStructAligned) == 2);
|
try expect(@sizeOf(FooStructAligned) == 2);
|
||||||
@@ -769,7 +758,10 @@ const FooArrayOfAligned = packed struct {
 };

 test "pointer to packed struct member in a stack variable" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

     const S = packed struct {
         a: u2,
@@ -783,32 +775,12 @@ test "pointer to packed struct member in a stack variable" {
     try expect(s.b == 2);
 }

-test "non-byte-aligned array inside packed struct" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
-    const Foo = packed struct {
-        a: bool,
-        b: [0x16]u8,
-    };
-    const S = struct {
-        fn bar(slice: []const u8) !void {
-            try expectEqualSlices(u8, slice, "abcdefghijklmnopqurstu");
-        }
-        fn doTheTest() !void {
-            var foo = Foo{
-                .a = true,
-                .b = "abcdefghijklmnopqurstu".*,
-            };
-            const value = foo.b;
-            try bar(&value);
-        }
-    };
-    try S.doTheTest();
-    comptime try S.doTheTest();
-}
-
 test "packed struct with u0 field access" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

     const S = packed struct {
         f0: u0,
@@ -818,7 +790,11 @@ test "packed struct with u0 field access" {
 }

 test "access to global struct fields" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

     g_foo.bar.value = 42;
     try expect(g_foo.bar.value == 42);
@@ -839,26 +815,32 @@ const S0 = struct {
 var g_foo: S0 = S0.init();

 test "packed struct with fp fields" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

     const S = packed struct {
-        data: [3]f32,
+        data0: f32,
+        data1: f32,
+        data2: f32,

         pub fn frob(self: *@This()) void {
-            self.data[0] += self.data[1] + self.data[2];
-            self.data[1] += self.data[0] + self.data[2];
-            self.data[2] += self.data[0] + self.data[1];
+            self.data0 += self.data1 + self.data2;
+            self.data1 += self.data0 + self.data2;
+            self.data2 += self.data0 + self.data1;
         }
     };

     var s: S = undefined;
-    s.data[0] = 1.0;
-    s.data[1] = 2.0;
-    s.data[2] = 3.0;
+    s.data0 = 1.0;
+    s.data1 = 2.0;
+    s.data2 = 3.0;
     s.frob();
-    try expectEqual(@as(f32, 6.0), s.data[0]);
-    try expectEqual(@as(f32, 11.0), s.data[1]);
-    try expectEqual(@as(f32, 20.0), s.data[2]);
+    try expect(@as(f32, 6.0) == s.data0);
+    try expect(@as(f32, 11.0) == s.data1);
+    try expect(@as(f32, 20.0) == s.data2);
 }

 test "fn with C calling convention returns struct by value" {
@@ -906,7 +888,11 @@ test "non-packed struct with u128 entry in union" {
 }

 test "packed struct field passed to generic function" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

     const S = struct {
         const P = packed struct {
@@ -1046,8 +1032,8 @@ test "struct with union field" {
     var True = Value{
         .kind = .{ .Bool = true },
     };
-    try expectEqual(@as(u32, 2), True.ref);
-    try expectEqual(true, True.kind.Bool);
+    try expect(@as(u32, 2) == True.ref);
+    try expect(True.kind.Bool);
 }

 test "type coercion of anon struct literal to struct" {
@@ -274,7 +274,7 @@ const TestStruct = struct {
 };

 test "type info: packed struct info" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage1) return error.SkipZigTest;

     try testPackedStruct();
     comptime try testPackedStruct();
@@ -286,19 +286,19 @@ fn testPackedStruct() !void {
     try expect(struct_info.Struct.is_tuple == false);
     try expect(struct_info.Struct.layout == .Packed);
     try expect(struct_info.Struct.fields.len == 4);
-    try expect(struct_info.Struct.fields[0].alignment == 2 * @alignOf(usize));
-    try expect(struct_info.Struct.fields[2].field_type == *TestPackedStruct);
+    try expect(struct_info.Struct.fields[0].alignment == 0);
+    try expect(struct_info.Struct.fields[2].field_type == f32);
     try expect(struct_info.Struct.fields[2].default_value == null);
     try expect(@ptrCast(*const u32, struct_info.Struct.fields[3].default_value.?).* == 4);
-    try expect(struct_info.Struct.fields[3].alignment == 1);
+    try expect(struct_info.Struct.fields[3].alignment == 0);
     try expect(struct_info.Struct.decls.len == 2);
     try expect(struct_info.Struct.decls[0].is_pub);
 }
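With the backing-integer model a packed field has no byte alignment of its own, which is why the updated test expects `alignment == 0` from `@typeInfo`. A small illustration under that assumption (the `Q` type is hypothetical, same assertion shape as the test above):

const std = @import("std");

const Q = packed struct { x: u3, y: u5 };

comptime {
    const info = @typeInfo(Q);
    std.debug.assert(info.Struct.fields[0].alignment == 0);
}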

 const TestPackedStruct = packed struct {
-    fieldA: usize align(2 * @alignOf(usize)),
+    fieldA: usize,
     fieldB: void,
-    fieldC: *Self,
+    fieldC: f32,
     fieldD: u32 = 4,

     pub fn foo(self: *const Self) void {
@@ -329,6 +329,7 @@ test "type info: function type info" {

     // wasm doesn't support align attributes on functions
     if (builtin.target.cpu.arch == .wasm32 or builtin.target.cpu.arch == .wasm64) return error.SkipZigTest;

     try testFunction();
     comptime try testFunction();
 }