Merge pull request #12379 from ifreund/packed-struct-explicit-backing-int
stage2: Implement explicit backing integers for packed structs
commit e0178890ba
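For orientation, a minimal sketch of the feature being merged (the declarations below are illustrative, not taken from the diff): a packed struct may now name its backing integer in parentheses, and the compiler checks that the field bit sizes fill that integer exactly.

const Flags = packed struct(u32) {
    a: u1,
    b: u2,
    c: u29, // field bit sizes must add up to exactly 32
};

// A mismatch such as `packed struct(u32) { a: u1 }` is rejected with
// "backing integer type 'u32' has bit size 32 but the struct fields have
// a total bit size of 1" (see the new error test at the end of this diff).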
@@ -294,6 +294,8 @@ pub const Type = union(enum) {
/// therefore must be kept in sync with the compiler implementation.
pub const Struct = struct {
layout: ContainerLayout,
/// Only valid if layout is .Packed
backing_integer: ?type = null,
fields: []const StructField,
decls: []const Declaration,
is_tuple: bool,

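A sketch (not from the diff) of how the new optional field behaves: it is null for non-packed structs, and for packed structs it holds the backing integer type whether that type was written explicitly or inferred. The helper below is hypothetical and only for illustration.

const std = @import("std");

fn backingIntOf(comptime T: type) ?type {
    return @typeInfo(T).Struct.backing_integer;
}

comptime {
    std.debug.assert(backingIntOf(struct { x: u32 }) == null);
    // Inferred backing integer: the field bits sum to 3.
    std.debug.assert(backingIntOf(packed struct { a: u1, b: u2 }).? == u3);
}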
@@ -2967,7 +2967,7 @@ pub const Node = struct {
/// Same as ContainerDeclTwo except there is known to be a trailing comma
/// or semicolon before the rbrace.
container_decl_two_trailing,
/// `union(lhs)` / `enum(lhs)`. `SubRange[rhs]`.
/// `struct(lhs)` / `union(lhs)` / `enum(lhs)`. `SubRange[rhs]`.
container_decl_arg,
/// Same as container_decl_arg but there is known to be a trailing
/// comma or semicolon before the rbrace.

@@ -3356,16 +3356,18 @@ const Parser = struct {
}

/// Caller must have already verified the first token.
/// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
///
/// ContainerDeclType
/// <- KEYWORD_struct
/// <- KEYWORD_struct (LPAREN Expr RPAREN)?
/// / KEYWORD_opaque
/// / KEYWORD_enum (LPAREN Expr RPAREN)?
/// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
/// / KEYWORD_opaque
fn parseContainerDeclAuto(p: *Parser) !Node.Index {
const main_token = p.nextToken();
const arg_expr = switch (p.token_tags[main_token]) {
.keyword_struct, .keyword_opaque => null_node,
.keyword_enum => blk: {
.keyword_opaque => null_node,
.keyword_struct, .keyword_enum => blk: {
if (p.eatToken(.l_paren)) |_| {
const expr = try p.expectExpr();
_ = try p.expectToken(.r_paren);

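For reference, a few declarations the updated ContainerDeclType grammar accepts (illustrative examples, not from the diff):

const A = struct { x: u8 };                   // KEYWORD_struct
const B = packed struct(u16) { x: u16 };      // KEYWORD_struct (LPAREN Expr RPAREN)?
const C = enum(u8) { a, b };                  // KEYWORD_enum (LPAREN Expr RPAREN)?
const D = union(enum(u2)) { a: u8, b: void }; // KEYWORD_union (LPAREN KEYWORD_enum ...)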
@@ -3064,6 +3064,13 @@ test "zig fmt: struct declaration" {
\\ c: u8,
\\};
\\
\\const Ps = packed struct(u32) {
\\ a: u1,
\\ b: u2,
\\
\\ c: u29,
\\};
\\
\\const Es = extern struct {
\\ a: u8,
\\ b: u8,

@@ -152,6 +152,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
0,
tree.containerDeclRoot(),
.Auto,
0,
)) |struct_decl_ref| {
assert(refToIndex(struct_decl_ref).? == 0);
} else |err| switch (err) {

@@ -4223,15 +4224,18 @@ fn structDeclInner(
node: Ast.Node.Index,
container_decl: Ast.full.ContainerDecl,
layout: std.builtin.Type.ContainerLayout,
backing_int_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const decl_inst = try gz.reserveInstructionIndex();

if (container_decl.ast.members.len == 0) {
if (container_decl.ast.members.len == 0 and backing_int_node == 0) {
try gz.setStruct(decl_inst, .{
.src_node = node,
.layout = layout,
.fields_len = 0,
.decls_len = 0,
.backing_int_ref = .none,
.backing_int_body_len = 0,
.known_non_opv = false,
.known_comptime_only = false,
});

@@ -4266,6 +4270,35 @@ fn structDeclInner(
};
defer block_scope.unstack();

const scratch_top = astgen.scratch.items.len;
defer astgen.scratch.items.len = scratch_top;

var backing_int_body_len: usize = 0;
const backing_int_ref: Zir.Inst.Ref = blk: {
if (backing_int_node != 0) {
if (layout != .Packed) {
return astgen.failNode(backing_int_node, "non-packed struct does not support backing integer type", .{});
} else {
const backing_int_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node);
if (!block_scope.isEmpty()) {
if (!block_scope.endsWithNoReturn()) {
_ = try block_scope.addBreak(.break_inline, decl_inst, backing_int_ref);
}

const body = block_scope.instructionsSlice();
const old_scratch_len = astgen.scratch.items.len;
try astgen.scratch.ensureUnusedCapacity(gpa, countBodyLenAfterFixups(astgen, body));
appendBodyWithFixupsArrayList(astgen, &astgen.scratch, body);
backing_int_body_len = astgen.scratch.items.len - old_scratch_len;
block_scope.instructions.items.len = block_scope.instructions_top;
}
break :blk backing_int_ref;
}
} else {
break :blk .none;
}
};

const decl_count = try astgen.scanDecls(&namespace, container_decl.ast.members);
const field_count = @intCast(u32, container_decl.ast.members.len - decl_count);

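The new AstGen check above means a backing integer is only accepted together with the packed layout. A rough illustration of the observable behavior (not code from the diff):

// error: non-packed struct does not support backing integer type
const Bad = extern struct(u32) { a: u32 };

// OK: only packed structs may declare an explicit backing integer.
const Good = packed struct(u32) { a: u32 };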
@@ -4378,6 +4411,8 @@ fn structDeclInner(
.layout = layout,
.fields_len = field_count,
.decls_len = decl_count,
.backing_int_ref = backing_int_ref,
.backing_int_body_len = @intCast(u32, backing_int_body_len),
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
});

@@ -4386,7 +4421,9 @@ fn structDeclInner(
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
const bodies_slice = astgen.scratch.items[bodies_start..];
try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + fields_slice.len + bodies_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len +
decls_slice.len + fields_slice.len + bodies_slice.len);
astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]);
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
astgen.extra.appendSliceAssumeCapacity(bodies_slice);

@@ -4582,9 +4619,7 @@ fn containerDecl(
else => unreachable,
} else std.builtin.Type.ContainerLayout.Auto;

assert(container_decl.ast.arg == 0);

const result = try structDeclInner(gz, scope, node, container_decl, layout);
const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg);
return rvalue(gz, rl, result, node);
},
.keyword_union => {

@@ -11254,6 +11289,8 @@ const GenZir = struct {
src_node: Ast.Node.Index,
fields_len: u32,
decls_len: u32,
backing_int_ref: Zir.Inst.Ref,
backing_int_body_len: u32,
layout: std.builtin.Type.ContainerLayout,
known_non_opv: bool,
known_comptime_only: bool,

@@ -11261,7 +11298,7 @@ const GenZir = struct {
const astgen = gz.astgen;
const gpa = astgen.gpa;

try astgen.extra.ensureUnusedCapacity(gpa, 4);
try astgen.extra.ensureUnusedCapacity(gpa, 6);
const payload_index = @intCast(u32, astgen.extra.items.len);

if (args.src_node != 0) {

@@ -11274,6 +11311,12 @@ const GenZir = struct {
if (args.decls_len != 0) {
astgen.extra.appendAssumeCapacity(args.decls_len);
}
if (args.backing_int_ref != .none) {
astgen.extra.appendAssumeCapacity(args.backing_int_body_len);
if (args.backing_int_body_len == 0) {
astgen.extra.appendAssumeCapacity(@enumToInt(args.backing_int_ref));
}
}
astgen.instructions.set(inst, .{
.tag = .extended,
.data = .{ .extended = .{

@@ -11282,6 +11325,7 @@ const GenZir = struct {
.has_src_node = args.src_node != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
.has_backing_int = args.backing_int_ref != .none,
.known_non_opv = args.known_non_opv,
.known_comptime_only = args.known_comptime_only,
.name_strategy = gz.anon_name_strategy,

@@ -2536,6 +2536,17 @@ fn walkInstruction(
break :blk decls_len;
} else 0;

// TODO: Expose explicit backing integer types in some way.
if (small.has_backing_int) {
const backing_int_body_len = file.zir.extra[extra_index];
extra_index += 1; // backing_int_body_len
if (backing_int_body_len == 0) {
extra_index += 1; // backing_int_ref
} else {
extra_index += backing_int_body_len; // backing_int_body_inst
}
}

var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};

@@ -895,6 +895,11 @@ pub const Struct = struct {
zir_index: Zir.Inst.Index,

layout: std.builtin.Type.ContainerLayout,
/// If the layout is not packed, this is the noreturn type.
/// If the layout is packed, this is the backing integer type of the packed struct.
/// Whether zig chooses this type or the user specifies it, it is stored here.
/// This will be set to the noreturn type until status is `have_layout`.
backing_int_ty: Type = Type.initTag(.noreturn),
status: enum {
none,
field_types_wip,

@@ -1025,7 +1030,7 @@ pub const Struct = struct {

pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 {
assert(s.layout == .Packed);
assert(s.haveFieldTypes());
assert(s.haveLayout());
var bit_sum: u64 = 0;
for (s.fields.values()) |field, i| {
if (i == index) {

@@ -1033,19 +1038,7 @@ pub const Struct = struct {
}
bit_sum += field.ty.bitSize(target);
}
return @intCast(u16, bit_sum);
}

pub fn packedIntegerBits(s: Struct, target: Target) u16 {
return s.packedFieldBitOffset(target, s.fields.count());
}

pub fn packedIntegerType(s: Struct, target: Target, buf: *Type.Payload.Bits) Type {
buf.* = .{
.base = .{ .tag = .int_unsigned },
.data = s.packedIntegerBits(target),
};
return Type.initPayload(&buf.base);
unreachable; // index out of bounds
}
};

266 src/Sema.zig
@@ -78,6 +78,7 @@ post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
err: ?*Module.ErrorMsg = null,

const std = @import("std");
const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;

@@ -2238,6 +2239,16 @@ pub fn analyzeStructDecl(
break :blk decls_len;
} else 0;

if (small.has_backing_int) {
const backing_int_body_len = sema.code.extra[extra_index];
extra_index += 1; // backing_int_body_len
if (backing_int_body_len == 0) {
extra_index += 1; // backing_int_ref
} else {
extra_index += backing_int_body_len; // backing_int_body_inst
}
}

_ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl);
}

@@ -11822,7 +11833,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.failWithDivideByZero(block, rhs_src);
}
if (maybe_lhs_val) |lhs_val| {
const rem_result = try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target);
const rem_result = try sema.intRem(block, resolved_type, lhs_val, lhs_src, rhs_val, rhs_src);
// If this answer could possibly be different by doing `intMod`,
// we must emit a compile error. Otherwise, it's OK.
if ((try rhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) != (try lhs_val.compareWithZeroAdvanced(.lt, sema.kit(block, src))) and

@@ -11884,6 +11895,60 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}

fn intRem(
sema: *Sema,
block: *Block,
ty: Type,
lhs: Value,
lhs_src: LazySrcLoc,
rhs: Value,
rhs_src: LazySrcLoc,
) CompileError!Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try sema.arena.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try sema.intRemScalar(block, lhs.indexVectorlike(i), lhs_src, rhs.indexVectorlike(i), rhs_src);
}
return Value.Tag.aggregate.create(sema.arena, result_data);
}
return sema.intRemScalar(block, lhs, lhs_src, rhs, rhs_src);
}

fn intRemScalar(
sema: *Sema,
block: *Block,
lhs: Value,
lhs_src: LazySrcLoc,
rhs: Value,
rhs_src: LazySrcLoc,
) CompileError!Value {
const target = sema.mod.getTarget();
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema.kit(block, lhs_src));
const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema.kit(block, rhs_src));
const limbs_q = try sema.arena.alloc(
math.big.Limb,
lhs_bigint.limbs.len,
);
const limbs_r = try sema.arena.alloc(
math.big.Limb,
// TODO: consider reworking Sema to re-use Values rather than
// always producing new Value objects.
rhs_bigint.limbs.len,
);
const limbs_buffer = try sema.arena.alloc(
math.big.Limb,
math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
return Value.fromBigInt(sema.arena, result_r.toConst());
}

fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };

@@ -12048,7 +12113,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
if (maybe_lhs_val) |lhs_val| {
return sema.addConstant(
resolved_type,
try lhs_val.intRem(rhs_val, resolved_type, sema.arena, target),
try sema.intRem(block, resolved_type, lhs_val, lhs_src, rhs_val, rhs_src),
);
}
break :rs lhs_src;

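The switch from Value.intRem to a Sema-aware helper matters for lazy values such as @sizeOf of a not-yet-resolved type: going through Sema lets the operand be resolved before the remainder is computed. The "mod lazy values" behavior test added later in this diff exercises exactly this; a minimal illustration:

const X = struct { x: u32 };
// @sizeOf(X) is a lazy value during semantic analysis; the remainder is
// now evaluated through Sema so the size can be resolved first.
const y = 1 % @sizeOf(X);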
@@ -14228,13 +14293,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai

const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace());

const field_values = try sema.arena.create([4]Value);
const backing_integer_val = blk: {
if (layout == .Packed) {
const struct_obj = struct_ty.castTag(.@"struct").?.data;
assert(struct_obj.haveLayout());
assert(struct_obj.backing_int_ty.isInt());
const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
} else {
break :blk Value.initTag(.null_value);
}
};

const field_values = try sema.arena.create([5]Value);
field_values.* = .{
// layout: ContainerLayout,
try Value.Tag.enum_field_index.create(
sema.arena,
@enumToInt(layout),
),
// backing_integer: ?type,
backing_integer_val,
// fields: []const StructField,
fields_val,
// decls: []const Declaration,

@@ -16251,7 +16330,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
if (!try sema.intFitsInType(block, src, alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
const abi_align = @intCast(u29, alignment_val.toUnsignedInt(target));
const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema.kit(block, src))).?);

var buffer: Value.ToTypeBuffer = undefined;
const unresolved_elem_ty = child_val.toType(&buffer);

@@ -16416,22 +16495,31 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
const struct_val = union_val.val.castTag(.aggregate).?.data;
// layout: containerlayout,
const layout_val = struct_val[0];
// backing_int: ?type,
const backing_int_val = struct_val[1];
// fields: []const enumfield,
const fields_val = struct_val[1];
const fields_val = struct_val[2];
// decls: []const declaration,
const decls_val = struct_val[2];
const decls_val = struct_val[3];
// is_tuple: bool,
const is_tuple_val = struct_val[3];
const is_tuple_val = struct_val[4];
assert(struct_val.len == 5);

const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);

// Decls
if (decls_val.sliceLen(mod) > 0) {
return sema.fail(block, src, "reified structs must have no decls", .{});
}

if (layout != .Packed and !backing_int_val.isNull()) {
return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
}

return if (is_tuple_val.toBool())
try sema.reifyTuple(block, src, fields_val)
else
try sema.reifyStruct(block, inst, src, layout_val, fields_val, name_strategy);
try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy);
},
.Enum => {
const struct_val = union_val.val.castTag(.aggregate).?.data;

@@ -16924,7 +17012,8 @@ fn reifyStruct(
block: *Block,
inst: Zir.Inst.Index,
src: LazySrcLoc,
layout_val: Value,
layout: std.builtin.Type.ContainerLayout,
backing_int_val: Value,
fields_val: Value,
name_strategy: Zir.Inst.NameStrategy,
) CompileError!Air.Inst.Ref {

@@ -16947,7 +17036,7 @@ fn reifyStruct(
.owner_decl = new_decl_index,
.fields = .{},
.zir_index = inst,
.layout = layout_val.toEnum(std.builtin.Type.ContainerLayout),
.layout = layout,
.status = .have_field_types,
.known_non_opv = false,
.namespace = .{

@@ -17013,6 +17102,41 @@ fn reifyStruct(
};
}

if (layout == .Packed) {
struct_obj.status = .layout_wip;

for (struct_obj.fields.values()) |field, index| {
sema.resolveTypeLayout(block, src, field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(block, struct_ty, index, msg, "while checking this field", .{});
return err;
},
else => return err,
};
}

var fields_bit_sum: u64 = 0;
for (struct_obj.fields.values()) |field| {
fields_bit_sum += field.ty.bitSize(target);
}

if (backing_int_val.optionalValue()) |payload| {
var buf: Value.ToTypeBuffer = undefined;
const backing_int_ty = payload.toType(&buf);
try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator);
} else {
var buf: Type.Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = @intCast(u16, fields_bit_sum),
};
struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator);
}

struct_obj.status = .have_layout;
}

try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
}

@@ -27109,6 +27233,11 @@ fn resolveStructLayout(
else => return err,
};
}

if (struct_obj.layout == .Packed) {
try semaBackingIntType(sema.mod, struct_obj);
}

struct_obj.status = .have_layout;

// In case of querying the ABI alignment of this struct, we will ask

@@ -27128,6 +27257,109 @@ fn resolveStructLayout(
// otherwise it's a tuple; no need to resolve anything
}

fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
const gpa = mod.gpa;
const target = mod.getTarget();

var fields_bit_sum: u64 = 0;
for (struct_obj.fields.values()) |field| {
fields_bit_sum += field.ty.bitSize(target);
}

const decl_index = struct_obj.owner_decl;
const decl = mod.declPtr(decl_index);
var decl_arena = decl.value_arena.?.promote(gpa);
defer decl.value_arena.?.* = decl_arena.state;
const decl_arena_allocator = decl_arena.allocator();

const zir = struct_obj.namespace.file_scope.zir;
const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
assert(extended.opcode == .struct_decl);
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);

if (small.has_backing_int) {
var extra_index: usize = extended.operand;
extra_index += @boolToInt(small.has_src_node);
extra_index += @boolToInt(small.has_fields_len);
extra_index += @boolToInt(small.has_decls_len);

const backing_int_body_len = zir.extra[extra_index];
extra_index += 1;

var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();

var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
};
defer sema.deinit();

var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();

var block: Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = &struct_obj.namespace,
.wip_capture_scope = wip_captures.scope,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer {
assert(block.instructions.items.len == 0);
block.params.deinit(gpa);
}

const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
const backing_int_ty = blk: {
if (backing_int_body_len == 0) {
const backing_int_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
} else {
const body = zir.extra[extra_index..][0..backing_int_body_len];
const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index);
break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
}
};

try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator);
} else {
var buf: Type.Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = @intCast(u16, fields_bit_sum),
};
struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator);
}
}

fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
const target = sema.mod.getTarget();

if (!backing_int_ty.isInt()) {
return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)});
}
if (backing_int_ty.bitSize(target) != fields_bit_sum) {
return sema.fail(
block,
src,
"backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}",
.{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum },
);
}
}

fn resolveUnionLayout(
sema: *Sema,
block: *Block,

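As the else branch of semaBackingIntType above shows, when no backing integer is written the compiler infers an unsigned integer whose bit width is the sum of the field bit sizes. A sketch of the observable effect (hypothetical declarations):

const Implicit = packed struct { a: u1, b: u2 };     // backing integer inferred as u3
const Explicit = packed struct(u3) { a: u1, b: u2 }; // same layout, spelled out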
@@ -27450,12 +27682,26 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
break :decls_len decls_len;
} else 0;

// The backing integer cannot be handled until `resolveStructLayout()`.
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1; // backing_int_body_len
if (backing_int_body_len == 0) {
extra_index += 1; // backing_int_ref
} else {
extra_index += backing_int_body_len; // backing_int_body_inst
}
}

// Skip over decls.
var decls_it = zir.declIteratorInner(extra_index, decls_len);
while (decls_it.next()) |_| {}
extra_index = decls_it.extra_index;

if (fields_len == 0) {
if (struct_obj.layout == .Packed) {
try semaBackingIntType(mod, struct_obj);
}
struct_obj.status = .have_layout;
return;
}

16 src/Zir.zig
@@ -3085,13 +3085,16 @@ pub const Inst = struct {
/// 0. src_node: i32, // if has_src_node
/// 1. fields_len: u32, // if has_fields_len
/// 2. decls_len: u32, // if has_decls_len
/// 3. decl_bits: u32 // for every 8 decls
/// 3. backing_int_body_len: u32, // if has_backing_int
/// 4. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 5. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 6. decl_bits: u32 // for every 8 decls
/// - sets of 4 bits:
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
/// 0bX000: whether corresponding decl has a linksection or an address space expression
/// 4. decl: { // for every decls_len
/// 7. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
/// name: u32, // null terminated string index

@@ -3109,13 +3112,13 @@ pub const Inst = struct {
/// address_space: Ref,
/// }
/// }
/// 5. flags: u32 // for every 8 fields
/// 8. flags: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has an align expression
/// 0b00X0: whether corresponding field has a default expression
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: whether corresponding field has a type expression
/// 6. fields: { // for every fields_len
/// 9. fields: { // for every fields_len
/// field_name: u32,
/// doc_comment: u32, // 0 if no doc comment
/// field_type: Ref, // if corresponding bit is not set. none means anytype.

@@ -3123,7 +3126,7 @@ pub const Inst = struct {
/// align_body_len: u32, // if corresponding bit is set
/// init_body_len: u32, // if corresponding bit is set
/// }
/// 7. bodies: { // for every fields_len
/// 10. bodies: { // for every fields_len
/// field_type_body_inst: Inst, // for each field_type_body_len
/// align_body_inst: Inst, // for each align_body_len
/// init_body_inst: Inst, // for each init_body_len

@@ -3133,11 +3136,12 @@ pub const Inst = struct {
has_src_node: bool,
has_fields_len: bool,
has_decls_len: bool,
has_backing_int: bool,
known_non_opv: bool,
known_comptime_only: bool,
name_strategy: NameStrategy,
layout: std.builtin.Type.ContainerLayout,
_: u7 = undefined,
_: u6 = undefined,
};
};

@@ -1683,8 +1683,7 @@ pub const Object = struct {
if (ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
if (struct_obj.layout == .Packed) {
var buf: Type.Payload.Bits = undefined;
const info = struct_obj.packedIntegerType(target, &buf).intInfo(target);
const info = struct_obj.backing_int_ty.intInfo(target);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,

@@ -2679,9 +2678,7 @@ pub const DeclGen = struct {
const struct_obj = t.castTag(.@"struct").?.data;

if (struct_obj.layout == .Packed) {
var buf: Type.Payload.Bits = undefined;
const int_ty = struct_obj.packedIntegerType(target, &buf);
const int_llvm_ty = try dg.lowerType(int_ty);
const int_llvm_ty = try dg.lowerType(struct_obj.backing_int_ty);
gop.value_ptr.* = int_llvm_ty;
return int_llvm_ty;
}

@@ -3330,8 +3327,8 @@ pub const DeclGen = struct {
const struct_obj = tv.ty.castTag(.@"struct").?.data;

if (struct_obj.layout == .Packed) {
const big_bits = struct_obj.packedIntegerBits(target);
const int_llvm_ty = dg.context.intType(big_bits);
const big_bits = struct_obj.backing_int_ty.bitSize(target);
const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *const llvm.Value = int_llvm_ty.constNull();

@@ -8243,8 +8240,8 @@ pub const FuncGen = struct {
.Struct => {
if (result_ty.containerLayout() == .Packed) {
const struct_obj = result_ty.castTag(.@"struct").?.data;
const big_bits = struct_obj.packedIntegerBits(target);
const int_llvm_ty = self.dg.context.intType(big_bits);
const big_bits = struct_obj.backing_int_ty.bitSize(target);
const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *const llvm.Value = int_llvm_ty.constNull();

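These codegen changes replace the ad-hoc packedIntegerType computation with the stored backing_int_ty; in the LLVM backend a packed struct is still lowered as a single integer, now taken directly from that field. Roughly, for illustration:

// A packed struct such as
const P = packed struct(u32) { lo: u16, hi: u16 };
// is represented in LLVM as its backing integer (i32 here), with the field
// values shifted into it the way the running_int code above builds constants.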
@@ -1245,9 +1245,28 @@ const Writer = struct {

try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv);
try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only);
try stream.print("{s}, {s}, ", .{
@tagName(small.name_strategy), @tagName(small.layout),
});

try stream.print("{s}, ", .{@tagName(small.name_strategy)});

if (small.layout == .Packed and small.has_backing_int) {
const backing_int_body_len = self.code.extra[extra_index];
extra_index += 1;
try stream.writeAll("Packed(");
if (backing_int_body_len == 0) {
const backing_int_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
extra_index += 1;
try self.writeInstRef(stream, backing_int_ref);
} else {
const body = self.code.extra[extra_index..][0..backing_int_body_len];
extra_index += backing_int_body_len;
self.indent += 2;
try self.writeBracedDecl(stream, body);
self.indent -= 2;
}
try stream.writeAll("), ");
} else {
try stream.print("{s}, ", .{@tagName(small.layout)});
}

if (decls_len == 0) {
try stream.writeAll("{}, ");

@@ -1116,6 +1116,7 @@ struct AstNodeContainerDecl {
ContainerLayout layout;

bool auto_enum, is_root; // union(enum)
bool unsupported_explicit_backing_int;
};

struct AstNodeErrorSetField {

@@ -3034,6 +3034,12 @@ static Error resolve_struct_zero_bits(CodeGen *g, ZigType *struct_type) {

AstNode *decl_node = struct_type->data.structure.decl_node;

if (decl_node->data.container_decl.unsupported_explicit_backing_int) {
add_node_error(g, decl_node, buf_create_from_str(
"the stage1 compiler does not support explicit backing integer types on packed structs"));
return ErrorSemanticAnalyzeFail;
}

if (struct_type->data.structure.resolve_loop_flag_zero_bits) {
if (struct_type->data.structure.resolve_status != ResolveStatusInvalid) {
struct_type->data.structure.resolve_status = ResolveStatusInvalid;

@@ -18640,7 +18640,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
result->special = ConstValSpecialStatic;
result->type = ir_type_info_get_type(ira, "Struct", nullptr);

ZigValue **fields = alloc_const_vals_ptrs(g, 4);
ZigValue **fields = alloc_const_vals_ptrs(g, 5);
result->data.x_struct.fields = fields;

// layout: ContainerLayout

@@ -18648,8 +18648,17 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
fields[0]->special = ConstValSpecialStatic;
fields[0]->type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
bigint_init_unsigned(&fields[0]->data.x_enum_tag, type_entry->data.structure.layout);

// backing_integer: ?type
ensure_field_index(result->type, "backing_integer", 1);
fields[1]->special = ConstValSpecialStatic;
fields[1]->type = get_optional_type(g, g->builtin_types.entry_type);
// This is always null in stage1, as stage1 does not support explicit backing integers
// for packed structs.
fields[1]->data.x_optional = nullptr;

// fields: []Type.StructField
ensure_field_index(result->type, "fields", 1);
ensure_field_index(result->type, "fields", 2);

ZigType *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField", nullptr);
if ((err = type_resolve(g, type_info_struct_field_type, ResolveStatusSizeKnown))) {

@@ -18663,7 +18672,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
struct_field_array->data.x_array.special = ConstArraySpecialNone;
struct_field_array->data.x_array.data.s_none.elements = g->pass1_arena->allocate<ZigValue>(struct_field_count);

init_const_slice(g, fields[1], struct_field_array, 0, struct_field_count, false, nullptr);
init_const_slice(g, fields[2], struct_field_array, 0, struct_field_count, false, nullptr);

for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
TypeStructField *struct_field = type_entry->data.structure.fields[struct_field_index];

@@ -18710,18 +18719,18 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour
struct_field_val->parent.data.p_array.elem_index = struct_field_index;
}
// decls: []Type.Declaration
ensure_field_index(result->type, "decls", 2);
if ((err = ir_make_type_info_decls(ira, source_node, fields[2],
ensure_field_index(result->type, "decls", 3);
if ((err = ir_make_type_info_decls(ira, source_node, fields[3],
type_entry->data.structure.decls_scope, false)))
{
return err;
}

// is_tuple: bool
ensure_field_index(result->type, "is_tuple", 3);
fields[3]->special = ConstValSpecialStatic;
fields[3]->type = g->builtin_types.entry_bool;
fields[3]->data.x_bool = is_tuple(type_entry);
ensure_field_index(result->type, "is_tuple", 4);
fields[4]->special = ConstValSpecialStatic;
fields[4]->type = g->builtin_types.entry_bool;
fields[4]->data.x_bool = is_tuple(type_entry);

break;
}

@@ -19313,7 +19322,14 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
assert(layout_value->type == ir_type_info_get_type(ira, "ContainerLayout", nullptr));
ContainerLayout layout = (ContainerLayout)bigint_as_u32(&layout_value->data.x_enum_tag);

ZigValue *fields_value = get_const_field(ira, source_node, payload, "fields", 1);
ZigType *tag_type = get_const_field_meta_type_optional(ira, source_node, payload, "backing_integer", 1);
if (tag_type != nullptr) {
ir_add_error_node(ira, source_node, buf_create_from_str(
"the stage1 compiler does not support explicit backing integer types on packed structs"));
return ira->codegen->invalid_inst_gen->value->type;
}

ZigValue *fields_value = get_const_field(ira, source_node, payload, "fields", 2);
if (fields_value == nullptr)
return ira->codegen->invalid_inst_gen->value->type;
assert(fields_value->special == ConstValSpecialStatic);

@@ -19322,7 +19338,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
ZigValue *fields_len_value = fields_value->data.x_struct.fields[slice_len_index];
size_t fields_len = bigint_as_usize(&fields_len_value->data.x_bigint);

ZigValue *decls_value = get_const_field(ira, source_node, payload, "decls", 2);
ZigValue *decls_value = get_const_field(ira, source_node, payload, "decls", 3);
if (decls_value == nullptr)
return ira->codegen->invalid_inst_gen->value->type;
assert(decls_value->special == ConstValSpecialStatic);

@@ -19335,7 +19351,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_
}

bool is_tuple;
if ((err = get_const_field_bool(ira, source_node, payload, "is_tuple", 3, &is_tuple)))
if ((err = get_const_field_bool(ira, source_node, payload, "is_tuple", 4, &is_tuple)))
return ira->codegen->invalid_inst_gen->value->type;

ZigType *entry = new_type_table_entry(ZigTypeIdStruct);

@@ -2902,16 +2902,25 @@ static AstNode *ast_parse_container_decl_auto(ParseContext *pc) {
}

// ContainerDeclType
// <- KEYWORD_struct
// <- KEYWORD_struct (LPAREN Expr RPAREN)?
// / KEYWORD_enum (LPAREN Expr RPAREN)?
// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
// / KEYWORD_opaque
static AstNode *ast_parse_container_decl_type(ParseContext *pc) {
TokenIndex first = eat_token_if(pc, TokenIdKeywordStruct);
if (first != 0) {
bool explicit_backing_int = false;
if (eat_token_if(pc, TokenIdLParen) != 0) {
explicit_backing_int = true;
ast_expect(pc, ast_parse_expr);
expect_token(pc, TokenIdRParen);
}
AstNode *res = ast_create_node(pc, NodeTypeContainerDecl, first);
res->data.container_decl.init_arg_expr = nullptr;
res->data.container_decl.kind = ContainerKindStruct;
// We want this to be an error in semantic analysis not parsing to make sharing
// the test suite between stage1 and self hosted easier.
res->data.container_decl.unsupported_explicit_backing_int = explicit_backing_int;
return res;
}

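So stage1 only parses the new syntax to keep the shared test suite buildable; any actual use of it is rejected during semantic analysis. For example (illustrative), compiling the following declaration with the stage1 backend fails:

// error: the stage1 compiler does not support explicit backing integer types on packed structs
const S = packed struct(u32) { a: u32 };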
23 src/type.zig
@@ -3000,9 +3000,17 @@ pub const Type = extern union {
.lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
};
if (struct_obj.layout == .Packed) {
var buf: Type.Payload.Bits = undefined;
const int_ty = struct_obj.packedIntegerType(target, &buf);
return AbiAlignmentAdvanced{ .scalar = int_ty.abiAlignment(target) };
switch (strat) {
.sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
.lazy => |arena| {
if (!struct_obj.haveLayout()) {
return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) };
}
},
.eager => {},
}
assert(struct_obj.haveLayout());
return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) };
}

const fields = ty.structFields();

@@ -3192,17 +3200,16 @@ pub const Type = extern union {
.Packed => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (strat) {
.sema_kit => |sk| _ = try sk.sema.resolveTypeFields(sk.block, sk.src, ty),
.sema_kit => |sk| try sk.sema.resolveTypeLayout(sk.block, sk.src, ty),
.lazy => |arena| {
if (!struct_obj.haveFieldTypes()) {
if (!struct_obj.haveLayout()) {
return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) };
}
},
.eager => {},
}
var buf: Type.Payload.Bits = undefined;
const int_ty = struct_obj.packedIntegerType(target, &buf);
return AbiSizeAdvanced{ .scalar = int_ty.abiSize(target) };
assert(struct_obj.haveLayout());
return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) };
},
else => {
switch (strat) {

@@ -3472,44 +3472,6 @@ pub const Value = extern union {
return fromBigInt(allocator, result_q.toConst());
}

pub fn intRem(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
scalar.* = try intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator, target);
}
return Value.Tag.aggregate.create(allocator, result_data);
}
return intRemScalar(lhs, rhs, allocator, target);
}

pub fn intRemScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value {
// TODO is this a performance issue? maybe we should try the operation without
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, target);
const rhs_bigint = rhs.toBigInt(&rhs_space, target);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len,
);
const limbs_r = try allocator.alloc(
std.math.big.Limb,
// TODO: consider reworking Sema to re-use Values rather than
// always producing new Value objects.
rhs_bigint.limbs.len,
);
const limbs_buffer = try allocator.alloc(
std.math.big.Limb,
std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
return fromBigInt(allocator, result_r.toConst());
}

pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
if (ty.zigTypeTag() == .Vector) {
const result_data = try allocator.alloc(Value, ty.vectorLen());

@@ -165,6 +165,7 @@ test {

if (builtin.zig_backend != .stage1) {
_ = @import("behavior/decltest.zig");
_ = @import("behavior/packed_struct_explicit_backing_int.zig");
}

if (builtin.os.tag != .wasi) {

@@ -1721,3 +1721,18 @@ fn testAbsFloat() !void {
fn testAbsFloatOne(in: f32, out: f32) !void {
try expect(@fabs(@as(f32, in)) == @as(f32, out));
}

test "mod lazy values" {
{
const X = struct { x: u32 };
const x = @sizeOf(X);
const y = 1 % x;
_ = y;
}
{
const X = struct { x: u32 };
const x = @sizeOf(X);
const y = x % 1;
_ = y;
}
}

53 test/behavior/packed_struct_explicit_backing_int.zig (new file)
@@ -0,0 +1,53 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const expectEqual = std.testing.expectEqual;
const native_endian = builtin.cpu.arch.endian();

test "packed struct explicit backing integer" {
assert(builtin.zig_backend != .stage1);
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

const S1 = packed struct { a: u8, b: u8, c: u8 };

const S2 = packed struct(i24) { d: u8, e: u8, f: u8 };

const S3 = packed struct { x: S1, y: S2 };
const S3Padded = packed struct(u64) { s3: S3, pad: u16 };

try expectEqual(48, @bitSizeOf(S3));
try expectEqual(@sizeOf(u48), @sizeOf(S3));

try expectEqual(3, @offsetOf(S3, "y"));
try expectEqual(24, @bitOffsetOf(S3, "y"));

if (native_endian == .Little) {
const s3 = @bitCast(S3Padded, @as(u64, 0xe952d5c71ff4)).s3;
try expectEqual(@as(u8, 0xf4), s3.x.a);
try expectEqual(@as(u8, 0x1f), s3.x.b);
try expectEqual(@as(u8, 0xc7), s3.x.c);
try expectEqual(@as(u8, 0xd5), s3.y.d);
try expectEqual(@as(u8, 0x52), s3.y.e);
try expectEqual(@as(u8, 0xe9), s3.y.f);
}

const S4 = packed struct { a: i32, b: i8 };
const S5 = packed struct(u80) { a: i32, b: i8, c: S4 };
const S6 = packed struct(i80) { a: i32, b: S4, c: i8 };

const expectedBitSize = 80;
const expectedByteSize = @sizeOf(u80);
try expectEqual(expectedBitSize, @bitSizeOf(S5));
try expectEqual(expectedByteSize, @sizeOf(S5));
try expectEqual(expectedBitSize, @bitSizeOf(S6));
try expectEqual(expectedByteSize, @sizeOf(S6));

try expectEqual(5, @offsetOf(S5, "c"));
try expectEqual(40, @bitOffsetOf(S5, "c"));
try expectEqual(9, @offsetOf(S6, "c"));
try expectEqual(72, @bitOffsetOf(S6, "c"));
}

@@ -293,6 +293,7 @@ test "type info: struct info" {
fn testStruct() !void {
const unpacked_struct_info = @typeInfo(TestStruct);
try expect(unpacked_struct_info.Struct.is_tuple == false);
try expect(unpacked_struct_info.Struct.backing_integer == null);
try expect(unpacked_struct_info.Struct.fields[0].alignment == @alignOf(u32));
try expect(@ptrCast(*const u32, unpacked_struct_info.Struct.fields[0].default_value.?).* == 4);
try expect(mem.eql(u8, "foobar", @ptrCast(*const *const [6:0]u8, unpacked_struct_info.Struct.fields[1].default_value.?).*));

@@ -315,6 +316,7 @@ fn testPackedStruct() !void {
try expect(struct_info == .Struct);
try expect(struct_info.Struct.is_tuple == false);
try expect(struct_info.Struct.layout == .Packed);
try expect(struct_info.Struct.backing_integer == u128);
try expect(struct_info.Struct.fields.len == 4);
try expect(struct_info.Struct.fields[0].alignment == 0);
try expect(struct_info.Struct.fields[2].field_type == f32);

@@ -326,7 +328,7 @@ fn testPackedStruct() !void {
}

const TestPackedStruct = packed struct {
fieldA: usize,
fieldA: u64,
fieldB: void,
fieldC: f32,
fieldD: u32 = 4,

@@ -0,0 +1,55 @@
export fn entry1() void {
_ = @sizeOf(packed struct(u32) {
x: u1,
y: u24,
z: u4,
});
}
export fn entry2() void {
_ = @sizeOf(packed struct(i31) {
x: u4,
y: u24,
z: u4,
});
}

export fn entry3() void {
_ = @sizeOf(packed struct(void) {
x: void,
});
}

export fn entry4() void {
_ = @sizeOf(packed struct(void) {});
}

export fn entry5() void {
_ = @sizeOf(packed struct(noreturn) {});
}

export fn entry6() void {
_ = @sizeOf(packed struct(f64) {
x: u32,
y: f32,
});
}

export fn entry7() void {
_ = @sizeOf(packed struct(*u32) {
x: u4,
y: u24,
z: u4,
});
}

// error
// backend=llvm
// target=native
//
// :2:31: error: backing integer type 'u32' has bit size 32 but the struct fields have a total bit size of 29
// :9:31: error: backing integer type 'i31' has bit size 31 but the struct fields have a total bit size of 32
// :17:31: error: expected backing integer type, found 'void'
// :23:31: error: expected backing integer type, found 'void'
// :27:31: error: expected backing integer type, found 'noreturn'
// :31:31: error: expected backing integer type, found 'f64'
// :38:31: error: expected backing integer type, found '*u32'