stage2: move anon tuples and anon structs to InternPool

This commit is contained in:
Andrew Kelley 2023-05-14 19:23:41 -07:00
parent 88dbd62bcb
commit d18881de1b
11 changed files with 1143 additions and 1260 deletions

View File

@ -137,9 +137,14 @@ pub const Key = union(enum) {
payload_type: Index,
},
simple_type: SimpleType,
/// If `empty_struct_type` is handled separately, then this value may be
/// safely assumed to never be `none`.
/// This represents a struct that has been explicitly declared in source code,
/// or was created with `@Type`. It is unique and based on a declaration.
/// It may be a tuple, if declared like this: `struct {A, B, C}`.
struct_type: StructType,
/// This is an anonymous struct or tuple type which has no corresponding
/// declaration. It is used for types that have no `struct` keyword in the
/// source code, and were not created via `@Type`.
anon_struct_type: AnonStructType,
union_type: UnionType,
opaque_type: OpaqueType,
enum_type: EnumType,
@ -168,7 +173,7 @@ pub const Key = union(enum) {
/// Each element/field stored as an `Index`.
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
/// so the slice length will be one more than the type's array length.
aggregate: Aggregate,
aggregate: Key.Aggregate,
/// An instance of a union.
un: Union,
@ -222,22 +227,25 @@ pub const Key = union(enum) {
namespace: Module.Namespace.Index,
};
/// There are three possibilities here:
/// * `@TypeOf(.{})` (untyped empty struct literal)
/// - namespace == .none, index == .none
/// * A struct which has a namespace, but no fields.
///   - index == .none
/// * A struct which has fields as well as a namespace.
/// Uniquely identifies a struct type that was explicitly declared in source
/// code (or created via `@Type`); it is backed by a `Module.Struct`.
pub const StructType = struct {
    /// The `none` tag is used to represent a struct with no fields.
    index: Module.Struct.OptionalIndex,
    /// May be `none` if the struct has no declarations.
    namespace: Module.Namespace.OptionalIndex,
};
/// An anonymous struct or tuple type which has no corresponding declaration
/// and no namespace.
pub const AnonStructType = struct {
    /// The type of each field, one `Index` per field.
    types: []const Index,
    /// Field names. This may be empty, indicating this is a tuple.
    names: []const NullTerminatedString,
    /// Comptime-known field values. These elements may be `none`,
    /// indicating the corresponding field value is runtime-known.
    values: []const Index,

    /// A tuple is distinguished from an anonymous struct solely by
    /// having no field names.
    pub fn isTuple(self: AnonStructType) bool {
        return self.names.len == 0;
    }
};
pub const UnionType = struct {
index: Module.Union.Index,
runtime_tag: RuntimeTag,
@ -498,6 +506,12 @@ pub const Key = union(enum) {
std.hash.autoHash(hasher, aggregate.ty);
for (aggregate.fields) |field| std.hash.autoHash(hasher, field);
},
.anon_struct_type => |anon_struct_type| {
for (anon_struct_type.types) |elem| std.hash.autoHash(hasher, elem);
for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem);
for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem);
},
}
}
@ -650,6 +664,12 @@ pub const Key = union(enum) {
if (a_info.ty != b_info.ty) return false;
return std.mem.eql(Index, a_info.fields, b_info.fields);
},
.anon_struct_type => |a_info| {
const b_info = b.anon_struct_type;
return std.mem.eql(Index, a_info.types, b_info.types) and
std.mem.eql(Index, a_info.values, b_info.values) and
std.mem.eql(NullTerminatedString, a_info.names, b_info.names);
},
}
}
@ -666,6 +686,7 @@ pub const Key = union(enum) {
.union_type,
.opaque_type,
.enum_type,
.anon_struct_type,
=> .type_type,
inline .ptr,
@ -1020,9 +1041,10 @@ pub const static_keys = [_]Key{
.{ .simple_type = .var_args_param },
// empty_struct_type
.{ .struct_type = .{
.namespace = .none,
.index = .none,
.{ .anon_struct_type = .{
.types = &.{},
.names = &.{},
.values = &.{},
} },
.{ .simple_value = .undefined },
@ -1144,6 +1166,12 @@ pub const Tag = enum(u8) {
/// Module.Struct object allocated for it.
/// data is Module.Namespace.Index.
type_struct_ns,
/// An AnonStructType which stores types, names, and values for each field.
/// data is extra index of `TypeStructAnon`.
type_struct_anon,
/// An AnonStructType which has only types and values for each field.
/// data is extra index of `TypeStructAnon`.
type_tuple_anon,
/// A tagged union type.
/// `data` is `Module.Union.Index`.
type_union_tagged,
@ -1249,6 +1277,26 @@ pub const Tag = enum(u8) {
only_possible_value,
/// data is extra index to Key.Union.
union_value,
/// An instance of a struct, array, or vector.
/// data is extra index to `Aggregate`.
aggregate,
};
/// Extra data for an instance of a struct, array, or vector.
/// Trailing:
/// 0. element: Index for each len
/// len is determined by the aggregate type (via `aggregateTypeLen`);
/// it is not stored here.
pub const Aggregate = struct {
    /// The type of the aggregate.
    ty: Index,
};
/// Extra data for `type_struct_anon` and `type_tuple_anon`.
/// Trailing:
/// 0. type: Index for each fields_len
/// 1. value: Index for each fields_len
/// 2. name: NullTerminatedString for each fields_len
/// The set of field names (trailing list 2) is omitted when the `Tag` is
/// `type_tuple_anon`, since tuples have no field names.
pub const TypeStructAnon = struct {
    /// Number of fields in the anonymous struct or tuple type.
    fields_len: u32,
};
/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to
@ -1572,6 +1620,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
}
pub fn indexToKey(ip: InternPool, index: Index) Key {
assert(index != .none);
const item = ip.items.get(@enumToInt(index));
const data = item.data;
return switch (item.tag) {
@ -1659,6 +1708,30 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.namespace = @intToEnum(Module.Namespace.Index, data).toOptional(),
} },
.type_struct_anon => {
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data);
const fields_len = type_struct_anon.data.fields_len;
const types = ip.extra.items[type_struct_anon.end..][0..fields_len];
const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len];
return .{ .anon_struct_type = .{
.types = @ptrCast([]const Index, types),
.values = @ptrCast([]const Index, values),
.names = @ptrCast([]const NullTerminatedString, names),
} };
},
.type_tuple_anon => {
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data);
const fields_len = type_struct_anon.data.fields_len;
const types = ip.extra.items[type_struct_anon.end..][0..fields_len];
const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
return .{ .anon_struct_type = .{
.types = @ptrCast([]const Index, types),
.values = @ptrCast([]const Index, values),
.names = &.{},
} };
},
.type_union_untagged => .{ .union_type = .{
.index = @intToEnum(Module.Union.Index, data),
.runtime_tag = .none,
@ -1797,6 +1870,15 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
else => unreachable,
};
},
.aggregate => {
const extra = ip.extraDataTrail(Aggregate, data);
const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty));
const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]);
return .{ .aggregate = .{
.ty = extra.data.ty,
.fields = fields,
} };
},
.union_value => .{ .un = ip.extraData(Key.Union, data) },
.enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) },
};
@ -1982,6 +2064,45 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
},
.anon_struct_type => |anon_struct_type| {
assert(anon_struct_type.types.len == anon_struct_type.values.len);
for (anon_struct_type.types) |elem| assert(elem != .none);
const fields_len = @intCast(u32, anon_struct_type.types.len);
if (anon_struct_type.names.len == 0) {
try ip.extra.ensureUnusedCapacity(
gpa,
@typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 2),
);
ip.items.appendAssumeCapacity(.{
.tag = .type_tuple_anon,
.data = ip.addExtraAssumeCapacity(TypeStructAnon{
.fields_len = fields_len,
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types));
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values));
return @intToEnum(Index, ip.items.len - 1);
}
assert(anon_struct_type.names.len == anon_struct_type.types.len);
try ip.extra.ensureUnusedCapacity(
gpa,
@typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3),
);
ip.items.appendAssumeCapacity(.{
.tag = .type_struct_anon,
.data = ip.addExtraAssumeCapacity(TypeStructAnon{
.fields_len = fields_len,
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types));
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values));
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names));
return @intToEnum(Index, ip.items.len - 1);
},
.union_type => |union_type| {
ip.items.appendAssumeCapacity(.{
.tag = switch (union_type.runtime_tag) {
@ -2269,6 +2390,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.aggregate => |aggregate| {
assert(aggregate.ty != .none);
for (aggregate.fields) |elem| assert(elem != .none);
if (aggregate.fields.len != ip.aggregateTypeLen(aggregate.ty)) {
std.debug.print("aggregate fields len = {d}, type len = {d}\n", .{
aggregate.fields.len,
ip.aggregateTypeLen(aggregate.ty),
});
}
assert(aggregate.fields.len == ip.aggregateTypeLen(aggregate.ty));
if (aggregate.fields.len == 0) {
ip.items.appendAssumeCapacity(.{
.tag = .only_possible_value,
@ -2276,7 +2407,19 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
return @intToEnum(Index, ip.items.len - 1);
}
@panic("TODO");
try ip.extra.ensureUnusedCapacity(
gpa,
@typeInfo(Aggregate).Struct.fields.len + aggregate.fields.len,
);
ip.items.appendAssumeCapacity(.{
.tag = .aggregate,
.data = ip.addExtraAssumeCapacity(Aggregate{
.ty = aggregate.ty,
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.fields));
},
.un => |un| {
@ -2913,6 +3056,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.type_opaque => @sizeOf(Key.OpaqueType),
.type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),
.type_struct_ns => @sizeOf(Module.Namespace),
.type_struct_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len);
},
.type_tuple_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len);
},
.type_union_tagged,
.type_union_untagged,
@ -2942,6 +3093,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
},
.enum_tag => @sizeOf(Key.EnumTag),
.aggregate => b: {
const info = ip.extraData(Aggregate, data);
const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty));
break :b @sizeOf(Aggregate) + (@sizeOf(u32) * fields_len);
},
.float_f16 => 0,
.float_f32 => 0,
.float_f64 => @sizeOf(Float64),
@ -3079,3 +3236,13 @@ pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E {
const int = ip.indexToKey(i).enum_tag.int;
return @intToEnum(E, ip.indexToKey(int).int.storage.u64);
}
/// Returns the number of elements/fields of the aggregate type `ty`.
/// A declared struct with no backing `Module.Struct` (no fields) yields 0.
/// Asserts that `ty` is a struct, anonymous struct/tuple, array, or vector type.
/// NOTE(review): `Key.aggregate` stores the sentinel of a sentinel-terminated
/// array as an extra trailing element, while `array_type.len` here presumably
/// excludes it — confirm callers account for that discrepancy.
pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 {
    const key = ip.indexToKey(ty);
    switch (key) {
        .struct_type => |struct_type| {
            const struct_index = struct_type.index.unwrap() orelse return 0;
            return ip.structPtrConst(struct_index).fields.count();
        },
        .anon_struct_type => |anon_struct_type| return anon_struct_type.types.len,
        .array_type => |array_type| return array_type.len,
        .vector_type => |vector_type| return vector_type.len,
        else => unreachable,
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -177,13 +177,16 @@ pub fn print(
}
if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) {
switch (field_ptr.container_ty.tag()) {
.tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}),
else => {
const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod);
return writer.print(".{s}", .{field_name});
switch (mod.intern_pool.indexToKey(field_ptr.container_ty.ip_index)) {
.anon_struct_type => |anon_struct| {
if (anon_struct.names.len == 0) {
return writer.print(".@\"{d}\"", .{field_ptr.field_index});
}
},
else => {},
}
const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod);
return writer.print(".{s}", .{field_name});
} else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) {
const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index];
return writer.print(".{s}", .{field_name});
@ -396,12 +399,9 @@ fn printAggregate(
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
switch (ty.ip_index) {
.none => switch (ty.tag()) {
.anon_struct => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
else => {},
},
.none => {}, // TODO make this unreachable after finishing InternPool migration
else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
.struct_type, .anon_struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}),
else => {},
},
}

View File

@ -11411,7 +11411,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const union_obj = mod.typeToUnion(union_ty).?;
const field_name = union_obj.fields.keys()[extra.field_index];
const tag_ty = union_obj.tag_ty;
const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
const tag_int = tag_int_val.toUnsignedInt(mod);

View File

@ -3417,8 +3417,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
const op_inst = Air.refToIndex(un_op);
const op_ty = f.typeOf(un_op);
const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty;
var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) {
try reap(f, inst, &.{un_op});
@ -4115,8 +4114,7 @@ fn airCall(
}
resolved_arg.* = try f.resolveInst(arg);
if (arg_cty != try f.typeToIndex(arg_ty, .complete)) {
var lowered_arg_buf: LowerFnRetTyBuffer = undefined;
const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, mod);
const lowered_arg_ty = try lowerFnRetTy(arg_ty, mod);
const array_local = try f.allocLocal(inst, lowered_arg_ty);
try writer.writeAll("memcpy(");
@ -4146,8 +4144,7 @@ fn airCall(
};
const ret_ty = fn_ty.fnReturnType();
var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod);
const result_local = result: {
if (modifier == .always_tail) {
@ -5200,7 +5197,7 @@ fn fieldLocation(
const field_ty = container_ty.structFieldType(next_field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
break .{ .field = if (container_ty.isSimpleTuple())
break .{ .field = if (container_ty.isSimpleTuple(mod))
.{ .field = next_field_index }
else
.{ .identifier = container_ty.structFieldName(next_field_index, mod) } };
@ -5395,16 +5392,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const field_name: CValue = switch (struct_ty.ip_index) {
.none => switch (struct_ty.tag()) {
.tuple, .anon_struct => if (struct_ty.isSimpleTuple())
.{ .field = extra.field_index }
else
.{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
.struct_type => switch (struct_ty.containerLayout(mod)) {
.Auto, .Extern => if (struct_ty.isSimpleTuple())
.Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
.{ .field = extra.field_index }
else
.{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
@ -5465,6 +5457,12 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
},
},
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0)
.{ .field = extra.field_index }
else
.{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
.union_type => |union_type| field_name: {
const union_obj = mod.unionPtr(union_type.index);
if (union_obj.layout == .Packed) {
@ -6791,7 +6789,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const a = try Assignment.start(f, writer, field_ty);
try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple())
try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod))
.{ .field = field_i }
else
.{ .identifier = inst_ty.structFieldName(field_i, mod) });
@ -7704,25 +7702,21 @@ const Vectorize = struct {
}
};
const LowerFnRetTyBuffer = struct {
names: [1][]const u8,
types: [1]Type,
values: [1]Value,
payload: Type.Payload.AnonStruct,
};
fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *Module) Type {
if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn;
fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
if (ret_ty.ip_index == .noreturn_type) return Type.noreturn;
if (lowersToArray(ret_ty, mod)) {
buffer.names = [1][]const u8{"array"};
buffer.types = [1]Type{ret_ty};
buffer.values = [1]Value{Value.@"unreachable"};
buffer.payload = .{ .data = .{
.names = &buffer.names,
.types = &buffer.types,
.values = &buffer.values,
} };
return Type.initPayload(&buffer.payload.base);
const names = [1]InternPool.NullTerminatedString{
try mod.intern_pool.getOrPutString(mod.gpa, "array"),
};
const types = [1]InternPool.Index{ret_ty.ip_index};
const values = [1]InternPool.Index{.none};
const interned = try mod.intern(.{ .anon_struct_type = .{
.names = &names,
.types = &types,
.values = &values,
} });
return interned.toType();
}
return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;

View File

@ -1951,7 +1951,7 @@ pub const CType = extern union {
defer c_field_i += 1;
fields_pl[c_field_i] = .{
.name = try if (ty.isSimpleTuple())
.name = try if (ty.isSimpleTuple(mod))
std.fmt.allocPrintZ(arena, "f{}", .{field_i})
else
arena.dupeZ(u8, switch (zig_ty_tag) {
@ -2102,7 +2102,7 @@ pub const CType = extern union {
.payload => unreachable,
}) or !mem.eql(
u8,
if (ty.isSimpleTuple())
if (ty.isSimpleTuple(mod))
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
@ -2224,7 +2224,7 @@ pub const CType = extern union {
.global => .global,
.payload => unreachable,
});
hasher.update(if (ty.isSimpleTuple())
hasher.update(if (ty.isSimpleTuple(mod))
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),

View File

@ -2009,83 +2009,84 @@ pub const Object = struct {
break :blk fwd_decl;
};
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.anon_struct_type => |tuple| {
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
try di_fields.ensureUnusedCapacity(gpa, tuple.types.len);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
const field_size = field_ty.toType().abiSize(mod);
const field_align = field_ty.toType().abiAlignment(mod);
const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
offset = field_offset + field_size;
const field_size = field_ty.abiSize(mod);
const field_align = field_ty.abiAlignment(mod);
const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
offset = field_offset + field_size;
const field_name = if (tuple.names.len != 0)
mod.intern_pool.stringToSlice(tuple.names[i])
else
try std.fmt.allocPrintZ(gpa, "{d}", .{i});
defer gpa.free(field_name);
const field_name = if (ty.castTag(.anon_struct)) |payload|
try gpa.dupeZ(u8, payload.data.names[i])
else
try std.fmt.allocPrintZ(gpa, "{d}", .{i});
defer gpa.free(field_name);
try di_fields.append(gpa, dib.createMemberType(
fwd_decl.toScope(),
field_name,
null, // file
0, // line
field_size * 8, // size in bits
field_align * 8, // align in bits
field_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(field_ty.toType(), .full),
));
}
try di_fields.append(gpa, dib.createMemberType(
fwd_decl.toScope(),
field_name,
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
null, // file
0, // line
field_size * 8, // size in bits
field_align * 8, // align in bits
field_offset * 8, // offset in bits
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
0, // flags
try o.lowerDebugType(field_ty, .full),
));
}
null, // derived from
di_fields.items.ptr,
@intCast(c_int, di_fields.items.len),
0, // run time lang
null, // vtable holder
"", // unique id
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
},
.struct_type => |struct_type| s: {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
const full_di_ty = dib.createStructType(
compile_unit_scope,
name.ptr,
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
0, // flags
null, // derived from
di_fields.items.ptr,
@intCast(c_int, di_fields.items.len),
0, // run time lang
null, // vtable holder
"", // unique id
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
}
if (mod.typeToStruct(ty)) |struct_obj| {
if (!struct_obj.haveFieldTypes()) {
// This can happen if a struct type makes it all the way to
// flush() without ever being instantiated or referenced (even
// via pointer). The only reason we are hearing about it now is
// that it is being used as a namespace to put other debug types
// into. Therefore we can satisfy this by making an empty namespace,
// rather than changing the frontend to unnecessarily resolve the
// struct field types.
const owner_decl_index = ty.getOwnerDecl(mod);
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
return struct_di_ty;
}
if (!struct_obj.haveFieldTypes()) {
// This can happen if a struct type makes it all the way to
// flush() without ever being instantiated or referenced (even
// via pointer). The only reason we are hearing about it now is
// that it is being used as a namespace to put other debug types
// into. Therefore we can satisfy this by making an empty namespace,
// rather than changing the frontend to unnecessarily resolve the
// struct field types.
const owner_decl_index = ty.getOwnerDecl(mod);
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
return struct_di_ty;
}
},
else => {},
}
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
@ -2931,59 +2932,61 @@ pub const DeclGen = struct {
// reference, we need to copy it here.
gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
if (t.isSimpleTupleOrAnonStruct()) {
const tuple = t.tupleFields();
const llvm_struct_ty = dg.context.structCreateNamed("");
gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) {
.anon_struct_type => |tuple| {
const llvm_struct_ty = dg.context.structCreateNamed("");
gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
defer llvm_field_types.deinit(gpa);
var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
defer llvm_field_types.deinit(gpa);
try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len);
try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
for (tuple.types, 0..) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
for (tuple.types, tuple.values) |field_ty, field_val| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
const field_align = field_ty.abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
try llvm_field_types.append(gpa, llvm_array_ty);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
try llvm_field_types.append(gpa, llvm_array_ty);
}
const field_llvm_ty = try dg.lowerType(field_ty.toType());
try llvm_field_types.append(gpa, field_llvm_ty);
offset += field_ty.toType().abiSize(mod);
}
const field_llvm_ty = try dg.lowerType(field_ty);
try llvm_field_types.append(gpa, field_llvm_ty);
offset += field_ty.abiSize(mod);
}
{
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
try llvm_field_types.append(gpa, llvm_array_ty);
{
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
try llvm_field_types.append(gpa, llvm_array_ty);
}
}
}
llvm_struct_ty.structSetBody(
llvm_field_types.items.ptr,
@intCast(c_uint, llvm_field_types.items.len),
.False,
);
llvm_struct_ty.structSetBody(
llvm_field_types.items.ptr,
@intCast(c_uint, llvm_field_types.items.len),
.False,
);
return llvm_struct_ty;
}
return llvm_struct_ty;
},
.struct_type => |struct_type| struct_type,
else => unreachable,
};
const struct_obj = mod.typeToStruct(t).?;
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
@ -3625,71 +3628,74 @@ pub const DeclGen = struct {
const field_vals = tv.val.castTag(.aggregate).?.data;
const gpa = dg.gpa;
if (tv.ty.isSimpleTupleOrAnonStruct()) {
const tuple = tv.ty.tupleFields();
var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
defer llvm_fields.deinit(gpa);
const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
.anon_struct_type => |tuple| {
var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
defer llvm_fields.deinit(gpa);
try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
var need_unnamed = false;
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
var need_unnamed = false;
for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].ip_index != .unreachable_value) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
if (field_val != .none) continue;
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = field_ty.abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
// TODO make this and all other padding elsewhere in debug
// builds be 0xaa not undef.
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
// TODO make this and all other padding elsewhere in debug
// builds be 0xaa not undef.
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
const field_llvm_val = try dg.lowerValue(.{
.ty = field_ty.toType(),
.val = field_vals[i],
});
need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val);
llvm_fields.appendAssumeCapacity(field_llvm_val);
offset += field_ty.toType().abiSize(mod);
}
{
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
}
const field_llvm_val = try dg.lowerValue(.{
.ty = field_ty,
.val = field_vals[i],
});
need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val);
llvm_fields.appendAssumeCapacity(field_llvm_val);
offset += field_ty.abiSize(mod);
}
{
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
if (need_unnamed) {
return dg.context.constStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
.False,
);
} else {
return llvm_struct_ty.constNamedStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
);
}
}
},
.struct_type => |struct_type| struct_type,
else => unreachable,
};
if (need_unnamed) {
return dg.context.constStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
.False,
);
} else {
return llvm_struct_ty.constNamedStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
);
}
}
const struct_obj = mod.typeToStruct(tv.ty).?;
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
@ -4077,13 +4083,11 @@ pub const DeclGen = struct {
return field_addr.constIntToPtr(final_llvm_ty);
}
var ty_buf: Type.Payload.Pointer = undefined;
const parent_llvm_ty = try dg.lowerType(parent_ty);
if (llvmFieldIndex(parent_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
if (llvmField(parent_ty, field_index, mod)) |llvm_field| {
const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(llvm_field_index, .False),
llvm_u32.constInt(llvm_field.index, .False),
};
return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
} else {
@ -6006,8 +6010,7 @@ pub const FuncGen = struct {
return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
},
else => {
var ptr_ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index;
return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
},
},
@ -6035,16 +6038,22 @@ pub const FuncGen = struct {
switch (struct_ty.zigTypeTag(mod)) {
.Struct => {
assert(struct_ty.containerLayout(mod) != .Packed);
var ptr_ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
const llvm_field = llvmField(struct_ty, field_index, mod).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, "");
const field_ptr_ty = try mod.ptrType(.{
.elem_type = llvm_field.ty.ip_index,
.alignment = llvm_field.alignment,
});
if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail))
return field_ptr;
return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(mod), false);
const field_alignment = if (llvm_field.alignment != 0)
llvm_field.alignment
else
llvm_field.ty.abiAlignment(mod);
return self.loadByRef(field_ptr, field_ty, field_alignment, false);
} else {
return self.load(field_ptr, field_ptr_ty);
}
@ -6912,12 +6921,14 @@ pub const FuncGen = struct {
const struct_ty = self.air.getRefType(ty_pl.ty);
const field_index = ty_pl.payload;
var ptr_ty_buf: Type.Payload.Pointer = undefined;
const mod = self.dg.module;
const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
const llvm_field = llvmField(struct_ty, field_index, mod).?;
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, "");
const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, "");
const field_ptr_ty = try mod.ptrType(.{
.elem_type = llvm_field.ty.ip_index,
.alignment = llvm_field.alignment,
});
return self.load(field_ptr, field_ptr_ty);
}
@ -7430,9 +7441,8 @@ pub const FuncGen = struct {
const result = self.builder.buildExtractValue(result_struct, 0, "");
const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
var ty_buf: Type.Payload.Pointer = undefined;
const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?;
const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?;
const result_index = llvmField(dest_ty, 0, mod).?.index;
const overflow_index = llvmField(dest_ty, 1, mod).?.index;
if (isByRef(dest_ty, mod)) {
const result_alignment = dest_ty.abiAlignment(mod);
@ -7736,9 +7746,8 @@ pub const FuncGen = struct {
const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, "");
var ty_buf: Type.Payload.Pointer = undefined;
const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?;
const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?;
const result_index = llvmField(dest_ty, 0, mod).?.index;
const overflow_index = llvmField(dest_ty, 1, mod).?.index;
if (isByRef(dest_ty, mod)) {
const result_alignment = dest_ty.abiAlignment(mod);
@ -9300,8 +9309,6 @@ pub const FuncGen = struct {
return running_int;
}
var ptr_ty_buf: Type.Payload.Pointer = undefined;
if (isByRef(result_ty, mod)) {
const llvm_u32 = self.context.intType(32);
// TODO in debug builds init to undef so that the padding will be 0xaa
@ -9313,7 +9320,7 @@ pub const FuncGen = struct {
if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?;
const llvm_i = llvmField(result_ty, i, mod).?.index;
indices[1] = llvm_u32.constInt(llvm_i, .False);
const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
var field_ptr_payload: Type.Payload.Pointer = .{
@ -9334,7 +9341,7 @@ pub const FuncGen = struct {
if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
const llvm_elem = try self.resolveInst(elem);
const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?;
const llvm_i = llvmField(result_ty, i, mod).?.index;
result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, "");
}
return result;
@ -9796,9 +9803,8 @@ pub const FuncGen = struct {
else => {
const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty);
var ty_buf: Type.Payload.Pointer = undefined;
if (llvmFieldIndex(struct_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, "");
if (llvmField(struct_ty, field_index, mod)) |llvm_field| {
return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, "");
} else {
// If we found no index then this means this is a zero sized field at the
// end of the struct. Treat our struct pointer as an array of two and get
@ -10457,59 +10463,61 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
};
}
const LlvmField = struct {
index: c_uint,
ty: Type,
alignment: u32,
};
/// Take into account 0 bit fields and padding. Returns null if an llvm
/// field could not be found.
/// This only happens if you want the field index of a zero sized field at
/// the end of the struct.
fn llvmFieldIndex(
ty: Type,
field_index: usize,
mod: *Module,
ptr_pl_buf: *Type.Payload.Pointer,
) ?c_uint {
fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
// Detects where we inserted extra padding fields so that we can skip
// over them in this function.
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
var llvm_field_index: c_uint = 0;
for (tuple.types, 0..) |field_ty, i| {
if (tuple.values[i].ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;
const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.anon_struct_type => |tuple| {
var llvm_field_index: c_uint = 0;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
const field_align = field_ty.abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
llvm_field_index += 1;
}
if (field_index <= i) {
return .{
.index = llvm_field_index,
.ty = field_ty.toType(),
.alignment = field_align,
};
}
const padding_len = offset - prev_offset;
if (padding_len > 0) {
llvm_field_index += 1;
offset += field_ty.toType().abiSize(mod);
}
if (field_index <= i) {
ptr_pl_buf.* = .{
.data = .{
.pointee_type = field_ty,
.@"align" = field_align,
.@"addrspace" = .generic,
},
};
return llvm_field_index;
}
llvm_field_index += 1;
offset += field_ty.abiSize(mod);
}
return null;
}
const layout = ty.containerLayout(mod);
return null;
},
.struct_type => |s| s,
else => unreachable,
};
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const layout = struct_obj.layout;
assert(layout != .Packed);
var llvm_field_index: c_uint = 0;
var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
const field_align = field.alignment(mod, layout);
@ -10523,14 +10531,11 @@ fn llvmFieldIndex(
}
if (field_index == field_and_index.index) {
ptr_pl_buf.* = .{
.data = .{
.pointee_type = field.ty,
.@"align" = field_align,
.@"addrspace" = .generic,
},
return .{
.index = llvm_field_index,
.ty = field.ty,
.alignment = field_align,
};
return llvm_field_index;
}
llvm_field_index += 1;
@ -11089,21 +11094,24 @@ fn isByRef(ty: Type, mod: *Module) bool {
.Struct => {
// Packed structs are represented to LLVM as integers.
if (ty.containerLayout(mod) == .Packed) return false;
if (ty.isSimpleTupleOrAnonStruct()) {
const tuple = ty.tupleFields();
var count: usize = 0;
for (tuple.values, 0..) |field_val, i| {
if (field_val.ip_index != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue;
const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.anon_struct_type => |tuple| {
var count: usize = 0;
for (tuple.types, tuple.values) |field_ty, field_val| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
count += 1;
if (count > max_fields_byval) return true;
if (isByRef(tuple.types[i], mod)) return true;
}
return false;
}
count += 1;
if (count > max_fields_byval) return true;
if (isByRef(field_ty.toType(), mod)) return true;
}
return false;
},
.struct_type => |s| s,
else => unreachable,
};
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
var count: usize = 0;
const fields = ty.structFields(mod);
for (fields.values()) |field| {
for (struct_obj.fields.values()) |field| {
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
count += 1;

View File

@ -682,7 +682,7 @@ pub const DeclGen = struct {
else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}),
},
.Struct => {
if (ty.isSimpleTupleOrAnonStruct()) {
if (ty.isSimpleTupleOrAnonStruct(mod)) {
unreachable; // TODO
} else {
const struct_ty = mod.typeToStruct(ty).?;
@ -1319,7 +1319,8 @@ pub const DeclGen = struct {
defer self.gpa.free(member_names);
var member_index: usize = 0;
for (struct_ty.fields.values(), 0..) |field, i| {
const struct_obj = void; // TODO
for (struct_obj.fields.values(), 0..) |field, i| {
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
member_types[member_index] = try self.resolveType(field.ty, .indirect);
@ -1327,7 +1328,7 @@ pub const DeclGen = struct {
member_index += 1;
}
const name = try struct_ty.getFullyQualifiedName(self.module);
const name = try struct_obj.getFullyQualifiedName(self.module);
defer self.module.gpa.free(name);
return try self.spv.resolve(.{ .struct_type = .{
@ -2090,7 +2091,7 @@ pub const DeclGen = struct {
var i: usize = 0;
while (i < mask_len) : (i += 1) {
const elem = try mask.elemValue(self.module, i);
const elem = try mask.elemValue(mod, i);
if (elem.isUndef(mod)) {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
@ -2805,7 +2806,7 @@ pub const DeclGen = struct {
const value = try self.resolve(bin_op.rhs);
const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false;
if (val_is_undef) {
const undef = try self.spv.constUndef(ptr_ty_ref);
try self.store(ptr_ty, ptr, undef);

View File

@ -333,13 +333,12 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
switch (ty.tag()) {
.tuple, .anon_struct => {
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.anon_struct_type => |fields| {
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
const fields = ty.tupleFields();
for (fields.types, 0..) |field, field_index| {
for (fields.types, 0..) |field_ty, field_index| {
// DW.AT.member
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@ -347,28 +346,30 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
else => {
.struct_type => |struct_type| s: {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
// DW.AT.name, DW.FORM.string
const struct_name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
dbg_info_buffer.appendAssumeCapacity(0);
const struct_obj = mod.typeToStruct(ty).?;
if (struct_obj.layout == .Packed) {
log.debug("TODO implement .debug_info for packed structs", .{});
break :blk;
}
const fields = ty.structFields(mod);
for (fields.keys(), 0..) |field_name, field_index| {
const field = fields.get(field_name).?;
for (
struct_obj.fields.keys(),
struct_obj.fields.values(),
0..,
) |field_name, field, field_index| {
if (!field.ty.hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
@ -385,6 +386,7 @@ pub const DeclState = struct {
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
else => unreachable,
}
// DW.AT.structure_type delimit children

File diff suppressed because it is too large Load Diff

View File

@ -1889,26 +1889,28 @@ pub const Value = struct {
const b_field_vals = b.castTag(.aggregate).?.data;
assert(a_field_vals.len == b_field_vals.len);
if (ty.isSimpleTupleOrAnonStruct()) {
const types = ty.tupleFields().types;
assert(types.len == a_field_vals.len);
for (types, 0..) |field_ty, i| {
if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, opt_sema))) {
return false;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.anon_struct_type => |anon_struct| {
assert(anon_struct.types.len == a_field_vals.len);
for (anon_struct.types, 0..) |field_ty, i| {
if (!(try eqlAdvanced(a_field_vals[i], field_ty.toType(), b_field_vals[i], field_ty.toType(), mod, opt_sema))) {
return false;
}
}
}
return true;
}
if (ty.zigTypeTag(mod) == .Struct) {
const fields = ty.structFields(mod).values();
assert(fields.len == a_field_vals.len);
for (fields, 0..) |field, i| {
if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
return false;
return true;
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const fields = struct_obj.fields.values();
assert(fields.len == a_field_vals.len);
for (fields, 0..) |field, i| {
if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
return false;
}
}
}
return true;
return true;
},
else => {},
}
const elem_ty = ty.childType(mod);
@ -2017,20 +2019,6 @@ pub const Value = struct {
if ((try ty.onePossibleValue(mod)) != null) {
return true;
}
if (a_ty.castTag(.anon_struct)) |payload| {
const tuple = payload.data;
if (tuple.values.len != 1) {
return false;
}
const field_name = tuple.names[0];
const union_obj = mod.typeToUnion(ty).?;
const field_index = @intCast(u32, union_obj.fields.getIndex(field_name) orelse return false);
const tag_and_val = b.castTag(.@"union").?.data;
const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, field_index);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
if (!tag_matches) return false;
return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, opt_sema);
}
return false;
},
.Float => {