InternPool: prevent anon struct UAF bugs with type safety

Instead of using actual slices for InternPool.Key.AnonStructType, this
commit changes to Slice types, which store a long-lived index into the
intern pool rather than a pointer.

This is a follow-up to 7ef1eb1c27754cb0349fdc10db1f02ff2dddd99b.
This commit is contained in:
Andrew Kelley 2023-09-12 13:32:14 -07:00
parent 7e2b6b0f1b
commit cb6201715a
10 changed files with 248 additions and 178 deletions

View File

@ -373,11 +373,11 @@ pub const Key = union(enum) {
};
pub const AnonStructType = struct {
types: []const Index,
types: Index.Slice,
/// This may be empty, indicating this is a tuple.
names: []const NullTerminatedString,
names: NullTerminatedString.Slice,
/// These elements may be `none`, indicating runtime-known.
values: []const Index,
values: Index.Slice,
pub fn isTuple(self: AnonStructType) bool {
return self.names.len == 0;
@ -1020,9 +1020,9 @@ pub const Key = union(enum) {
.anon_struct_type => |anon_struct_type| {
var hasher = Hash.init(seed);
for (anon_struct_type.types) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.values) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.names) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.types.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.values.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.names.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
return hasher.final();
},
@ -1352,9 +1352,9 @@ pub const Key = union(enum) {
},
.anon_struct_type => |a_info| {
const b_info = b.anon_struct_type;
return std.mem.eql(Index, a_info.types, b_info.types) and
std.mem.eql(Index, a_info.values, b_info.values) and
std.mem.eql(NullTerminatedString, a_info.names, b_info.names);
return std.mem.eql(Index, a_info.types.get(ip), b_info.types.get(ip)) and
std.mem.eql(Index, a_info.values.get(ip), b_info.values.get(ip)) and
std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip));
},
.error_set_type => |a_info| {
const b_info = b.error_set_type;
@ -2113,9 +2113,9 @@ pub const static_keys = [_]Key{
// empty_struct_type
.{ .anon_struct_type = .{
.types = &.{},
.names = &.{},
.values = &.{},
.types = .{ .start = 0, .len = 0 },
.names = .{ .start = 0, .len = 0 },
.values = .{ .start = 0, .len = 0 },
} },
.{ .simple_value = .undefined },
@ -3025,7 +3025,17 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void {
// This inserts all the statically-known values into the intern pool in the
// order expected.
for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable;
for (static_keys[0..@intFromEnum(Index.empty_struct_type)]) |key| {
_ = ip.get(gpa, key) catch unreachable;
}
_ = ip.getAnonStructType(gpa, .{
.types = &.{},
.names = &.{},
.values = &.{},
}) catch unreachable;
for (static_keys[@intFromEnum(Index.empty_struct_type) + 1 ..]) |key| {
_ = ip.get(gpa, key) catch unreachable;
}
if (std.debug.runtime_safety) {
// Sanity check.
@ -3155,30 +3165,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.namespace = @as(Module.Namespace.Index, @enumFromInt(data)).toOptional(),
} },
.type_struct_anon => {
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data);
const fields_len = type_struct_anon.data.fields_len;
const types = ip.extra.items[type_struct_anon.end..][0..fields_len];
const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len];
return .{ .anon_struct_type = .{
.types = @ptrCast(types),
.values = @ptrCast(values),
.names = @ptrCast(names),
} };
},
.type_tuple_anon => {
const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data);
const fields_len = type_struct_anon.data.fields_len;
const types = ip.extra.items[type_struct_anon.end..][0..fields_len];
const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
return .{ .anon_struct_type = .{
.types = @ptrCast(types),
.values = @ptrCast(values),
.names = &.{},
} };
},
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) },
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) },
.type_union => .{ .union_type = extraUnionType(ip, data) },
.type_enum_auto => {
@ -3577,6 +3565,44 @@ fn extraUnionType(ip: *const InternPool, extra_index: u32) Key.UnionType {
};
}
/// Decode the trailing data of a `TypeStructAnon` record at `extra_index`
/// into a `Key.AnonStructType`. The trailing layout is three consecutive
/// runs of `fields_len` elements each: types, then values, then names.
/// The returned slices are pool-relative (`start`/`len`), so they stay
/// valid even if `ip.extra` is later reallocated.
fn extraTypeStructAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType {
    const trail = ip.extraDataTrail(TypeStructAnon, extra_index);
    const len = trail.data.fields_len;
    const types_start = trail.end;
    const values_start = types_start + len;
    const names_start = values_start + len;
    return .{
        .types = .{ .start = types_start, .len = len },
        .values = .{ .start = values_start, .len = len },
        .names = .{ .start = names_start, .len = len },
    };
}
/// Decode the trailing data of a `TypeStructAnon` record that represents a
/// tuple: two consecutive runs of `fields_len` elements (types, then
/// values). Tuples carry no field names, so `names` is an empty
/// pool-relative slice.
fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructType {
    const trail = ip.extraDataTrail(TypeStructAnon, extra_index);
    const len = trail.data.fields_len;
    return .{
        .types = .{ .start = trail.end, .len = len },
        .values = .{ .start = trail.end + len, .len = len },
        // Empty names marks this anon struct as a tuple (see Key.AnonStructType.isTuple).
        .names = .{ .start = 0, .len = 0 },
    };
}
fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType {
const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index);
var index: usize = type_function.end;
@ -3864,44 +3890,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
},
.anon_struct_type => |anon_struct_type| {
assert(anon_struct_type.types.len == anon_struct_type.values.len);
for (anon_struct_type.types) |elem| assert(elem != .none);
const fields_len: u32 = @intCast(anon_struct_type.types.len);
if (anon_struct_type.names.len == 0) {
try ip.extra.ensureUnusedCapacity(
gpa,
@typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 2),
);
ip.items.appendAssumeCapacity(.{
.tag = .type_tuple_anon,
.data = ip.addExtraAssumeCapacity(TypeStructAnon{
.fields_len = fields_len,
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast(anon_struct_type.types));
ip.extra.appendSliceAssumeCapacity(@ptrCast(anon_struct_type.values));
return @enumFromInt(ip.items.len - 1);
}
assert(anon_struct_type.names.len == anon_struct_type.types.len);
try ip.extra.ensureUnusedCapacity(
gpa,
@typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3),
);
ip.items.appendAssumeCapacity(.{
.tag = .type_struct_anon,
.data = ip.addExtraAssumeCapacity(TypeStructAnon{
.fields_len = fields_len,
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast(anon_struct_type.types));
ip.extra.appendSliceAssumeCapacity(@ptrCast(anon_struct_type.values));
ip.extra.appendSliceAssumeCapacity(@ptrCast(anon_struct_type.names));
return @enumFromInt(ip.items.len - 1);
},
.anon_struct_type => unreachable, // use getAnonStructType() instead
.union_type => unreachable, // use getUnionType() instead
@ -4408,7 +4397,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
}
},
.anon_struct_type => |anon_struct_type| {
for (aggregate.storage.values(), anon_struct_type.types) |elem, ty| {
for (aggregate.storage.values(), anon_struct_type.types.get(ip)) |elem, ty| {
assert(ip.typeOf(elem) == ty);
}
},
@ -4426,7 +4415,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
switch (ty_key) {
.anon_struct_type => |anon_struct_type| opv: {
switch (aggregate.storage) {
.bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| {
.bytes => |bytes| for (anon_struct_type.values.get(ip), bytes) |value, byte| {
if (value != ip.getIfExists(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
@ -4434,10 +4423,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.elems => |elems| if (!std.mem.eql(
Index,
anon_struct_type.values,
anon_struct_type.values.get(ip),
elems,
)) break :opv,
.repeated_elem => |elem| for (anon_struct_type.values) |value| {
.repeated_elem => |elem| for (anon_struct_type.values.get(ip)) |value| {
if (value != elem) break :opv;
},
}
@ -4646,6 +4635,53 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocat
return @enumFromInt(ip.items.len - 1);
}
/// Input to `getAnonStructType`. Unlike `Key.AnonStructType`, which stores
/// pool-relative `Slice`s, this uses real slices because the caller builds
/// these arrays before the data has been copied into the intern pool.
pub const AnonStructTypeInit = struct {
    /// Field types; every element must be a valid index (asserted non-`none`
    /// by `getAnonStructType`). Length must equal `values.len`.
    types: []const Index,
    /// This may be empty, indicating this is a tuple.
    names: []const NullTerminatedString,
    /// These elements may be `none`, indicating runtime-known.
    values: []const Index,
};
/// Intern an anonymous struct or tuple type, returning the existing `Index`
/// if an identical type is already in the pool, or appending a new item
/// otherwise. An empty `ini.names` means the type is a tuple and is encoded
/// with the `.type_tuple_anon` tag (no trailing names run); otherwise
/// `.type_struct_anon` is used with a trailing names run.
pub fn getAnonStructType(ip: *InternPool, gpa: Allocator, ini: AnonStructTypeInit) Allocator.Error!Index {
    assert(ini.types.len == ini.values.len);
    for (ini.types) |elem| assert(elem != .none);

    // Remember the extra array length so the speculative writes below can be
    // rolled back if the type turns out to already exist in the pool.
    const prev_extra_len = ip.extra.items.len;
    const fields_len: u32 = @intCast(ini.types.len);
    // Reserve the worst case (struct form: header + types + values + names)
    // up front so all subsequent appends are infallible.
    try ip.extra.ensureUnusedCapacity(
        gpa,
        @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3),
    );
    try ip.items.ensureUnusedCapacity(gpa, 1);

    const extra_index = ip.addExtraAssumeCapacity(TypeStructAnon{
        .fields_len = fields_len,
    });
    ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.types));
    ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.values));

    const adapter: KeyAdapter = .{ .intern_pool = ip };
    // The key is built from the freshly written extra data so that it refers
    // to pool-relative slices rather than the caller's (possibly short-lived)
    // input slices. For the struct form, names must be appended before the
    // key is decoded.
    const key: Key = .{
        .anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(ip, extra_index) else k: {
            assert(ini.names.len == ini.types.len);
            ip.extra.appendSliceAssumeCapacity(@ptrCast(ini.names));
            break :k extraTypeStructAnon(ip, extra_index);
        },
    };
    const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
    if (gop.found_existing) {
        // Duplicate type: discard the speculative extra data and return the
        // index of the existing entry.
        ip.extra.items.len = prev_extra_len;
        return @enumFromInt(gop.index);
    }
    ip.items.appendAssumeCapacity(.{
        .tag = if (ini.names.len == 0) .type_tuple_anon else .type_struct_anon,
        .data = extra_index,
    });
    return @enumFromInt(ip.items.len - 1);
}
/// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`.
pub const GetFuncTypeKey = struct {
param_types: []Index,
@ -6056,7 +6092,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
for (agg_elems, 0..) |*elem, i| {
const new_elem_ty = switch (ip.indexToKey(new_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
.anon_struct_type => |anon_struct_type| anon_struct_type.types[i],
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i],
.struct_type => |struct_type| ip.structPtr(struct_type.index.unwrap().?)
.fields.values()[i].ty.toIntern(),
else => unreachable,

View File

@ -8052,11 +8052,12 @@ fn instantiateGenericCall(
fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
const mod = sema.mod;
const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
const tuple = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| tuple,
else => return,
};
for (tuple.types, tuple.values) |field_ty, field_val| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
try sema.resolveTupleLazyValues(block, src, field_ty.toType());
if (field_val == .none) continue;
// TODO: mutate in intern pool
@ -12929,7 +12930,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
.anon_struct_type => |anon_struct| {
if (anon_struct.names.len != 0) {
break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, field_name) != null;
break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names.get(ip), field_name) != null;
} else {
const field_index = field_name.toUnsigned(ip) orelse break :hf false;
break :hf field_index < ty.structFieldCount(mod);
@ -13558,11 +13559,11 @@ fn analyzeTupleCat(
break :rs runtime_src;
};
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, .{
.types = types,
.values = values,
.names = &.{},
} });
});
const runtime_src = opt_runtime_src orelse {
const tuple_val = try mod.intern(.{ .aggregate = .{
@ -13889,11 +13890,11 @@ fn analyzeTupleMul(
break :rs runtime_src;
};
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
const tuple_ty = try mod.intern_pool.getAnonStructType(mod.gpa, .{
.types = types,
.values = values,
.names = &.{},
} });
});
const runtime_src = opt_runtime_src orelse {
const tuple_val = try mod.intern(.{ .aggregate = .{
@ -15217,6 +15218,7 @@ fn zirOverflowArithmetic(
const lhs_ty = sema.typeOf(uncasted_lhs);
const rhs_ty = sema.typeOf(uncasted_rhs);
const mod = sema.mod;
const ip = &mod.intern_pool;
try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
@ -15244,7 +15246,7 @@ fn zirOverflowArithmetic(
const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);
const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
const overflow_ty = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type.types[1].toType();
const overflow_ty = ip.indexToKey(tuple_ty.toIntern()).anon_struct_type.types.get(ip)[1].toType();
var result: struct {
inst: Air.Inst.Ref = .none,
@ -15418,6 +15420,7 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value {
fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
const mod = sema.mod;
const ip = &mod.intern_pool;
const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{
.len = ty.vectorLen(mod),
.child = .u1_type,
@ -15425,11 +15428,11 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() };
const values = [2]InternPool.Index{ .none, .none };
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
const tuple_ty = try ip.getAnonStructType(mod.gpa, .{
.types = &types,
.values = &values,
.names = &.{},
} });
});
return tuple_ty.toType();
}
@ -17578,15 +17581,15 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len);
for (struct_field_vals, 0..) |*struct_field_val, i| {
const anon_struct_type = ip.indexToKey(ty.toIntern()).anon_struct_type;
const field_ty = anon_struct_type.types[i];
const field_val = anon_struct_type.values[i];
const field_ty = anon_struct_type.types.get(ip)[i];
const field_val = anon_struct_type.values.get(ip)[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
// TODO: write something like getCoercedInts to avoid needing to dupe
const bytes = if (tuple.names.len != 0)
// https://github.com/ziglang/zig/issues/15709
try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(ty.toIntern()).anon_struct_type.names[i]))
try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(ty.toIntern()).anon_struct_type.names.get(ip)[i]))
else
try std.fmt.allocPrint(sema.arena, "{d}", .{i});
const new_decl_ty = try mod.arrayType(.{
@ -19254,7 +19257,7 @@ fn finishStructInit(
switch (ip.indexToKey(struct_ty.toIntern())) {
.anon_struct_type => |anon_struct| {
for (anon_struct.values, 0..) |default_val, i| {
for (anon_struct.values.get(ip), 0..) |default_val, i| {
if (field_inits[i] != .none) continue;
if (default_val == .none) {
@ -19266,7 +19269,7 @@ fn finishStructInit(
root_msg = try sema.errMsg(block, init_src, template, .{i});
}
} else {
const field_name = anon_struct.names[i];
const field_name = anon_struct.names.get(ip)[i];
const template = "missing struct field: {}";
const args = .{field_name.fmt(ip)};
if (root_msg) |msg| {
@ -19395,6 +19398,7 @@ fn structInitAnon(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const zir_datas = sema.code.instructions.items(.data);
const types = try sema.arena.alloc(InternPool.Index, extra_data.fields_len);
@ -19465,11 +19469,11 @@ fn structInitAnon(
break :rs runtime_index;
};
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
const tuple_ty = try ip.getAnonStructType(gpa, .{
.names = fields.keys(),
.types = types,
.values = values,
} });
});
const runtime_index = opt_runtime_index orelse {
const tuple_val = try mod.intern(.{ .aggregate = .{
@ -19688,6 +19692,8 @@ fn arrayInitAnon(
is_ref: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const types = try sema.arena.alloc(InternPool.Index, operands.len);
const values = try sema.arena.alloc(InternPool.Index, operands.len);
@ -19701,7 +19707,7 @@ fn arrayInitAnon(
if (types[i].toType().zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa);
errdefer msg.destroy(gpa);
try sema.addDeclaredHereNote(msg, types[i].toType());
break :msg msg;
@ -19718,11 +19724,11 @@ fn arrayInitAnon(
break :rs runtime_src;
};
const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
const tuple_ty = try ip.getAnonStructType(gpa, .{
.types = types,
.values = values,
.names = &.{},
} });
});
const runtime_src = opt_runtime_src orelse {
const tuple_val = try mod.intern(.{ .aggregate = .{
@ -19832,7 +19838,7 @@ fn fieldType(
.Struct => switch (ip.indexToKey(cur_ty.toIntern())) {
.anon_struct_type => |anon_struct| {
const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src);
return Air.internedToRef(anon_struct.types[field_index]);
return Air.internedToRef(anon_struct.types.get(ip)[field_index]);
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
@ -30574,13 +30580,14 @@ fn coerceAnonStructToUnion(
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
const inst_ty = sema.typeOf(inst);
const field_info: union(enum) {
name: InternPool.NullTerminatedString,
count: usize,
} = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) {
} = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1)
.{ .name = anon_struct_type.names[0] }
.{ .name = anon_struct_type.names.get(ip)[0] }
else
.{ .count = anon_struct_type.names.len },
.struct_type => |struct_type| name: {
@ -30876,7 +30883,7 @@ fn coerceTupleToStruct(
// https://github.com/ziglang/zig/issues/15709
const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0)
anon_struct_type.names[field_i]
anon_struct_type.names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i],
@ -30994,7 +31001,7 @@ fn coerceTupleToTuple(
// https://github.com/ziglang/zig/issues/15709
const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0)
anon_struct_type.names[field_i]
anon_struct_type.names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i],
@ -31005,12 +31012,12 @@ fn coerceTupleToTuple(
return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{});
const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types[field_index_usize].toType(),
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize].toType(),
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty,
else => unreachable,
};
const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.values[field_index_usize],
.anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize],
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val,
else => unreachable,
};
@ -31048,7 +31055,7 @@ fn coerceTupleToTuple(
if (field_ref.* != .none) continue;
const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.values[i],
.anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i],
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val,
else => unreachable,
};
@ -32855,6 +32862,7 @@ fn resolvePeerTypesInner(
peer_vals: []?Value,
) !PeerResolveResult {
const mod = sema.mod;
const ip = &mod.intern_pool;
var strat_reason: usize = 0;
var s: PeerResolveStrategy = .unknown;
@ -32912,7 +32920,7 @@ fn resolvePeerTypesInner(
.ErrorUnion => blk: {
const set_ty = ty.errorUnionSet(mod);
ty_ptr.* = ty.errorUnionPayload(mod);
if (val_ptr.*) |eu_val| switch (mod.intern_pool.indexToKey(eu_val.toIntern())) {
if (val_ptr.*) |eu_val| switch (ip.indexToKey(eu_val.toIntern())) {
.error_union => |eu| switch (eu.val) {
.payload => |payload_ip| val_ptr.* = payload_ip.toValue(),
.err_name => val_ptr.* = null,
@ -33166,8 +33174,8 @@ fn resolvePeerTypesInner(
}).toIntern();
if (ptr_info.sentinel != .none and peer_info.sentinel != .none) {
const peer_sent = try mod.intern_pool.getCoerced(sema.gpa, ptr_info.sentinel, ptr_info.child);
const ptr_sent = try mod.intern_pool.getCoerced(sema.gpa, peer_info.sentinel, ptr_info.child);
const peer_sent = try ip.getCoerced(sema.gpa, ptr_info.sentinel, ptr_info.child);
const ptr_sent = try ip.getCoerced(sema.gpa, peer_info.sentinel, ptr_info.child);
if (ptr_sent == peer_sent) {
ptr_info.sentinel = ptr_sent;
} else {
@ -33278,7 +33286,7 @@ fn resolvePeerTypesInner(
ptr_info.flags.is_volatile = ptr_info.flags.is_volatile or peer_info.flags.is_volatile;
const peer_sentinel: InternPool.Index = switch (peer_info.flags.size) {
.One => switch (mod.intern_pool.indexToKey(peer_info.child)) {
.One => switch (ip.indexToKey(peer_info.child)) {
.array_type => |array_type| array_type.sentinel,
else => .none,
},
@ -33287,7 +33295,7 @@ fn resolvePeerTypesInner(
};
const cur_sentinel: InternPool.Index = switch (ptr_info.flags.size) {
.One => switch (mod.intern_pool.indexToKey(ptr_info.child)) {
.One => switch (ip.indexToKey(ptr_info.child)) {
.array_type => |array_type| array_type.sentinel,
else => .none,
},
@ -33449,7 +33457,7 @@ fn resolvePeerTypesInner(
}
const sentinel_ty = switch (ptr_info.flags.size) {
.One => switch (mod.intern_pool.indexToKey(ptr_info.child)) {
.One => switch (ip.indexToKey(ptr_info.child)) {
.array_type => |array_type| array_type.child,
else => ptr_info.child,
},
@ -33460,11 +33468,11 @@ fn resolvePeerTypesInner(
no_sentinel: {
if (peer_sentinel == .none) break :no_sentinel;
if (cur_sentinel == .none) break :no_sentinel;
const peer_sent_coerced = try mod.intern_pool.getCoerced(sema.gpa, peer_sentinel, sentinel_ty);
const cur_sent_coerced = try mod.intern_pool.getCoerced(sema.gpa, cur_sentinel, sentinel_ty);
const peer_sent_coerced = try ip.getCoerced(sema.gpa, peer_sentinel, sentinel_ty);
const cur_sent_coerced = try ip.getCoerced(sema.gpa, cur_sentinel, sentinel_ty);
if (peer_sent_coerced != cur_sent_coerced) break :no_sentinel;
// Sentinels match
if (ptr_info.flags.size == .One) switch (mod.intern_pool.indexToKey(ptr_info.child)) {
if (ptr_info.flags.size == .One) switch (ip.indexToKey(ptr_info.child)) {
.array_type => |array_type| ptr_info.child = (try mod.arrayType(.{
.len = array_type.len,
.child = array_type.child,
@ -33478,7 +33486,7 @@ fn resolvePeerTypesInner(
}
// Clear existing sentinel
ptr_info.sentinel = .none;
switch (mod.intern_pool.indexToKey(ptr_info.child)) {
switch (ip.indexToKey(ptr_info.child)) {
.array_type => |array_type| ptr_info.child = (try mod.arrayType(.{
.len = array_type.len,
.child = array_type.child,
@ -33501,7 +33509,7 @@ fn resolvePeerTypesInner(
.peer_idx_a = first_idx,
.peer_idx_b = other_idx,
} },
else => switch (mod.intern_pool.indexToKey(pointee)) {
else => switch (ip.indexToKey(pointee)) {
.array_type => |array_type| if (array_type.child == .noreturn_type) return .{ .conflict = .{
.peer_idx_a = first_idx,
.peer_idx_b = other_idx,
@ -33785,7 +33793,7 @@ fn resolvePeerTypesInner(
is_tuple = ty.isTuple(mod);
field_count = ty.structFieldCount(mod);
if (!is_tuple) {
const names = mod.intern_pool.indexToKey(ty.toIntern()).anon_struct_type.names;
const names = ip.indexToKey(ty.toIntern()).anon_struct_type.names.get(ip);
field_names = try sema.arena.dupe(InternPool.NullTerminatedString, names);
}
continue;
@ -33839,7 +33847,7 @@ fn resolvePeerTypesInner(
result_buf.* = result;
const field_name = if (is_tuple) name: {
break :name try std.fmt.allocPrint(sema.arena, "{d}", .{field_idx});
} else try sema.arena.dupe(u8, mod.intern_pool.stringToSlice(field_names[field_idx]));
} else try sema.arena.dupe(u8, ip.stringToSlice(field_names[field_idx]));
// The error info needs the field types, but we can't reuse sub_peer_tys
// since the recursive call may have clobbered it.
@ -33892,11 +33900,11 @@ fn resolvePeerTypesInner(
field_val.* = if (comptime_val) |v| v.toIntern() else .none;
}
const final_ty = try mod.intern(.{ .anon_struct_type = .{
const final_ty = try ip.getAnonStructType(mod.gpa, .{
.types = field_types,
.names = if (is_tuple) &.{} else field_names,
.values = field_vals,
} });
});
return .{ .success = final_ty.toType() };
},
@ -34491,6 +34499,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
/// be resolved.
pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Pointer => {
return sema.resolveTypeFully(ty.childType(mod));
@ -34498,7 +34507,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
.Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => return sema.resolveStructFully(ty),
.anon_struct_type => |tuple| {
for (tuple.types) |field_ty| {
for (tuple.types.get(ip)) |field_ty| {
try sema.resolveTypeFully(field_ty.toType());
}
},
@ -34518,7 +34527,6 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
// the function is instantiated.
return;
}
const ip = &mod.intern_pool;
for (0..info.param_types.len) |i| {
const param_ty = info.param_types.get(ip)[i];
try sema.resolveTypeFully(param_ty.toType());
@ -36133,7 +36141,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
},
.anon_struct_type => |tuple| {
for (tuple.values) |val| {
for (tuple.values.get(ip)) |val| {
if (val == .none) return null;
}
// In this case the struct has all comptime-known fields and
@ -36141,7 +36149,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
// TODO: write something like getCoercedInts to avoid needing to dupe
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values) },
.storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values.get(ip)) },
} })).toValue();
},
@ -36611,7 +36619,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
}
},
.anon_struct_type => |tuple| {
for (tuple.types, tuple.values) |field_ty, val| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
const have_comptime_val = val != .none;
if (!have_comptime_val and try sema.typeRequiresComptime(field_ty.toType())) {
return true;
@ -36784,8 +36792,9 @@ fn anonStructFieldIndex(
field_src: LazySrcLoc,
) !u32 {
const mod = sema.mod;
switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| {
const ip = &mod.intern_pool;
switch (ip.indexToKey(struct_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| {
if (name == field_name) return @intCast(i);
},
.struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
@ -36798,7 +36807,7 @@ fn anonStructFieldIndex(
else => unreachable,
}
return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
field_name.fmt(&mod.intern_pool), struct_ty.fmt(sema.mod),
field_name.fmt(ip), struct_ty.fmt(sema.mod),
});
}

View File

@ -423,6 +423,7 @@ fn printAggregate(
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const ip = &mod.intern_pool;
if (ty.zigTypeTag(mod) == .Struct) {
try writer.writeAll(".{");
const max_len = @min(ty.structFieldCount(mod), max_aggregate_items);
@ -430,13 +431,13 @@ fn printAggregate(
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
const field_name = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const field_name = switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(),
.anon_struct_type => |x| if (x.isTuple()) .none else x.names[i].toOptional(),
.anon_struct_type => |x| if (x.isTuple()) .none else x.names.get(ip)[i].toOptional(),
else => unreachable,
};
if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(&mod.intern_pool)});
if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(ip)});
try print(.{
.ty = ty.structFieldType(i, mod),
.val = try val.fieldValue(mod, i),

View File

@ -438,7 +438,11 @@ pub fn generateSymbol(
},
.anon_struct_type => |tuple| {
const struct_begin = code.items.len;
for (tuple.types, tuple.values, 0..) |field_ty, comptime_val, index| {
for (
tuple.types.get(ip),
tuple.values.get(ip),
0..,
) |field_ty, comptime_val, index| {
if (comptime_val != .none) continue;
if (!field_ty.toType().hasRuntimeBits(mod)) continue;

View File

@ -1275,7 +1275,11 @@ pub const DeclGen = struct {
try writer.writeByte('{');
var empty = true;
for (tuple.types, tuple.values, 0..) |field_ty, comptime_ty, field_i| {
for (
tuple.types.get(ip),
tuple.values.get(ip),
0..,
) |field_ty, comptime_ty, field_i| {
if (comptime_ty != .none) continue;
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
@ -7745,16 +7749,18 @@ fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type {
if (ret_ty.ip_index == .noreturn_type) return Type.noreturn;
if (lowersToArray(ret_ty, mod)) {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
const names = [1]InternPool.NullTerminatedString{
try mod.intern_pool.getOrPutString(mod.gpa, "array"),
try ip.getOrPutString(gpa, "array"),
};
const types = [1]InternPool.Index{ret_ty.ip_index};
const values = [1]InternPool.Index{.none};
const interned = try mod.intern(.{ .anon_struct_type = .{
const interned = try ip.getAnonStructType(gpa, .{
.names = &names,
.types = &types,
.values = &values,
} });
});
return interned.toType();
}

View File

@ -2392,7 +2392,7 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
const field_size = field_ty.toType().abiSize(mod);
@ -2401,7 +2401,7 @@ pub const Object = struct {
offset = field_offset + field_size;
const field_name = if (tuple.names.len != 0)
ip.stringToSlice(tuple.names[i])
ip.stringToSlice(tuple.names.get(ip)[i])
else
try std.fmt.allocPrintZ(gpa, "{d}", .{i});
defer if (tuple.names.len == 0) gpa.free(field_name);
@ -3325,7 +3325,10 @@ pub const Object = struct {
var offset: u64 = 0;
var big_align: u32 = 0;
for (anon_struct_type.types, anon_struct_type.values) |field_ty, field_val| {
for (
anon_struct_type.types.get(ip),
anon_struct_type.values.get(ip),
) |field_ty, field_val| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
const field_align = field_ty.toType().abiAlignment(mod);
@ -3874,7 +3877,11 @@ pub const Object = struct {
var offset: u64 = 0;
var big_align: u32 = 0;
var need_unnamed = false;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, field_index| {
for (
tuple.types.get(ip),
tuple.values.get(ip),
0..,
) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
@ -10537,10 +10544,11 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
var offset: u64 = 0;
var big_align: u32 = 0;
const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var llvm_field_index: c_uint = 0;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
const field_align = field_ty.toType().abiAlignment(mod);
@ -11118,6 +11126,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
// For tuples and structs, if there are more than this many non-void
// fields, then we make it byref, otherwise byval.
const max_fields_byval = 0;
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Type,
@ -11146,10 +11155,10 @@ fn isByRef(ty: Type, mod: *Module) bool {
.Struct => {
// Packed structs are represented to LLVM as integers.
if (ty.containerLayout(mod) == .Packed) return false;
const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var count: usize = 0;
for (tuple.types, tuple.values) |field_ty, field_val| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
count += 1;

View File

@ -1227,6 +1227,7 @@ pub const DeclGen = struct {
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef {
const mod = self.module;
const ip = &mod.intern_pool;
log.debug("resolveType: ty = {}", .{ty.fmt(self.module)});
const target = self.getTarget();
switch (ty.zigTypeTag(mod)) {
@ -1271,7 +1272,6 @@ pub const DeclGen = struct {
},
.Fn => switch (repr) {
.direct => {
const ip = &mod.intern_pool;
const fn_info = mod.typeToFunc(ty).?;
// TODO: Put this somewhere in Sema.zig
if (fn_info.is_var_args)
@ -1333,13 +1333,13 @@ pub const DeclGen = struct {
} });
},
.Struct => {
const struct_ty = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const struct_ty = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
const member_types = try self.gpa.alloc(CacheRef, tuple.values.len);
defer self.gpa.free(member_types);
var member_index: usize = 0;
for (tuple.types, tuple.values) |field_ty, field_val| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
member_types[member_index] = try self.resolveType(field_ty.toType(), .indirect);
@ -1369,12 +1369,12 @@ pub const DeclGen = struct {
while (it.next()) |field_and_index| {
const field = field_and_index.field;
const index = field_and_index.index;
const field_name = mod.intern_pool.stringToSlice(struct_obj.fields.keys()[index]);
const field_name = ip.stringToSlice(struct_obj.fields.keys()[index]);
try member_types.append(try self.resolveType(field.ty, .indirect));
try member_names.append(try self.spv.resolveString(field_name));
}
const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(self.module));
const name = ip.stringToSlice(try struct_obj.getFullyQualifiedName(self.module));
return try self.spv.resolve(.{ .struct_type = .{
.name = try self.spv.resolveString(name),

View File

@ -327,7 +327,7 @@ pub const DeclState = struct {
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
for (fields.types, 0..) |field_ty, field_index| {
for (fields.types.get(ip), 0..) |field_ty, field_index| {
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string

View File

@ -170,7 +170,8 @@ pub const Type = struct {
/// Prints a name suitable for `@typeName`.
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
const sign_char: u8 = switch (int_type.signedness) {
.signed => 'i',
@ -257,7 +258,6 @@ pub const Type = struct {
try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
},
.error_set_type => |error_set_type| {
const ip = &mod.intern_pool;
const names = error_set_type.names;
try writer.writeAll("error{");
for (names.get(ip), 0..) |name, i| {
@ -330,13 +330,13 @@ pub const Type = struct {
return writer.writeAll("@TypeOf(.{})");
}
try writer.writeAll("struct{");
for (anon_struct.types, anon_struct.values, 0..) |field_ty, val, i| {
for (anon_struct.types.get(ip), anon_struct.values.get(ip), 0..) |field_ty, val, i| {
if (i != 0) try writer.writeAll(", ");
if (val != .none) {
try writer.writeAll("comptime ");
}
if (anon_struct.names.len != 0) {
try writer.print("{}: ", .{anon_struct.names[i].fmt(&mod.intern_pool)});
try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&mod.intern_pool)});
}
try print(field_ty.toType(), writer, mod);
@ -587,7 +587,7 @@ pub const Type = struct {
}
},
.anon_struct_type => |tuple| {
for (tuple.types, tuple.values) |field_ty, val| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
if (try field_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true;
}
@ -1055,7 +1055,7 @@ pub const Type = struct {
},
.anon_struct_type => |tuple| {
var big_align: u32 = 0;
for (tuple.types, tuple.values) |field_ty, val| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
if (!(field_ty.toType().hasRuntimeBits(mod))) continue;
@ -2155,7 +2155,7 @@ pub const Type = struct {
pub fn vectorLen(ty: Type, mod: *const Module) u32 {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.vector_type => |vector_type| vector_type.len,
.anon_struct_type => |tuple| @as(u32, @intCast(tuple.types.len)),
.anon_struct_type => |tuple| @intCast(tuple.types.len),
else => unreachable,
};
}
@ -2536,13 +2536,13 @@ pub const Type = struct {
},
.anon_struct_type => |tuple| {
for (tuple.values) |val| {
for (tuple.values.get(ip)) |val| {
if (val == .none) return null;
}
// In this case the struct has all comptime-known fields and
// therefore has one possible value.
// TODO: write something like getCoercedInts to avoid needing to dupe
const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values);
const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values.get(ip));
defer mod.gpa.free(duped_values);
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@ -2732,7 +2732,7 @@ pub const Type = struct {
},
.anon_struct_type => |tuple| {
for (tuple.types, tuple.values) |field_ty, val| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
const have_comptime_val = val != .none;
if (!have_comptime_val and field_ty.toType().comptimeOnly(mod)) return true;
}
@ -2996,13 +2996,14 @@ pub const Type = struct {
}
pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveFieldTypes());
return struct_obj.fields.keys()[field_index];
},
.anon_struct_type => |anon_struct| anon_struct.names[field_index],
.anon_struct_type => |anon_struct| anon_struct.names.get(ip)[field_index],
else => unreachable,
};
}
@ -3032,7 +3033,7 @@ pub const Type = struct {
const union_obj = ip.loadUnionType(union_type);
return union_obj.field_types.get(ip)[index].toType();
},
.anon_struct_type => |anon_struct| anon_struct.types[index].toType(),
.anon_struct_type => |anon_struct| anon_struct.types.get(ip)[index].toType(),
else => unreachable,
};
}
@ -3046,7 +3047,7 @@ pub const Type = struct {
return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
},
.anon_struct_type => |anon_struct| {
return anon_struct.types[index].toType().abiAlignment(mod);
return anon_struct.types.get(ip)[index].toType().abiAlignment(mod);
},
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
@ -3057,7 +3058,8 @@ pub const Type = struct {
}
pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const val = struct_obj.fields.values()[index].default_val;
@ -3066,7 +3068,7 @@ pub const Type = struct {
return val.toValue();
},
.anon_struct_type => |anon_struct| {
const val = anon_struct.values[index];
const val = anon_struct.values.get(ip)[index];
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
return val.toValue();
@ -3076,7 +3078,8 @@ pub const Type = struct {
}
pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const field = struct_obj.fields.values()[index];
@ -3087,9 +3090,9 @@ pub const Type = struct {
}
},
.anon_struct_type => |tuple| {
const val = tuple.values[index];
const val = tuple.values.get(ip)[index];
if (val == .none) {
return tuple.types[index].toType().onePossibleValue(mod);
return tuple.types.get(ip)[index].toType().onePossibleValue(mod);
} else {
return val.toValue();
}
@ -3099,14 +3102,15 @@ pub const Type = struct {
}
pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) return false;
const field = struct_obj.fields.values()[index];
return field.is_comptime;
},
.anon_struct_type => |anon_struct| anon_struct.values[index] != .none,
.anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
else => unreachable,
};
}
@ -3202,7 +3206,7 @@ pub const Type = struct {
var offset: u64 = 0;
var big_align: u32 = 0;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) {
// comptime field
if (i == index) return offset;

View File

@ -268,6 +268,7 @@ pub const Value = struct {
pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern();
const ip = &mod.intern_pool;
switch (val.tag()) {
.eu_payload => {
const pl = val.castTag(.eu_payload).?.data;
@ -286,7 +287,7 @@ pub const Value = struct {
.slice => {
const pl = val.castTag(.slice).?.data;
const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod);
var ptr_key = mod.intern_pool.indexToKey(ptr).ptr;
var ptr_key = ip.indexToKey(ptr).ptr;
assert(ptr_key.len == .none);
ptr_key.ty = ty.toIntern();
ptr_key.len = try pl.len.intern(Type.usize, mod);
@ -311,11 +312,11 @@ pub const Value = struct {
const old_elems = val.castTag(.aggregate).?.data[0..len];
const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len);
defer mod.gpa.free(new_elems);
const ty_key = mod.intern_pool.indexToKey(ty.toIntern());
const ty_key = ip.indexToKey(ty.toIntern());
for (new_elems, old_elems, 0..) |*new_elem, old_elem, field_i|
new_elem.* = try old_elem.intern(switch (ty_key) {
.struct_type => ty.structFieldType(field_i, mod),
.anon_struct_type => |info| info.types[field_i].toType(),
.anon_struct_type => |info| info.types.get(ip)[field_i].toType(),
inline .array_type, .vector_type => |info| info.child.toType(),
else => unreachable,
}, mod);