Value: add intern and unintern to facilitate code conversion

This allows some code (like struct initializers) to use interned types
while other code (such as comptime mutation) continues to use legacy
types.

With these changes, a `zig build-obj empty.zig` gets to a crash on
missing interned error union types.
This commit is contained in:
Jacob Young 2023-05-20 09:35:11 -04:00 committed by Andrew Kelley
parent be78a12d7d
commit 115c089562
6 changed files with 604 additions and 178 deletions

View File

@ -100,6 +100,16 @@ pub const MapIndex = enum(u32) {
} }
}; };
/// A u32-backed index that starts at `zero` and is advanced via `increment`.
/// `comptime_field_ptr` is a sentinel occupying maxInt(u32).
pub const RuntimeIndex = enum(u32) {
zero = 0,
comptime_field_ptr = std.math.maxInt(u32),
_,
/// Advances `ri` to the next index in place.
/// NOTE(review): incrementing past `comptime_field_ptr` would overflow
/// the backing u32 — presumably callers never reach the sentinel; confirm.
pub fn increment(ri: *RuntimeIndex) void {
ri.* = @intToEnum(RuntimeIndex, @enumToInt(ri.*) + 1);
}
};
/// An index into `string_bytes`. /// An index into `string_bytes`.
pub const NullTerminatedString = enum(u32) { pub const NullTerminatedString = enum(u32) {
_, _,
@ -478,11 +488,27 @@ pub const Key = union(enum) {
}; };
pub const Ptr = struct { pub const Ptr = struct {
/// This is the pointer type, not the element type.
ty: Index, ty: Index,
/// The value of the address that the pointer points to.
addr: Addr, addr: Addr,
/// This could be `none` if size is not a slice.
len: Index = .none,
pub const Addr = union(enum) { pub const Addr = union(enum) {
@"var": struct {
init: Index,
owner_decl: Module.Decl.Index,
lib_name: OptionalNullTerminatedString,
is_const: bool,
is_threadlocal: bool,
is_weak_linkage: bool,
},
decl: Module.Decl.Index, decl: Module.Decl.Index,
mut_decl: struct {
decl: Module.Decl.Index,
runtime_index: RuntimeIndex,
},
int: Index, int: Index,
}; };
}; };
@ -577,7 +603,9 @@ pub const Key = union(enum) {
// This is sound due to pointer provenance rules. // This is sound due to pointer provenance rules.
std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr)); std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr));
switch (ptr.addr) { switch (ptr.addr) {
.@"var" => |@"var"| std.hash.autoHash(hasher, @"var".owner_decl),
.decl => |decl| std.hash.autoHash(hasher, decl), .decl => |decl| std.hash.autoHash(hasher, decl),
.mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl),
.int => |int| std.hash.autoHash(hasher, int), .int => |int| std.hash.autoHash(hasher, int),
} }
}, },
@ -697,7 +725,9 @@ pub const Key = union(enum) {
if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false;
return switch (a_info.addr) { return switch (a_info.addr) {
.@"var" => |a_var| a_var.owner_decl == b_info.addr.@"var".owner_decl,
.decl => |a_decl| a_decl == b_info.addr.decl, .decl => |a_decl| a_decl == b_info.addr.decl,
.mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl),
.int => |a_int| a_int == b_info.addr.int, .int => |a_int| a_int == b_info.addr.int,
}; };
}, },
@ -1330,6 +1360,12 @@ pub const Tag = enum(u8) {
/// A value that can be represented with only an enum tag. /// A value that can be represented with only an enum tag.
/// data is SimpleValue enum value. /// data is SimpleValue enum value.
simple_value, simple_value,
/// A pointer to a var.
/// data is extra index of PtrVal, which contains the type and address.
ptr_var,
/// A pointer to a decl that can be mutated at comptime.
/// data is extra index of PtrMutDecl, which contains the type and address.
ptr_mut_decl,
/// A pointer to a decl. /// A pointer to a decl.
/// data is extra index of PtrDecl, which contains the type and address. /// data is extra index of PtrDecl, which contains the type and address.
ptr_decl, ptr_decl,
@ -1338,6 +1374,11 @@ pub const Tag = enum(u8) {
/// Only pointer types are allowed to have this encoding. Optional types must use /// Only pointer types are allowed to have this encoding. Optional types must use
/// `opt_payload` or `opt_null`. /// `opt_payload` or `opt_null`.
ptr_int, ptr_int,
/// A slice.
/// data is extra index of PtrSlice, which contains the ptr and len values
/// In order to use this encoding, one must ensure that the `InternPool`
/// already contains the slice type corresponding to this payload.
ptr_slice,
/// An optional value that is non-null. /// An optional value that is non-null.
/// data is Index of the payload value. /// data is Index of the payload value.
/// In order to use this encoding, one must ensure that the `InternPool` /// In order to use this encoding, one must ensure that the `InternPool`
@ -1672,16 +1713,45 @@ pub const PackedU64 = packed struct(u64) {
} }
}; };
/// Extra-data payload for the `ptr_var` tag: a pointer to a `var`,
/// carrying the pointer type plus the variable's declaration details.
pub const PtrVar = struct {
/// The pointer type (not the element type).
ty: Index,
/// If flags.is_extern == true this is `none`.
init: Index,
/// The Decl that owns this variable.
owner_decl: Module.Decl.Index,
/// Library name if specified.
/// For example `extern "c" var stderrp = ...` would have 'c' as library name.
lib_name: OptionalNullTerminatedString,
flags: Flags,
/// Boolean attributes packed into a single u32 so the struct serializes
/// field-per-u32 into the intern pool's `extra` array.
pub const Flags = packed struct(u32) {
is_const: bool,
is_threadlocal: bool,
is_weak_linkage: bool,
/// Padding; contents are unspecified.
unused: u29 = undefined,
};
};
pub const PtrDecl = struct { pub const PtrDecl = struct {
ty: Index, ty: Index,
decl: Module.Decl.Index, decl: Module.Decl.Index,
}; };
/// Extra-data payload for the `ptr_mut_decl` tag: a pointer to a decl
/// that can be mutated at comptime.
pub const PtrMutDecl = struct {
/// The pointer type (not the element type).
ty: Index,
decl: Module.Decl.Index,
runtime_index: RuntimeIndex,
};
pub const PtrInt = struct { pub const PtrInt = struct {
ty: Index, ty: Index,
addr: Index, addr: Index,
}; };
/// Extra-data payload for the `ptr_slice` tag: a slice value stored as
/// two interned values, its ptr and its len.
pub const PtrSlice = struct {
/// Interned value of the slice's ptr field (a many-pointer value).
ptr: Index,
/// Interned value of the slice's len field.
len: Index,
};
/// Trailing: Limb for every limbs_len /// Trailing: Limb for every limbs_len
pub const Int = struct { pub const Int = struct {
ty: Index, ty: Index,
@ -1994,6 +2064,30 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.val = payload_val, .val = payload_val,
} }; } };
}, },
.ptr_var => {
const info = ip.extraData(PtrVar, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .@"var" = .{
.init = info.init,
.owner_decl = info.owner_decl,
.lib_name = info.lib_name,
.is_const = info.flags.is_const,
.is_threadlocal = info.flags.is_threadlocal,
.is_weak_linkage = info.flags.is_weak_linkage,
} },
} };
},
.ptr_mut_decl => {
const info = ip.extraData(PtrMutDecl, data);
return .{ .ptr = .{
.ty = info.ty,
.addr = .{ .mut_decl = .{
.decl = info.decl,
.runtime_index = info.runtime_index,
} },
} };
},
.ptr_decl => { .ptr_decl => {
const info = ip.extraData(PtrDecl, data); const info = ip.extraData(PtrDecl, data);
return .{ .ptr = .{ return .{ .ptr = .{
@ -2008,6 +2102,18 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
.addr = .{ .int = info.addr }, .addr = .{ .int = info.addr },
} }; } };
}, },
.ptr_slice => {
const info = ip.extraData(PtrSlice, data);
const ptr = ip.indexToKey(info.ptr).ptr;
var ptr_ty = ip.indexToKey(ptr.ty);
assert(ptr_ty.ptr_type.size == .Many);
ptr_ty.ptr_type.size = .Slice;
return .{ .ptr = .{
.ty = ip.getAssumeExists(ptr_ty),
.addr = ptr.addr,
.len = info.len,
} };
},
.int_u8 => .{ .int = .{ .int_u8 => .{ .int = .{
.ty = .u8_type, .ty = .u8_type,
.storage = .{ .u64 = data }, .storage = .{ .u64 = data },
@ -2472,31 +2578,67 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.extern_func => @panic("TODO"), .extern_func => @panic("TODO"),
.ptr => |ptr| switch (ptr.addr) { .ptr => |ptr| switch (ip.items.items(.tag)[@enumToInt(ptr.ty)]) {
.decl => |decl| { .type_pointer => {
assert(ptr.ty != .none); assert(ptr.len == .none);
switch (ptr.addr) {
.@"var" => |@"var"| ip.items.appendAssumeCapacity(.{
.tag = .ptr_var,
.data = try ip.addExtra(gpa, PtrVar{
.ty = ptr.ty,
.init = @"var".init,
.owner_decl = @"var".owner_decl,
.lib_name = @"var".lib_name,
.flags = .{
.is_const = @"var".is_const,
.is_threadlocal = @"var".is_threadlocal,
.is_weak_linkage = @"var".is_weak_linkage,
},
}),
}),
.decl => |decl| ip.items.appendAssumeCapacity(.{
.tag = .ptr_decl,
.data = try ip.addExtra(gpa, PtrDecl{
.ty = ptr.ty,
.decl = decl,
}),
}),
.mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{
.tag = .ptr_mut_decl,
.data = try ip.addExtra(gpa, PtrMutDecl{
.ty = ptr.ty,
.decl = mut_decl.decl,
.runtime_index = mut_decl.runtime_index,
}),
}),
.int => |int| ip.items.appendAssumeCapacity(.{
.tag = .ptr_int,
.data = try ip.addExtra(gpa, PtrInt{
.ty = ptr.ty,
.addr = int,
}),
}),
}
},
.type_slice => {
assert(ptr.len != .none);
var new_key = key;
new_key.ptr.ty = @intToEnum(Index, ip.items.items(.data)[@enumToInt(ptr.ty)]);
new_key.ptr.len = .none;
const ptr_index = try get(ip, gpa, new_key);
try ip.items.ensureUnusedCapacity(gpa, 1);
ip.items.appendAssumeCapacity(.{ ip.items.appendAssumeCapacity(.{
.tag = .ptr_decl, .tag = .ptr_slice,
.data = try ip.addExtra(gpa, PtrDecl{ .data = try ip.addExtra(gpa, PtrSlice{
.ty = ptr.ty, .ptr = ptr_index,
.decl = decl, .len = ptr.len,
}),
});
},
.int => |int| {
assert(ptr.ty != .none);
ip.items.appendAssumeCapacity(.{
.tag = .ptr_int,
.data = try ip.addExtra(gpa, PtrInt{
.ty = ptr.ty,
.addr = int,
}), }),
}); });
}, },
else => unreachable,
}, },
.opt => |opt| { .opt => |opt| {
assert(opt.ty != .none);
assert(ip.isOptionalType(opt.ty)); assert(ip.isOptionalType(opt.ty));
ip.items.appendAssumeCapacity(if (opt.val == .none) .{ ip.items.appendAssumeCapacity(if (opt.val == .none) .{
.tag = .opt_null, .tag = .opt_null,
@ -3087,11 +3229,15 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)), Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)),
MapIndex => @enumToInt(@field(extra, field.name)), MapIndex => @enumToInt(@field(extra, field.name)),
OptionalMapIndex => @enumToInt(@field(extra, field.name)), OptionalMapIndex => @enumToInt(@field(extra, field.name)),
RuntimeIndex => @enumToInt(@field(extra, field.name)),
NullTerminatedString => @enumToInt(@field(extra, field.name)),
OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)),
Pointer.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)),
TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)),
Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)),
Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), Pointer.VectorIndex => @enumToInt(@field(extra, field.name)),
PtrVar.Flags => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type: " ++ @typeName(field.type)), else => @compileError("bad field type: " ++ @typeName(field.type)),
}); });
} }
@ -3149,11 +3295,15 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data:
Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32), Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32),
MapIndex => @intToEnum(MapIndex, int32), MapIndex => @intToEnum(MapIndex, int32),
OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), OptionalMapIndex => @intToEnum(OptionalMapIndex, int32),
RuntimeIndex => @intToEnum(RuntimeIndex, int32),
NullTerminatedString => @intToEnum(NullTerminatedString, int32),
OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32),
i32 => @bitCast(i32, int32), i32 => @bitCast(i32, int32),
Pointer.Flags => @bitCast(Pointer.Flags, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32),
TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32),
Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32),
Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32),
PtrVar.Flags => @bitCast(PtrVar.Flags, int32),
else => @compileError("bad field type: " ++ @typeName(field.type)), else => @compileError("bad field type: " ++ @typeName(field.type)),
}; };
} }
@ -3274,7 +3424,7 @@ pub fn childType(ip: InternPool, i: Index) Index {
}; };
} }
/// Given a slice type, returns the type of the pointer field. /// Given a slice type, returns the type of the ptr field.
pub fn slicePtrType(ip: InternPool, i: Index) Index { pub fn slicePtrType(ip: InternPool, i: Index) Index {
switch (i) { switch (i) {
.const_slice_u8_type => return .manyptr_const_u8_type, .const_slice_u8_type => return .manyptr_const_u8_type,
@ -3288,10 +3438,29 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index {
} }
} }
/// Given a slice value, returns the value of the ptr field.
/// Asserts (via `unreachable`) that `i` refers to a `ptr_slice` item.
pub fn slicePtr(ip: InternPool, i: Index) Index {
const item = ip.items.get(@enumToInt(i));
switch (item.tag) {
.ptr_slice => return ip.extraData(PtrSlice, item.data).ptr,
else => unreachable, // not a slice value
}
}
/// Given a slice value, returns the value of the len field.
/// Asserts (via `unreachable`) that `i` refers to a `ptr_slice` item.
pub fn sliceLen(ip: InternPool, i: Index) Index {
const item = ip.items.get(@enumToInt(i));
switch (item.tag) {
.ptr_slice => return ip.extraData(PtrSlice, item.data).len,
else => unreachable, // not a slice value
}
}
/// Given an existing value, returns the same value but with the supplied type. /// Given an existing value, returns the same value but with the supplied type.
/// Only some combinations are allowed: /// Only some combinations are allowed:
/// * int <=> int /// * int <=> int
/// * int <=> enum /// * int <=> enum
/// * ptr <=> ptr
pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index {
switch (ip.indexToKey(val)) { switch (ip.indexToKey(val)) {
.int => |int| switch (ip.indexToKey(new_ty)) { .int => |int| switch (ip.indexToKey(new_ty)) {
@ -3305,6 +3474,13 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
// Assume new_ty is an integer type. // Assume new_ty is an integer type.
return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty); return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty);
}, },
.ptr => |ptr| switch (ip.indexToKey(new_ty)) {
.ptr_type => return ip.get(gpa, .{ .ptr = .{
.ty = new_ty,
.addr = ptr.addr,
} }),
else => unreachable,
},
else => unreachable, else => unreachable,
} }
} }
@ -3380,6 +3556,15 @@ pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.Inferre
return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional(); return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional();
} }
/// Returns true if `ty` is an interned pointer or slice type
/// (item tag `.type_pointer` or `.type_slice`); `none` yields false.
pub fn isPointerType(ip: InternPool, ty: Index) bool {
const tags = ip.items.items(.tag);
if (ty == .none) return false;
return switch (tags[@enumToInt(ty)]) {
.type_pointer, .type_slice => true,
else => false,
};
}
pub fn isOptionalType(ip: InternPool, ty: Index) bool { pub fn isOptionalType(ip: InternPool, ty: Index) bool {
const tags = ip.items.items(.tag); const tags = ip.items.items(.tag);
if (ty == .none) return false; if (ty == .none) return false;
@ -3485,8 +3670,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.undef => 0, .undef => 0,
.simple_type => 0, .simple_type => 0,
.simple_value => 0, .simple_value => 0,
.ptr_var => @sizeOf(PtrVar),
.ptr_decl => @sizeOf(PtrDecl), .ptr_decl => @sizeOf(PtrDecl),
.ptr_mut_decl => @sizeOf(PtrMutDecl),
.ptr_int => @sizeOf(PtrInt), .ptr_int => @sizeOf(PtrInt),
.ptr_slice => @sizeOf(PtrSlice),
.opt_null => 0, .opt_null => 0,
.opt_payload => 0, .opt_payload => 0,
.int_u8 => 0, .int_u8 => 0,

View File

@ -5762,7 +5762,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
// Crucially, this happens *after* we set the function state to success above, // Crucially, this happens *after* we set the function state to success above,
// so that dependencies on the function body will now be satisfied rather than // so that dependencies on the function body will now be satisfied rather than
// result in circular dependency errors. // result in circular dependency errors.
sema.resolveFnTypes(fn_ty_info) catch |err| switch (err) { sema.resolveFnTypes(mod.typeToFunc(fn_ty).?) catch |err| switch (err) {
error.NeededSourceLocation => unreachable, error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable, error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable, error.ComptimeReturn => unreachable,

View File

@ -13072,25 +13072,32 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// as zero-filling a byte array. // as zero-filling a byte array.
if (lhs_len == 1) { if (lhs_len == 1) {
const elem_val = try lhs_sub_val.elemValue(mod, 0); const elem_val = try lhs_sub_val.elemValue(mod, 0);
break :v try Value.Tag.repeated.create(sema.arena, elem_val); break :v try mod.intern(.{ .aggregate = .{
.ty = result_ty.ip_index,
.storage = .{ .repeated_elem = elem_val.ip_index },
} });
} }
const element_vals = try sema.arena.alloc(Value, final_len_including_sent); const element_vals = try sema.arena.alloc(InternPool.Index, final_len_including_sent);
var elem_i: usize = 0; var elem_i: usize = 0;
while (elem_i < result_len) { while (elem_i < result_len) {
var lhs_i: usize = 0; var lhs_i: usize = 0;
while (lhs_i < lhs_len) : (lhs_i += 1) { while (lhs_i < lhs_len) : (lhs_i += 1) {
const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); const elem_val = try lhs_sub_val.elemValue(mod, lhs_i);
element_vals[elem_i] = elem_val; assert(elem_val.ip_index != .none);
element_vals[elem_i] = elem_val.ip_index;
elem_i += 1; elem_i += 1;
} }
} }
if (lhs_info.sentinel) |sent_val| { if (lhs_info.sentinel) |sent_val| {
element_vals[result_len] = sent_val; element_vals[result_len] = sent_val.ip_index;
} }
break :v try Value.Tag.aggregate.create(sema.arena, element_vals); break :v try mod.intern(.{ .aggregate = .{
.ty = result_ty.ip_index,
.storage = .{ .elems = element_vals },
} });
}; };
return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null); return sema.addConstantMaybeRef(block, result_ty, val.toValue(), ptr_addrspace != null);
} }
try sema.requireRuntimeBlock(block, src, lhs_src); try sema.requireRuntimeBlock(block, src, lhs_src);
@ -18111,12 +18118,16 @@ fn finishStructInit(
} else null; } else null;
const runtime_index = opt_runtime_index orelse { const runtime_index = opt_runtime_index orelse {
const values = try sema.arena.alloc(Value, field_inits.len); const elems = try sema.arena.alloc(InternPool.Index, field_inits.len);
for (field_inits, 0..) |field_init, i| { for (elems, field_inits, 0..) |*elem, field_init, field_i| {
values[i] = (sema.resolveMaybeUndefVal(field_init) catch unreachable).?; elem.* = try (sema.resolveMaybeUndefVal(field_init) catch unreachable).?
.intern(struct_ty.structFieldType(field_i, mod), mod);
} }
const struct_val = try Value.Tag.aggregate.create(sema.arena, values); const struct_val = try mod.intern(.{ .aggregate = .{
return sema.addConstantMaybeRef(block, struct_ty, struct_val, is_ref); .ty = struct_ty.ip_index,
.storage = .{ .elems = elems },
} });
return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref);
}; };
if (is_ref) { if (is_ref) {
@ -18195,21 +18206,20 @@ fn zirStructInitAnon(
const init = try sema.resolveInst(item.data.init); const init = try sema.resolveInst(item.data.init);
field_ty.* = sema.typeOf(init).ip_index; field_ty.* = sema.typeOf(init).ip_index;
if (types[i].toType().zigTypeTag(mod) == .Opaque) { if (field_ty.toType().zigTypeTag(mod) == .Opaque) {
const msg = msg: { const msg = msg: {
const decl = sema.mod.declPtr(block.src_decl); const decl = sema.mod.declPtr(block.src_decl);
const field_src = mod.initSrc(src.node_offset.x, decl, i); const field_src = mod.initSrc(src.node_offset.x, decl, i);
const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(sema.gpa); errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, types[i].toType()); try sema.addDeclaredHereNote(msg, field_ty.toType());
break :msg msg; break :msg msg;
}; };
return sema.failWithOwnedErrorMsg(msg); return sema.failWithOwnedErrorMsg(msg);
} }
if (try sema.resolveMaybeUndefVal(init)) |init_val| { if (try sema.resolveMaybeUndefVal(init)) |init_val| {
assert(init_val.ip_index != .none); values[i] = try init_val.intern(field_ty.toType(), mod);
values[i] = init_val.ip_index;
} else { } else {
values[i] = .none; values[i] = .none;
runtime_index = i; runtime_index = i;
@ -24891,9 +24901,7 @@ fn structFieldVal(
if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
return sema.addConstant(field.ty, opv); return sema.addConstant(field.ty, opv);
} }
return sema.addConstant(field.ty, try struct_val.fieldValue(field.ty, mod, field_index));
const field_values = struct_val.castTag(.aggregate).?.data;
return sema.addConstant(field.ty, field_values[field_index]);
} }
try sema.requireRuntimeBlock(block, src, null); try sema.requireRuntimeBlock(block, src, null);
@ -27925,7 +27933,24 @@ fn beginComptimePtrMutation(
ptr_elem_ty, ptr_elem_ty,
parent.decl_ref_mut, parent.decl_ref_mut,
), ),
.repeated => {
const arena = parent.beginArena(sema.mod);
defer parent.finishArena(sema.mod);
const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod));
@memset(elems, val_ptr.castTag(.repeated).?.data);
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
return beginComptimePtrMutationInner(
sema,
block,
src,
parent.ty.structFieldType(field_index, mod),
&elems[field_index],
ptr_elem_ty,
parent.decl_ref_mut,
);
},
.@"union" => { .@"union" => {
// We need to set the active field of the union. // We need to set the active field of the union.
const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod);
@ -28107,6 +28132,13 @@ fn beginComptimePtrMutationInner(
const mod = sema.mod; const mod = sema.mod;
const target = mod.getTarget(); const target = mod.getTarget();
const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok;
const decl = mod.declPtr(decl_ref_mut.decl_index);
var decl_arena: std.heap.ArenaAllocator = undefined;
const allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena);
defer decl.value_arena.?.release(&decl_arena);
decl_val.* = try decl_val.unintern(allocator, mod);
if (coerce_ok) { if (coerce_ok) {
return ComptimePtrMutationKit{ return ComptimePtrMutationKit{
.decl_ref_mut = decl_ref_mut, .decl_ref_mut = decl_ref_mut,
@ -28412,6 +28444,27 @@ fn beginComptimePtrLoad(
}, },
else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
.int => return error.RuntimeLoad, .int => return error.RuntimeLoad,
.ptr => |ptr| switch (ptr.addr) {
.@"var", .int => return error.RuntimeLoad,
.decl, .mut_decl => blk: {
const decl_index = switch (ptr.addr) {
.decl => |decl| decl,
.mut_decl => |mut_decl| mut_decl.decl,
else => unreachable,
};
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad;
const layout_defined = decl.ty.hasWellDefinedLayout(mod);
break :blk ComptimePtrLoadKit{
.parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
.pointee = decl_tv,
.is_mutable = false,
.ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
};
},
},
else => unreachable, else => unreachable,
}, },
}; };
@ -29425,7 +29478,7 @@ fn analyzeSlicePtr(
const result_ty = slice_ty.slicePtrFieldType(mod); const result_ty = slice_ty.slicePtrFieldType(mod);
if (try sema.resolveMaybeUndefVal(slice)) |val| { if (try sema.resolveMaybeUndefVal(slice)) |val| {
if (val.isUndef(mod)) return sema.addConstUndef(result_ty); if (val.isUndef(mod)) return sema.addConstUndef(result_ty);
return sema.addConstant(result_ty, val.slicePtr()); return sema.addConstant(result_ty, val.slicePtr(mod));
} }
try sema.requireRuntimeBlock(block, slice_src, null); try sema.requireRuntimeBlock(block, slice_src, null);
return block.addTyOp(.slice_ptr, result_ty, slice); return block.addTyOp(.slice_ptr, result_ty, slice);

View File

@ -566,7 +566,7 @@ pub const DeclGen = struct {
try writer.writeAll("){ .ptr = "); try writer.writeAll("){ .ptr = ");
} }
try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(), .Initializer); try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(mod), .Initializer);
const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); const len_val = try mod.intValue(Type.usize, val.sliceLen(mod));

View File

@ -3363,125 +3363,223 @@ pub const DeclGen = struct {
}, },
else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
.int => |int| return lowerIntAsPtr(dg, int), .int => |int| return lowerIntAsPtr(dg, int),
.ptr => |ptr| {
const ptr_val = switch (ptr.addr) {
.@"var" => |@"var"| ptr: {
const decl = dg.module.declPtr(@"var".owner_decl);
dg.module.markDeclAlive(decl);
const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
const val = try dg.resolveGlobalDecl(@"var".owner_decl);
const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace))
else
val;
break :ptr addrspace_casted_ptr;
},
.decl => |decl| try lowerDeclRefValue(dg, tv, decl),
.mut_decl => |mut_decl| try lowerDeclRefValue(dg, tv, mut_decl.decl),
.int => |int| lowerIntAsPtr(dg, mod.intern_pool.indexToKey(int).int),
};
switch (ptr.len) {
.none => return ptr_val,
else => {
const fields: [2]*llvm.Value = .{
ptr_val,
try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }),
};
return dg.context.constStruct(&fields, fields.len, .False);
},
}
},
else => unreachable, else => unreachable,
}, },
}, },
.Array => switch (tv.val.tag()) { .Array => switch (tv.val.ip_index) {
.bytes => { .none => switch (tv.val.tag()) {
const bytes = tv.val.castTag(.bytes).?.data; .bytes => {
return dg.context.constString( const bytes = tv.val.castTag(.bytes).?.data;
bytes.ptr, return dg.context.constString(
@intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), bytes.ptr,
.True, // Don't null terminate. Bytes has the sentinel, if any. @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
); .True, // Don't null terminate. Bytes has the sentinel, if any.
}, );
.str_lit => { },
const str_lit = tv.val.castTag(.str_lit).?.data; .str_lit => {
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; const str_lit = tv.val.castTag(.str_lit).?.data;
if (tv.ty.sentinel(mod)) |sent_val| { const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); if (tv.ty.sentinel(mod)) |sent_val| {
if (byte == 0 and bytes.len > 0) { const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
if (byte == 0 and bytes.len > 0) {
return dg.context.constString(
bytes.ptr,
@intCast(c_uint, bytes.len),
.False, // Yes, null terminate.
);
}
var array = std.ArrayList(u8).init(dg.gpa);
defer array.deinit();
try array.ensureUnusedCapacity(bytes.len + 1);
array.appendSliceAssumeCapacity(bytes);
array.appendAssumeCapacity(byte);
return dg.context.constString(
array.items.ptr,
@intCast(c_uint, array.items.len),
.True, // Don't null terminate.
);
} else {
return dg.context.constString( return dg.context.constString(
bytes.ptr, bytes.ptr,
@intCast(c_uint, bytes.len), @intCast(c_uint, bytes.len),
.False, // Yes, null terminate. .True, // Don't null terminate. `bytes` has the sentinel, if any.
); );
} }
var array = std.ArrayList(u8).init(dg.gpa); },
defer array.deinit(); .aggregate => {
try array.ensureUnusedCapacity(bytes.len + 1); const elem_vals = tv.val.castTag(.aggregate).?.data;
array.appendSliceAssumeCapacity(bytes); const elem_ty = tv.ty.childType(mod);
array.appendAssumeCapacity(byte); const gpa = dg.gpa;
return dg.context.constString( const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod));
array.items.ptr, const llvm_elems = try gpa.alloc(*llvm.Value, len);
@intCast(c_uint, array.items.len), defer gpa.free(llvm_elems);
.True, // Don't null terminate. var need_unnamed = false;
); for (elem_vals[0..len], 0..) |elem_val, i| {
} else { llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
return dg.context.constString( need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
bytes.ptr,
@intCast(c_uint, bytes.len),
.True, // Don't null terminate. `bytes` has the sentinel, if any.
);
}
},
.aggregate => {
const elem_vals = tv.val.castTag(.aggregate).?.data;
const elem_ty = tv.ty.childType(mod);
const gpa = dg.gpa;
const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod));
const llvm_elems = try gpa.alloc(*llvm.Value, len);
defer gpa.free(llvm_elems);
var need_unnamed = false;
for (elem_vals[0..len], 0..) |elem_val, i| {
llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val });
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
}
if (need_unnamed) {
return dg.context.constStruct(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
.True,
);
} else {
const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
}
},
.repeated => {
const val = tv.val.castTag(.repeated).?.data;
const elem_ty = tv.ty.childType(mod);
const sentinel = tv.ty.sentinel(mod);
const len = @intCast(usize, tv.ty.arrayLen(mod));
const len_including_sent = len + @boolToInt(sentinel != null);
const gpa = dg.gpa;
const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
defer gpa.free(llvm_elems);
var need_unnamed = false;
if (len != 0) {
for (llvm_elems[0..len]) |*elem| {
elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
} }
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); if (need_unnamed) {
} return dg.context.constStruct(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
.True,
);
} else {
const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
}
},
.repeated => {
const val = tv.val.castTag(.repeated).?.data;
const elem_ty = tv.ty.childType(mod);
const sentinel = tv.ty.sentinel(mod);
const len = @intCast(usize, tv.ty.arrayLen(mod));
const len_including_sent = len + @boolToInt(sentinel != null);
const gpa = dg.gpa;
const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
defer gpa.free(llvm_elems);
if (sentinel) |sent| { var need_unnamed = false;
llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); if (len != 0) {
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); for (llvm_elems[0..len]) |*elem| {
} elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
}
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
}
if (need_unnamed) { if (sentinel) |sent| {
return dg.context.constStruct( llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent });
llvm_elems.ptr, need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
@intCast(c_uint, llvm_elems.len), }
.True,
); if (need_unnamed) {
} else { return dg.context.constStruct(
const llvm_elem_ty = try dg.lowerType(elem_ty); llvm_elems.ptr,
return llvm_elem_ty.constArray( @intCast(c_uint, llvm_elems.len),
llvm_elems.ptr, .True,
@intCast(c_uint, llvm_elems.len), );
); } else {
} const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
}
},
.empty_array_sentinel => {
const elem_ty = tv.ty.childType(mod);
const sent_val = tv.ty.sentinel(mod).?;
const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val });
const llvm_elems: [1]*llvm.Value = .{sentinel};
const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]);
if (need_unnamed) {
return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True);
} else {
const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
}
},
else => unreachable,
}, },
.empty_array_sentinel => { else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
const elem_ty = tv.ty.childType(mod); .aggregate => |aggregate| switch (aggregate.storage) {
const sent_val = tv.ty.sentinel(mod).?; .elems => |elem_vals| {
const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); const elem_ty = tv.ty.childType(mod);
const llvm_elems: [1]*llvm.Value = .{sentinel}; const gpa = dg.gpa;
const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len);
if (need_unnamed) { defer gpa.free(llvm_elems);
return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); var need_unnamed = false;
} else { for (elem_vals, 0..) |elem_val, i| {
const llvm_elem_ty = try dg.lowerType(elem_ty); llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() });
return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]);
} }
if (need_unnamed) {
return dg.context.constStruct(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
.True,
);
} else {
const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
}
},
.repeated_elem => |val| {
const elem_ty = tv.ty.childType(mod);
const sentinel = tv.ty.sentinel(mod);
const len = @intCast(usize, tv.ty.arrayLen(mod));
const len_including_sent = len + @boolToInt(sentinel != null);
const gpa = dg.gpa;
const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
defer gpa.free(llvm_elems);
var need_unnamed = false;
if (len != 0) {
for (llvm_elems[0..len]) |*elem| {
elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() });
}
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]);
}
if (sentinel) |sent| {
llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent });
need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]);
}
if (need_unnamed) {
return dg.context.constStruct(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
.True,
);
} else {
const llvm_elem_ty = try dg.lowerType(elem_ty);
return llvm_elem_ty.constArray(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
}
},
},
else => unreachable,
}, },
else => unreachable,
}, },
.Optional => { .Optional => {
comptime assert(optional_layout_version == 3); comptime assert(optional_layout_version == 3);
@ -3494,15 +3592,22 @@ pub const DeclGen = struct {
return non_null_bit; return non_null_bit;
} }
const llvm_ty = try dg.lowerType(tv.ty); const llvm_ty = try dg.lowerType(tv.ty);
if (tv.ty.optionalReprIsPayload(mod)) { if (tv.ty.optionalReprIsPayload(mod)) return switch (tv.val.ip_index) {
if (tv.val.castTag(.opt_payload)) |payload| { .none => if (tv.val.castTag(.opt_payload)) |payload|
return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }); try dg.lowerValue(.{ .ty = payload_ty, .val = payload.data })
} else if (is_pl) { else if (is_pl)
return dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }); try dg.lowerValue(.{ .ty = payload_ty, .val = tv.val })
} else { else
return llvm_ty.constNull(); llvm_ty.constNull(),
} .null_value => llvm_ty.constNull(),
} else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
.opt => |opt| switch (opt.val) {
.none => llvm_ty.constNull(),
else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }),
},
else => unreachable,
},
};
assert(payload_ty.zigTypeTag(mod) != .Fn); assert(payload_ty.zigTypeTag(mod) != .Fn);
const llvm_field_count = llvm_ty.countStructElementTypes(); const llvm_field_count = llvm_ty.countStructElementTypes();
@ -3589,7 +3694,6 @@ pub const DeclGen = struct {
}, },
.Struct => { .Struct => {
const llvm_struct_ty = try dg.lowerType(tv.ty); const llvm_struct_ty = try dg.lowerType(tv.ty);
const field_vals = tv.val.castTag(.aggregate).?.data;
const gpa = dg.gpa; const gpa = dg.gpa;
const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
@ -3623,7 +3727,7 @@ pub const DeclGen = struct {
const field_llvm_val = try dg.lowerValue(.{ const field_llvm_val = try dg.lowerValue(.{
.ty = field_ty.toType(), .ty = field_ty.toType(),
.val = field_vals[i], .val = try tv.val.fieldValue(field_ty.toType(), mod, i),
}); });
need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val);
@ -3669,13 +3773,12 @@ pub const DeclGen = struct {
comptime assert(Type.packed_struct_layout_version == 2); comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull(); var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0; var running_bits: u16 = 0;
for (field_vals, 0..) |field_val, i| { for (fields, 0..) |field, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try dg.lowerValue(.{ const non_int_val = try dg.lowerValue(.{
.ty = field.ty, .ty = field.ty,
.val = field_val, .val = try tv.val.fieldValue(field.ty, mod, i),
}); });
const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
const small_int_ty = dg.context.intType(ty_bit_size); const small_int_ty = dg.context.intType(ty_bit_size);
@ -3722,7 +3825,7 @@ pub const DeclGen = struct {
const field_llvm_val = try dg.lowerValue(.{ const field_llvm_val = try dg.lowerValue(.{
.ty = field.ty, .ty = field.ty,
.val = field_vals[field_and_index.index], .val = try tv.val.fieldValue(field.ty, mod, field_and_index.index),
}); });
need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val);
@ -3756,7 +3859,13 @@ pub const DeclGen = struct {
}, },
.Union => { .Union => {
const llvm_union_ty = try dg.lowerType(tv.ty); const llvm_union_ty = try dg.lowerType(tv.ty);
const tag_and_val = tv.val.castTag(.@"union").?.data; const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) {
.none => tv.val.castTag(.@"union").?.data,
else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
.un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() },
else => unreachable,
},
};
const layout = tv.ty.unionGetLayout(mod); const layout = tv.ty.unionGetLayout(mod);

View File

@ -602,6 +602,73 @@ pub const Value = struct {
return result; return result;
} }
pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
if (val.ip_index != .none) return val.ip_index;
switch (val.tag()) {
.slice => {
const pl = val.castTag(.slice).?.data;
const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod);
return mod.intern(.{ .ptr = .{
.ty = ty.ip_index,
.addr = mod.intern_pool.indexToKey(ptr).ptr.addr,
.len = try pl.len.intern(Type.usize, mod),
} });
},
.opt_payload => return mod.intern(.{ .opt = .{
.ty = ty.ip_index,
.val = try val.castTag(.opt_payload).?.data.intern(ty.childType(mod), mod),
} }),
.aggregate => {
const old_elems = val.castTag(.aggregate).?.data;
const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len);
defer mod.gpa.free(new_elems);
const ty_key = mod.intern_pool.indexToKey(ty.ip_index);
for (new_elems, old_elems, 0..) |*new_elem, old_elem, field_i|
new_elem.* = try old_elem.intern(switch (ty_key) {
.struct_type => ty.structFieldType(field_i, mod),
.anon_struct_type => |info| info.types[field_i].toType(),
inline .array_type, .vector_type => |info| info.child.toType(),
else => unreachable,
}, mod);
return mod.intern(.{ .aggregate = .{
.ty = ty.ip_index,
.storage = .{ .elems = new_elems },
} });
},
.repeated => return mod.intern(.{ .aggregate = .{
.ty = ty.ip_index,
.storage = .{ .repeated_elem = try val.castTag(.repeated).?.data.intern(
ty.structFieldType(0, mod),
mod,
) },
} }),
.@"union" => {
const pl = val.castTag(.@"union").?.data;
return mod.intern(.{ .un = .{
.ty = ty.ip_index,
.tag = try pl.tag.intern(ty.unionTagTypeHypothetical(mod), mod),
.val = try pl.val.intern(ty.unionFieldType(pl.tag, mod), mod),
} });
},
else => unreachable,
}
}
pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value {
if (val.ip_index == .none) return val;
switch (mod.intern_pool.indexToKey(val.ip_index)) {
.aggregate => |aggregate| switch (aggregate.storage) {
.elems => |old_elems| {
const new_elems = try arena.alloc(Value, old_elems.len);
for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue();
return Tag.aggregate.create(arena, new_elems);
},
.repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()),
},
else => return val,
}
}
pub fn toIntern(val: Value) InternPool.Index { pub fn toIntern(val: Value) InternPool.Index {
assert(val.ip_index != .none); assert(val.ip_index != .none);
return val.ip_index; return val.ip_index;
@ -2002,11 +2069,11 @@ pub const Value = struct {
const ptr_ty = ty.slicePtrFieldType(mod); const ptr_ty = ty.slicePtrFieldType(mod);
const a_ptr = switch (a_ty.ptrSize(mod)) { const a_ptr = switch (a_ty.ptrSize(mod)) {
.Slice => a.slicePtr(), .Slice => a.slicePtr(mod),
.One => a, .One => a,
else => unreachable, else => unreachable,
}; };
return try eqlAdvanced(a_ptr, ptr_ty, b.slicePtr(), ptr_ty, mod, opt_sema); return try eqlAdvanced(a_ptr, ptr_ty, b.slicePtr(mod), ptr_ty, mod, opt_sema);
}, },
.Many, .C, .One => {}, .Many, .C, .One => {},
}, },
@ -2429,7 +2496,8 @@ pub const Value = struct {
} }
} }
pub fn slicePtr(val: Value) Value { pub fn slicePtr(val: Value, mod: *Module) Value {
if (val.ip_index != .none) return mod.intern_pool.slicePtr(val.ip_index).toValue();
return switch (val.tag()) { return switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr, .slice => val.castTag(.slice).?.data.ptr,
// TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc. // TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc.
@ -2439,6 +2507,7 @@ pub const Value = struct {
} }
pub fn sliceLen(val: Value, mod: *Module) u64 { pub fn sliceLen(val: Value, mod: *Module) u64 {
if (val.ip_index != .none) return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod);
return switch (val.tag()) { return switch (val.tag()) {
.slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod), .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod),
.decl_ref => { .decl_ref => {
@ -2531,7 +2600,19 @@ pub const Value = struct {
else => unreachable, else => unreachable,
}, },
else => unreachable, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.ptr => |ptr| switch (ptr.addr) {
.@"var" => unreachable,
.decl => |decl| mod.declPtr(decl).val.elemValue(mod, index),
.mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index),
.int => unreachable,
},
.aggregate => |aggregate| switch (aggregate.storage) {
.elems => |elems| elems[index].toValue(),
.repeated_elem => |elem| elem.toValue(),
},
else => unreachable,
},
} }
} }
@ -2675,6 +2756,7 @@ pub const Value = struct {
} }
pub fn unionTag(val: Value, mod: *Module) Value { pub fn unionTag(val: Value, mod: *Module) Value {
if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag;
return switch (mod.intern_pool.indexToKey(val.ip_index)) { return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.undef, .enum_tag => val, .undef, .enum_tag => val,
.un => |un| un.tag.toValue(), .un => |un| un.tag.toValue(),
@ -2696,7 +2778,7 @@ pub const Value = struct {
else => val, else => val,
}; };
if (ptr_val.tag() == .elem_ptr) { if (ptr_val.ip_index == .none and ptr_val.tag() == .elem_ptr) {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
if (elem_ptr.elem_ty.eql(elem_ty, mod)) { if (elem_ptr.elem_ty.eql(elem_ty, mod)) {
return Tag.elem_ptr.create(arena, .{ return Tag.elem_ptr.create(arena, .{
@ -4809,10 +4891,12 @@ pub const Value = struct {
pub const base_tag = Tag.@"union"; pub const base_tag = Tag.@"union";
base: Payload = .{ .tag = base_tag }, base: Payload = .{ .tag = base_tag },
data: struct { data: Data,
pub const Data = struct {
tag: Value, tag: Value,
val: Value, val: Value,
}, };
}; };
}; };
@ -4844,15 +4928,7 @@ pub const Value = struct {
return if (x) one else zero; return if (x) one else zero;
} }
pub const RuntimeIndex = enum(u32) { pub const RuntimeIndex = InternPool.RuntimeIndex;
zero = 0,
comptime_field_ptr = std.math.maxInt(u32),
_,
pub fn increment(ri: *RuntimeIndex) void {
ri.* = @intToEnum(RuntimeIndex, @enumToInt(ri.*) + 1);
}
};
/// This function is used in the debugger pretty formatters in tools/ to fetch the /// This function is used in the debugger pretty formatters in tools/ to fetch the
/// Tag to Payload mapping to facilitate fancy debug printing for this type. /// Tag to Payload mapping to facilitate fancy debug printing for this type.