InternPool: remove more legacy values

Reinstate some tags that will be needed for comptime init.
Jacob Young authored 2023-05-25 05:47:25 -04:00; committed by Andrew Kelley
parent 6e0de1d116
commit 1a4626d2cf
19 changed files with 2104 additions and 2432 deletions


@@ -400,8 +400,6 @@ pub const Inst = struct {
/// A comptime-known value. Uses the `ty_pl` field, payload is index of
/// `values` array.
constant,
/// A comptime-known type. Uses the `ty` field.
const_ty,
/// A comptime-known value via an index into the InternPool.
/// Uses the `interned` field.
interned,
@@ -1257,8 +1255,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
.error_set_has_value,
=> return Type.bool,
.const_ty => return Type.type,
.alloc,
.ret_ptr,
.err_return_trace,
@@ -1435,7 +1431,6 @@ pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type {
const air_tags = air.instructions.items(.tag);
const air_datas = air.instructions.items(.data);
return switch (air_tags[inst_index]) {
.const_ty => air_datas[inst_index].ty,
.interned => air_datas[inst_index].interned.toType(),
else => unreachable,
};
@@ -1501,7 +1496,6 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
const air_datas = air.instructions.items(.data);
switch (air.instructions.items(.tag)[inst_index]) {
.constant => return air.values[air_datas[inst_index].ty_pl.payload],
.const_ty => unreachable,
.interned => return air_datas[inst_index].interned.toValue(),
else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod),
}
@@ -1658,7 +1652,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool {
.cmp_vector,
.cmp_vector_optimized,
.constant,
.const_ty,
.interned,
.is_null,
.is_non_null,
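
Taken together, the hunks above fold the old `const_ty` tag into `interned`: a comptime-known type no longer needs its own instruction, it is just another interned value resolved through the pool. A toy model of the resulting lookup, with stand-in types rather than the real Air/InternPool (names here are illustrative only):

const std = @import("std");

const Index = enum(u32) { u8_type, bool_type, _ };

const Inst = union(enum) {
    // One tag now covers every comptime-known value, types included.
    interned: Index,
    add: struct { lhs: u32, rhs: u32 },
};

fn refType(inst: Inst) Index {
    return switch (inst) {
        // Previously a separate `.const_ty` branch returned a stored Type.
        .interned => |ip_index| ip_index,
        else => unreachable,
    };
}

test "interned carries the type index" {
    try std.testing.expectEqual(Index.u8_type, refType(.{ .interned = .u8_type }));
}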


@@ -553,10 +553,10 @@ pub const Key = union(enum) {
pub const Addr = union(enum) {
decl: Module.Decl.Index,
mut_decl: MutDecl,
comptime_field: Index,
int: Index,
eu_payload: Index,
opt_payload: Index,
comptime_field: Index,
elem: BaseIndex,
field: BaseIndex,
@@ -703,24 +703,27 @@ pub const Key = union(enum) {
.aggregate => |aggregate| {
std.hash.autoHash(hasher, aggregate.ty);
switch (ip.indexToKey(aggregate.ty)) {
.array_type => |array_type| if (array_type.child == .u8_type) switch (aggregate.storage) {
.bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte),
.elems => |elems| {
var buffer: Key.Int.Storage.BigIntSpace = undefined;
for (elems) |elem| std.hash.autoHash(
hasher,
ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
unreachable,
);
},
.repeated_elem => |elem| {
const len = ip.aggregateTypeLen(aggregate.ty);
var buffer: Key.Int.Storage.BigIntSpace = undefined;
const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
unreachable;
var i: u64 = 0;
while (i < len) : (i += 1) std.hash.autoHash(hasher, byte);
},
.array_type => |array_type| if (array_type.child == .u8_type) {
switch (aggregate.storage) {
.bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte),
.elems => |elems| {
var buffer: Key.Int.Storage.BigIntSpace = undefined;
for (elems) |elem| std.hash.autoHash(
hasher,
ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
unreachable,
);
},
.repeated_elem => |elem| {
const len = ip.aggregateTypeLen(aggregate.ty);
var buffer: Key.Int.Storage.BigIntSpace = undefined;
const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch
unreachable;
var i: u64 = 0;
while (i < len) : (i += 1) std.hash.autoHash(hasher, byte);
},
}
return;
},
else => {},
}
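
The hunk above restructures, without changing behavior, how aggregates of `u8` array type are hashed: every storage form must feed the hasher byte by byte so that logically equal values land in the same bucket. The invariant in isolation, as a self-contained sketch (this `Storage` union is a stand-in for the real aggregate storage, not compiler code):

const std = @import("std");

const Storage = union(enum) {
    bytes: []const u8,
    repeated_elem: struct { byte: u8, len: usize },

    // Normalize both representations to the same stream of hashed bytes.
    fn addToHasher(s: Storage, hasher: anytype) void {
        switch (s) {
            .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte),
            .repeated_elem => |r| {
                var i: usize = 0;
                while (i < r.len) : (i += 1) std.hash.autoHash(hasher, r.byte);
            },
        }
    }
};

test "equal content hashes equally across storage forms" {
    var a = std.hash.Wyhash.init(0);
    var b = std.hash.Wyhash.init(0);
    Storage.addToHasher(.{ .bytes = "aaa" }, &a);
    Storage.addToHasher(.{ .repeated_elem = .{ .byte = 'a', .len = 3 } }, &b);
    try std.testing.expectEqual(a.final(), b.final());
}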
@@ -2860,6 +2863,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.array_type => |array_type| {
assert(array_type.child != .none);
assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child);
if (std.math.cast(u32, array_type.len)) |len| {
if (array_type.sentinel == .none) {
@@ -3230,7 +3234,23 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.int => |int| b: {
assert(int.ty == .comptime_int_type or ip.indexToKey(int.ty) == .int_type);
switch (int.ty) {
.usize_type,
.isize_type,
.c_char_type,
.c_short_type,
.c_ushort_type,
.c_int_type,
.c_uint_type,
.c_long_type,
.c_ulong_type,
.c_longlong_type,
.c_ulonglong_type,
.c_longdouble_type,
.comptime_int_type,
=> {},
else => assert(ip.indexToKey(int.ty) == .int_type),
}
switch (int.storage) {
.u64, .i64, .big_int => {},
.lazy_align, .lazy_size => |lazy_ty| {


@@ -323,7 +323,6 @@ pub fn categorizeOperand(
.alloc,
.ret_ptr,
.constant,
.const_ty,
.interned,
.trap,
.breakpoint,
@@ -975,7 +974,6 @@ fn analyzeInst(
=> return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }),
.constant,
.const_ty,
.interned,
=> unreachable,
@@ -1272,7 +1270,7 @@ fn analyzeOperands(
// Don't compute any liveness for constants
switch (inst_tags[operand]) {
.constant, .const_ty, .interned => continue,
.constant, .interned => continue,
else => {},
}
@@ -1308,7 +1306,7 @@ fn analyzeOperands(
// Don't compute any liveness for constants
switch (inst_tags[operand]) {
.constant, .const_ty, .interned => continue,
.constant, .interned => continue,
else => {},
}
@@ -1842,7 +1840,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
// Don't compute any liveness for constants
const inst_tags = big.a.air.instructions.items(.tag);
switch (inst_tags[operand]) {
.constant, .const_ty, .interned => return,
.constant, .interned => return,
else => {},
}


@@ -43,7 +43,6 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.alloc,
.ret_ptr,
.constant,
.const_ty,
.interned,
.breakpoint,
.dbg_stmt,
@@ -557,7 +556,7 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err
fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void {
const operand = Air.refToIndexAllowNone(op_ref) orelse return;
switch (self.air.instructions.items(.tag)[operand]) {
.constant, .const_ty, .interned => {},
.constant, .interned => {},
else => {
if (dies) {
if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
@@ -579,7 +578,7 @@ fn verifyInst(
}
const tag = self.air.instructions.items(.tag);
switch (tag[inst]) {
.constant, .const_ty, .interned => unreachable,
.constant, .interned => unreachable,
else => {
if (self.liveness.isUnused(inst)) {
assert(!self.live.contains(inst));


@@ -85,20 +85,13 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{},
/// Keys are fully resolved file paths. This table owns the keys and values.
embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
/// This is a temporary addition to stage2 in order to match legacy behavior,
/// however the end-game once the lang spec is settled will be to use a global
/// InternPool for comptime memoized objects, making this behavior consistent across all types,
/// not only string literals. Or, we might decide to not guarantee string literals
/// to have equal comptime pointers, in which case this field can be deleted (perhaps
/// the commit that introduced it can simply be reverted).
/// This table uses an optional index so that when a Decl is destroyed, the string literal
/// is still reclaimable by a future Decl.
string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{},
string_literal_bytes: ArrayListUnmanaged(u8) = .{},
/// Stores all Type and Value objects; periodically garbage collected.
intern_pool: InternPool = .{},
/// This is currently only used for string literals, however the end-game once the lang spec
/// is settled will be to make this behavior consistent across all types.
memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
/// to the same function.
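
The `string_literal_table` being deleted in this file keyed decls by offset/length into a side byte buffer, with a custom hash context; once literals live in the InternPool, `memoized_decls` reduces the same memoization to an ordinary map from value index to Decl. A self-contained sketch of that shape (`ValueIndex` and `DeclIndex` are stand-ins for `InternPool.Index` and `Decl.Index`):

const std = @import("std");

const ValueIndex = enum(u32) { _ };
const DeclIndex = enum(u32) { _ };

const Memo = struct {
    map: std.AutoHashMapUnmanaged(ValueIndex, DeclIndex) = .{},

    // Return the existing Decl for an interned value, or record a new one.
    fn getOrCreate(
        m: *Memo,
        gpa: std.mem.Allocator,
        val: ValueIndex,
        create: *const fn () DeclIndex,
    ) !DeclIndex {
        const gop = try m.map.getOrPut(gpa, val);
        if (!gop.found_existing) gop.value_ptr.* = create();
        return gop.value_ptr.*;
    }
};

fn makeDecl() DeclIndex {
    return @intToEnum(DeclIndex, 42);
}

test "one decl per interned value" {
    var memo = Memo{};
    defer memo.map.deinit(std.testing.allocator);
    const first = try memo.getOrCreate(std.testing.allocator, @intToEnum(ValueIndex, 1), makeDecl);
    const again = try memo.getOrCreate(std.testing.allocator, @intToEnum(ValueIndex, 1), makeDecl);
    try std.testing.expectEqual(first, again);
}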
@@ -208,39 +201,6 @@ pub const CImportError = struct {
}
};
pub const StringLiteralContext = struct {
bytes: *ArrayListUnmanaged(u8),
pub const Key = struct {
index: u32,
len: u32,
};
pub fn eql(self: @This(), a: Key, b: Key) bool {
_ = self;
return a.index == b.index and a.len == b.len;
}
pub fn hash(self: @This(), x: Key) u64 {
const x_slice = self.bytes.items[x.index..][0..x.len];
return std.hash_map.hashString(x_slice);
}
};
pub const StringLiteralAdapter = struct {
bytes: *ArrayListUnmanaged(u8),
pub fn eql(self: @This(), a_slice: []const u8, b: StringLiteralContext.Key) bool {
const b_slice = self.bytes.items[b.index..][0..b.len];
return mem.eql(u8, a_slice, b_slice);
}
pub fn hash(self: @This(), adapted_key: []const u8) u64 {
_ = self;
return std.hash_map.hashString(adapted_key);
}
};
const MonomorphedFuncsSet = std.HashMapUnmanaged(
Fn.Index,
void,
@@ -660,14 +620,8 @@ pub const Decl = struct {
}
mod.destroyFunc(func);
}
_ = mod.memoized_decls.remove(decl.val.ip_index);
if (decl.value_arena) |value_arena| {
if (decl.owns_tv) {
if (decl.val.castTag(.str_lit)) |str_lit| {
mod.string_literal_table.getPtrContext(str_lit.data, .{
.bytes = &mod.string_literal_bytes,
}).?.* = .none;
}
}
value_arena.deinit(gpa);
decl.value_arena = null;
decl.has_tv = false;
@@ -834,7 +788,7 @@ pub const Decl = struct {
pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
if (!decl.owns_tv) return .none;
if (decl.val.ip_index == .none) return .none;
return mod.intern_pool.indexToStructType(decl.val.ip_index);
return mod.intern_pool.indexToStructType(decl.val.toIntern());
}
/// If the Decl has a value and it is a union, return it,
@@ -875,7 +829,7 @@ pub const Decl = struct {
return switch (decl.val.ip_index) {
.empty_struct_type => .none,
.none => .none,
else => switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
else => switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
.opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
.struct_type => |struct_type| struct_type.namespace,
.union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(),
@@ -919,7 +873,7 @@ pub const Decl = struct {
pub fn isExtern(decl: Decl, mod: *Module) bool {
assert(decl.has_tv);
return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
return switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
.variable => |variable| variable.is_extern,
.extern_func => true,
else => false,
@@ -1577,11 +1531,11 @@ pub const Fn = struct {
ip: *InternPool,
gpa: Allocator,
) !void {
switch (err_set_ty.ip_index) {
switch (err_set_ty.toIntern()) {
.anyerror_type => {
self.is_anyerror = true;
},
else => switch (ip.indexToKey(err_set_ty.ip_index)) {
else => switch (ip.indexToKey(err_set_ty.toIntern())) {
.error_set_type => |error_set_type| {
for (error_set_type.names) |name| {
try self.errors.put(gpa, name, {});
@@ -3396,8 +3350,7 @@ pub fn deinit(mod: *Module) void {
mod.namespaces_free_list.deinit(gpa);
mod.allocated_namespaces.deinit(gpa);
mod.string_literal_table.deinit(gpa);
mod.string_literal_bytes.deinit(gpa);
mod.memoized_decls.deinit(gpa);
mod.intern_pool.deinit(gpa);
}
@@ -4702,7 +4655,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
return true;
}
if (mod.intern_pool.indexToFunc(decl_tv.val.ip_index).unwrap()) |func_index| {
if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| {
const func = mod.funcPtr(func_index);
const owns_tv = func.owner_decl == decl_index;
if (owns_tv) {
@@ -4749,10 +4702,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
decl.owns_tv = false;
var queue_linker_work = false;
var is_extern = false;
switch (decl_tv.val.ip_index) {
switch (decl_tv.val.toIntern()) {
.generic_poison => unreachable,
.unreachable_value => unreachable,
else => switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) {
else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) {
.variable => |variable| if (variable.decl == decl_index) {
decl.owns_tv = true;
queue_linker_work = true;
@@ -4792,7 +4745,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr;
};
decl.@"addrspace" = blk: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) {
const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) {
.variable => .variable,
.extern_func, .func => .function,
else => .constant,
@@ -6497,40 +6450,33 @@ pub fn populateTestFunctions(
const array_decl_index = d: {
// Add mod.test_functions to an array decl then make the test_functions
// decl reference it as a slice.
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
const arena = new_decl_arena.allocator();
const test_fn_vals = try arena.alloc(Value, mod.test_functions.count());
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
.ty = try mod.arrayType(.{
.len = test_fn_vals.len,
.child = test_fn_ty.ip_index,
.sentinel = .none,
}),
.val = try Value.Tag.aggregate.create(arena, test_fn_vals),
});
const array_decl = mod.declPtr(array_decl_index);
const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count());
defer gpa.free(test_fn_vals);
// Add a dependency on each test name and function pointer.
try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
var array_decl_dependencies = std.ArrayListUnmanaged(Decl.Index){};
defer array_decl_dependencies.deinit(gpa);
try array_decl_dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2);
for (mod.test_functions.keys(), 0..) |test_decl_index, i| {
for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| {
const test_decl = mod.declPtr(test_decl_index);
const test_name_slice = mem.sliceTo(test_decl.name, 0);
const test_name_decl_index = n: {
var name_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer name_decl_arena.deinit();
const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice);
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
.ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type }),
.val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes),
const test_decl_name = mem.span(test_decl.name);
const test_name_decl_ty = try mod.arrayType(.{
.len = test_decl_name.len,
.child = .u8_type,
});
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
.ty = test_name_decl_ty,
.val = (try mod.intern(.{ .aggregate = .{
.ty = test_name_decl_ty.toIntern(),
.storage = .{ .bytes = test_decl_name },
} })).toValue(),
});
try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena);
break :n test_name_decl_index;
};
array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, .normal);
array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal);
array_decl_dependencies.appendAssumeCapacity(test_decl_index);
array_decl_dependencies.appendAssumeCapacity(test_name_decl_index);
try mod.linkerUpdateDecl(test_name_decl_index);
const test_fn_fields = .{
@@ -6541,36 +6487,51 @@ pub fn populateTestFunctions(
} }),
// func
try mod.intern(.{ .ptr = .{
.ty = test_decl.ty.ip_index,
.ty = test_decl.ty.toIntern(),
.addr = .{ .decl = test_decl_index },
} }),
// async_frame_size
null_usize,
};
test_fn_vals[i] = (try mod.intern(.{ .aggregate = .{
.ty = test_fn_ty.ip_index,
test_fn_val.* = try mod.intern(.{ .aggregate = .{
.ty = test_fn_ty.toIntern(),
.storage = .{ .elems = &test_fn_fields },
} })).toValue();
} });
}
const array_decl_ty = try mod.arrayType(.{
.len = test_fn_vals.len,
.child = test_fn_ty.toIntern(),
.sentinel = .none,
});
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
.ty = array_decl_ty,
.val = (try mod.intern(.{ .aggregate = .{
.ty = array_decl_ty.toIntern(),
.storage = .{ .elems = test_fn_vals },
} })).toValue(),
});
for (array_decl_dependencies.items) |array_decl_dependency| {
try mod.declareDeclDependency(array_decl_index, array_decl_dependency);
}
try array_decl.finalizeNewArena(&new_decl_arena);
break :d array_decl_index;
};
try mod.linkerUpdateDecl(array_decl_index);
{
const new_ty = try mod.ptrType(.{
.elem_type = test_fn_ty.ip_index,
.elem_type = test_fn_ty.toIntern(),
.is_const = true,
.size = .Slice,
});
const new_val = decl.val;
const new_init = try mod.intern(.{ .ptr = .{
.ty = new_ty.ip_index,
.ty = new_ty.toIntern(),
.addr = .{ .decl = array_decl_index },
.len = (try mod.intValue(Type.usize, mod.test_functions.count())).ip_index,
.len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(),
} });
mod.intern_pool.mutateVarInit(decl.val.ip_index, new_init);
mod.intern_pool.mutateVarInit(decl.val.toIntern(), new_init);
// Since we are replacing the Decl's value we must perform cleanup on the
// previous value.
@@ -6650,47 +6611,32 @@ fn reportRetryableFileError(
}
pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void {
switch (val.ip_index) {
.none => switch (val.tag()) {
.aggregate => {
for (val.castTag(.aggregate).?.data) |field_val| {
mod.markReferencedDeclsAlive(field_val);
}
},
.@"union" => {
const data = val.castTag(.@"union").?.data;
mod.markReferencedDeclsAlive(data.tag);
mod.markReferencedDeclsAlive(data.val);
},
else => {},
switch (mod.intern_pool.indexToKey(val.toIntern())) {
.variable => |variable| mod.markDeclIndexAlive(variable.decl),
.extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl),
.func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
.error_union => |error_union| switch (error_union.val) {
.err_name => {},
.payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()),
},
else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
.variable => |variable| mod.markDeclIndexAlive(variable.decl),
.extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl),
.func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
.error_union => |error_union| switch (error_union.val) {
.err_name => {},
.payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()),
},
.ptr => |ptr| {
switch (ptr.addr) {
.decl => |decl| mod.markDeclIndexAlive(decl),
.mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl),
.int, .comptime_field => {},
.eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()),
.elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()),
}
if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue());
},
.opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()),
.aggregate => |aggregate| for (aggregate.storage.values()) |elem|
mod.markReferencedDeclsAlive(elem.toValue()),
.un => |un| {
mod.markReferencedDeclsAlive(un.tag.toValue());
mod.markReferencedDeclsAlive(un.val.toValue());
},
else => {},
.ptr => |ptr| {
switch (ptr.addr) {
.decl => |decl| mod.markDeclIndexAlive(decl),
.mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl),
.int, .comptime_field => {},
.eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()),
.elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()),
}
if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue());
},
.opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()),
.aggregate => |aggregate| for (aggregate.storage.values()) |elem|
mod.markReferencedDeclsAlive(elem.toValue()),
.un => |un| {
mod.markReferencedDeclsAlive(un.tag.toValue());
mod.markReferencedDeclsAlive(un.val.toValue());
},
else => {},
}
}
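
After this rewrite the legacy `.none`/`val.tag()` entry path is gone: every value reaching `markReferencedDeclsAlive` is an InternPool key, and marking recurses structurally through child values. The traversal pattern in miniature, with stand-in types (not compiler code):

const std = @import("std");

const Value = union(enum) {
    decl: u32,
    opt: ?*const Value,
    aggregate: []const Value,

    // Mark decls directly; recurse into payloads and aggregate elements.
    fn markAlive(val: Value, alive: *std.AutoHashMap(u32, void)) !void {
        switch (val) {
            .decl => |d| try alive.put(d, {}),
            .opt => |o| if (o) |payload| try payload.markAlive(alive),
            .aggregate => |elems| for (elems) |elem| try elem.markAlive(alive),
        }
    }
};

test "marks nested decls" {
    var alive = std.AutoHashMap(u32, void).init(std.testing.allocator);
    defer alive.deinit();
    const inner = Value{ .decl = 7 };
    const outer = Value{ .aggregate = &[_]Value{ inner, .{ .opt = &inner } } };
    try outer.markAlive(&alive);
    try std.testing.expect(alive.contains(7));
}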
@@ -6796,11 +6742,11 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
}
pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
return ptrType(mod, .{ .elem_type = child_type.ip_index });
return ptrType(mod, .{ .elem_type = child_type.toIntern() });
}
pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true });
return ptrType(mod, .{ .elem_type = child_type.toIntern(), .is_const = true });
}
pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
@@ -6871,9 +6817,9 @@ pub fn errorSetFromUnsortedNames(
pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
if (ty.isPtrLikeOptional(mod)) {
const i = try intern(mod, .{ .opt = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.val = try intern(mod, .{ .ptr = .{
.ty = ty.childType(mod).ip_index,
.ty = ty.childType(mod).toIntern(),
.addr = .{ .int = try intern(mod, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = x },
@@ -6890,7 +6836,7 @@ pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
assert(ty.zigTypeTag(mod) == .Pointer);
const i = try intern(mod, .{ .ptr = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.addr = .{ .int = try intern(mod, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = x },
@@ -6906,7 +6852,7 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er
assert(tag == .Enum);
}
const i = try intern(mod, .{ .enum_tag = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.int = tag_int,
} });
return i.toValue();
@@ -6917,12 +6863,12 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er
pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value {
const ip = &mod.intern_pool;
const gpa = mod.gpa;
const enum_type = ip.indexToKey(ty.ip_index).enum_type;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
if (enum_type.values.len == 0) {
// Auto-numbered fields.
return (try ip.get(gpa, .{ .enum_tag = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.int = try ip.get(gpa, .{ .int = .{
.ty = enum_type.tag_ty,
.storage = .{ .u64 = field_index },
@@ -6931,7 +6877,7 @@ pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.E
}
return (try ip.get(gpa, .{ .enum_tag = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.int = enum_type.values[field_index],
} })).toValue();
}
@@ -6950,7 +6896,7 @@ pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value {
const i = try intern(mod, .{ .int = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.storage = .{ .big_int = x },
} });
return i.toValue();
@@ -6958,7 +6904,7 @@ pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Valu
pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
const i = try intern(mod, .{ .int = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.storage = .{ .u64 = x },
} });
return i.toValue();
@@ -6966,7 +6912,7 @@ pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value {
pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value {
const i = try intern(mod, .{ .int = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.storage = .{ .i64 = x },
} });
return i.toValue();
@@ -6974,9 +6920,9 @@ pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value {
pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value {
const i = try intern(mod, .{ .un = .{
.ty = union_ty.ip_index,
.tag = tag.ip_index,
.val = val.ip_index,
.ty = union_ty.toIntern(),
.tag = tag.toIntern(),
.val = val.toIntern(),
} });
return i.toValue();
}
@@ -6993,7 +6939,7 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
else => unreachable,
};
const i = try intern(mod, .{ .float = .{
.ty = ty.ip_index,
.ty = ty.toIntern(),
.storage = storage,
} });
return i.toValue();
@@ -7001,9 +6947,9 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value {
pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value {
const ip = &mod.intern_pool;
assert(ip.isOptionalType(opt_ty.ip_index));
assert(ip.isOptionalType(opt_ty.toIntern()));
const result = try ip.get(mod.gpa, .{ .opt = .{
.ty = opt_ty.ip_index,
.ty = opt_ty.toIntern(),
.val = .none,
} });
return result.toValue();
@@ -7042,7 +6988,7 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type {
pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
assert(!val.isUndef(mod));
const key = mod.intern_pool.indexToKey(val.ip_index);
const key = mod.intern_pool.indexToKey(val.toIntern());
switch (key.int.storage) {
.i64 => |x| {
if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted);
@@ -7221,19 +7167,19 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I
/// * Not a struct.
pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
if (ty.ip_index == .none) return null;
const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null;
const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null;
return mod.structPtr(struct_index);
}
pub fn typeToUnion(mod: *Module, ty: Type) ?*Union {
if (ty.ip_index == .none) return null;
const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null;
const union_index = mod.intern_pool.indexToUnionType(ty.toIntern()).unwrap() orelse return null;
return mod.unionPtr(union_index);
}
pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
if (ty.ip_index == .none) return null;
return mod.intern_pool.indexToFuncType(ty.ip_index);
return mod.intern_pool.indexToFuncType(ty.toIntern());
}
pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet {
@@ -7243,7 +7189,7 @@ pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet {
pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex {
if (ty.ip_index == .none) return .none;
return mod.intern_pool.indexToInferredErrorSetType(ty.ip_index);
return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern());
}
pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc {
@@ -7268,5 +7214,5 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu
}
pub fn toEnum(mod: *Module, comptime E: type, val: Value) E {
return mod.intern_pool.toEnum(E, val.ip_index);
return mod.intern_pool.toEnum(E, val.toIntern());
}

File diff suppressed because it is too large.


@@ -103,10 +103,26 @@ pub fn print(
return writer.writeAll(" }");
},
.bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.str_lit => {
const str_lit = val.castTag(.str_lit).?.data;
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)});
.repeated => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
var i: u32 = 0;
try writer.writeAll(".{ ");
const elem_tv = TypedValue{
.ty = ty.elemType2(mod),
.val = val.castTag(.repeated).?.data,
};
const len = ty.arrayLen(mod);
const max_len = std.math.min(len, max_aggregate_items);
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(elem_tv, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
},
// TODO these should not appear in this function
.inferred_alloc => return writer.writeAll("(inferred allocation value)"),
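
The new `.repeated` branch above prints at most `max_aggregate_items` elements of the repeated value, then an ellipsis. The truncation rule on its own, as a runnable sketch (`printRepeated` is a hypothetical helper, not part of the file):

const std = @import("std");

// Print `elem` up to `max_items` times, then ", ..." if the array is longer.
fn printRepeated(writer: anytype, elem: []const u8, len: usize, max_items: usize) !void {
    try writer.writeAll(".{ ");
    const max_len = std.math.min(len, max_items);
    var i: usize = 0;
    while (i < max_len) : (i += 1) {
        if (i != 0) try writer.writeAll(", ");
        try writer.writeAll(elem);
    }
    if (len > max_items) try writer.writeAll(", ...");
    try writer.writeAll(" }");
}

test "caps the number of printed elements" {
    var buf: [64]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try printRepeated(fbs.writer(), "1", 5, 3);
    try std.testing.expectEqualStrings(".{ 1, 1, 1, ... }", fbs.getWritten());
}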


@@ -846,7 +846,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
.interned => unreachable, // excluded from function bodies
.unreach => self.finishAirBookkeeping(),
@@ -6169,7 +6168,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
}
return gop.value_ptr.*;
},
.const_ty => unreachable,
else => return self.getResolvedInstValue(inst_index),
}
}


@@ -830,7 +830,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
.interned => unreachable, // excluded from function bodies
.unreach => self.finishAirBookkeeping(),
@@ -6117,7 +6116,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
}
return gop.value_ptr.*;
},
.const_ty => unreachable,
else => return self.getResolvedInstValue(inst_index),
}
}


@@ -660,7 +660,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
.interned => unreachable, // excluded from function bodies
.unreach => self.finishAirBookkeeping(),
@@ -2571,7 +2570,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
}
return gop.value_ptr.*;
},
.const_ty => unreachable,
else => return self.getResolvedInstValue(inst_index),
}
}


@@ -680,7 +680,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
.interned => unreachable, // excluded from function bodies
.unreach => self.finishAirBookkeeping(),
@@ -4567,7 +4566,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
}
return gop.value_ptr.*;
},
.const_ty => unreachable,
else => return self.getResolvedInstValue(inst),
}
}


@@ -1833,7 +1833,6 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const air_tags = func.air.instructions.items(.tag);
return switch (air_tags[inst]) {
.constant => unreachable,
.const_ty => unreachable,
.interned => unreachable,
.add => func.airBinOp(inst, .add),
@@ -6903,28 +6902,12 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
.child = .u8_type,
.sentinel = .zero_u8,
});
const string_bytes = &mod.string_literal_bytes;
try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len);
const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, @as([]const u8, tag_name), Module.StringLiteralAdapter{
.bytes = string_bytes,
}, Module.StringLiteralContext{
.bytes = string_bytes,
});
if (!gop.found_existing) {
gop.key_ptr.* = .{
.index = @intCast(u32, string_bytes.items.len),
.len = @intCast(u32, tag_name.len),
};
string_bytes.appendSliceAssumeCapacity(tag_name);
gop.value_ptr.* = .none;
}
var name_val_payload: Value.Payload.StrLit = .{
.base = .{ .tag = .str_lit },
.data = gop.key_ptr.*,
};
const name_val = Value.initPayload(&name_val_payload.base);
const name_val = try mod.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = tag_name },
} });
const tag_sym_index = try func.bin_file.lowerUnnamedConst(
.{ .ty = name_ty, .val = name_val },
.{ .ty = name_ty, .val = name_val.toValue() },
enum_decl_index,
);


@@ -1923,7 +1923,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
.interned => unreachable, // excluded from function bodies
.unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(),
@@ -2099,7 +2098,7 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void {
/// Asserts there is already capacity to insert into top branch inst_table.
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
switch (self.air.instructions.items(.tag)[inst]) {
.constant, .const_ty => unreachable,
.constant => unreachable,
else => self.inst_tracking.getPtr(inst).?.die(self, inst),
}
}
@@ -11593,7 +11592,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
}));
break :tracking gop.value_ptr;
},
.const_ty => unreachable,
else => self.inst_tracking.getPtr(inst).?,
}.short;
switch (mcv) {
@@ -11608,7 +11606,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking {
const tracking = switch (self.air.instructions.items(.tag)[inst]) {
.constant => &self.const_tracking,
.const_ty => unreachable,
else => &self.inst_tracking,
}.getPtr(inst).?;
return switch (tracking.short) {


@@ -204,150 +204,6 @@ pub fn generateSymbol(
return .ok;
}
if (typed_value.val.ip_index == .none) switch (typed_value.ty.zigTypeTag(mod)) {
.Array => switch (typed_value.val.tag()) {
.bytes => {
const bytes = typed_value.val.castTag(.bytes).?.data;
const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod));
// The bytes payload already includes the sentinel, if any
try code.ensureUnusedCapacity(len);
code.appendSliceAssumeCapacity(bytes[0..len]);
return Result.ok;
},
.str_lit => {
const str_lit = typed_value.val.castTag(.str_lit).?.data;
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
try code.ensureUnusedCapacity(bytes.len + 1);
code.appendSliceAssumeCapacity(bytes);
if (typed_value.ty.sentinel(mod)) |sent_val| {
const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
code.appendAssumeCapacity(byte);
}
return Result.ok;
},
else => return Result{
.fail = try ErrorMsg.create(
bin_file.allocator,
src_loc,
"TODO implement generateSymbol for array type value: {s}",
.{@tagName(typed_value.val.tag())},
),
},
},
.Struct => {
if (typed_value.ty.containerLayout(mod) == .Packed) {
const struct_obj = mod.typeToStruct(typed_value.ty).?;
const fields = struct_obj.fields.values();
const field_vals = typed_value.val.castTag(.aggregate).?.data;
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
const current_pos = code.items.len;
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
for (field_vals, 0..) |field_val, index| {
const field_ty = fields[index].ty;
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
if (field_ty.zigTypeTag(mod) == .Pointer) {
const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.val = field_val,
}, &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
} else {
field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @intCast(u16, field_ty.bitSize(mod));
}
return Result.ok;
}
const struct_begin = code.items.len;
const field_vals = typed_value.val.castTag(.aggregate).?.data;
for (field_vals, 0..) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index, mod);
if (!field_ty.hasRuntimeBits(mod)) continue;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.val = field_val,
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
const unpadded_field_end = code.items.len - struct_begin;
// Pad struct members if required
const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod);
const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow;
if (padding > 0) {
try code.writer().writeByteNTimes(0, padding);
}
}
return Result.ok;
},
.Vector => switch (typed_value.val.tag()) {
.bytes => {
const bytes = typed_value.val.castTag(.bytes).?.data;
const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow;
const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse
return error.Overflow;
try code.ensureUnusedCapacity(len + padding);
code.appendSliceAssumeCapacity(bytes[0..len]);
if (padding > 0) try code.writer().writeByteNTimes(0, padding);
return Result.ok;
},
.str_lit => {
const str_lit = typed_value.val.castTag(.str_lit).?.data;
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse
return error.Overflow;
try code.ensureUnusedCapacity(str_lit.len + padding);
code.appendSliceAssumeCapacity(bytes);
if (padding > 0) try code.writer().writeByteNTimes(0, padding);
return Result.ok;
},
else => unreachable,
},
.Frame,
.AnyFrame,
=> return .{ .fail = try ErrorMsg.create(
bin_file.allocator,
src_loc,
"TODO generateSymbol for type {}",
.{typed_value.ty.fmt(mod)},
) },
.Float,
.Union,
.Optional,
.ErrorUnion,
.ErrorSet,
.Int,
.Enum,
.Bool,
.Pointer,
=> unreachable, // handled below
.Type,
.Void,
.NoReturn,
.ComptimeFloat,
.ComptimeInt,
.Undefined,
.Null,
.Opaque,
.EnumLiteral,
.Fn,
=> unreachable, // comptime-only types
};
switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
.int_type,
.ptr_type,


@@ -870,7 +870,7 @@ pub const DeclGen = struct {
}
// First try specific tag representations for more efficiency.
switch (val.ip_index) {
switch (val.toIntern()) {
.undef => {
const ai = ty.arrayInfo(mod);
try writer.writeByte('{');
@@ -893,24 +893,6 @@ pub const DeclGen = struct {
try writer.writeByte('}');
return;
},
.none => switch (val.tag()) {
.bytes, .str_lit => |t| {
const bytes = switch (t) {
.bytes => val.castTag(.bytes).?.data,
.str_lit => bytes: {
const str_lit = val.castTag(.str_lit).?.data;
break :bytes mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
},
else => unreachable,
};
const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null;
try writer.print("{s}", .{
fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel),
});
return;
},
else => {},
},
else => {},
}
// Fall back to generic implementation.
@@ -2909,7 +2891,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
const result_value = switch (air_tags[inst]) {
// zig fmt: off
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
.interned => unreachable, // excluded from function bodies
.arg => try airArg(f, inst),


@@ -1501,7 +1501,7 @@ pub const Object = struct {
}
const ip = &mod.intern_pool;
const enum_type = ip.indexToKey(ty.ip_index).enum_type;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len);
defer gpa.free(enumerators);
@@ -1697,7 +1697,7 @@ pub const Object = struct {
return ptr_di_ty;
},
.Opaque => {
if (ty.ip_index == .anyopaque_type) {
if (ty.toIntern() == .anyopaque_type) {
const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
@@ -1981,7 +1981,7 @@ pub const Object = struct {
break :blk fwd_decl;
};
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
@@ -2466,7 +2466,7 @@ pub const DeclGen = struct {
global.setGlobalConstant(.True);
break :init_val decl.val;
};
if (init_val.ip_index != .unreachable_value) {
if (init_val.toIntern() != .unreachable_value) {
const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val });
if (global.globalGetValueType() == llvm_init.typeOf()) {
global.setInitializer(llvm_init);
@@ -2802,12 +2802,12 @@ pub const DeclGen = struct {
return dg.context.pointerType(llvm_addrspace);
},
.Opaque => {
if (t.ip_index == .anyopaque_type) return dg.context.intType(8);
if (t.toIntern() == .anyopaque_type) return dg.context.intType(8);
const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
const opaque_type = mod.intern_pool.indexToKey(t.ip_index).opaque_type;
const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type;
const name = try mod.opaqueFullyQualifiedName(opaque_type);
defer gpa.free(name);
@@ -2897,7 +2897,7 @@ pub const DeclGen = struct {
const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) {
const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) {
.anon_struct_type => |tuple| {
const llvm_struct_ty = dg.context.structCreateNamed("");
gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
@@ -3199,7 +3199,7 @@ pub const DeclGen = struct {
const mod = dg.module;
const target = mod.getTarget();
var tv = arg_tv;
switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
.runtime_value => |rt| tv.val = rt.val.toValue(),
else => {},
}
@@ -3208,284 +3208,7 @@ pub const DeclGen = struct {
return llvm_type.getUndef();
}
if (tv.val.ip_index == .none) switch (tv.ty.zigTypeTag(mod)) {
.Array => switch (tv.val.tag()) {
.bytes => {
const bytes = tv.val.castTag(.bytes).?.data;
return dg.context.constString(
bytes.ptr,
@intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)),
.True, // Don't null terminate. Bytes has the sentinel, if any.
);
},
.str_lit => {
const str_lit = tv.val.castTag(.str_lit).?.data;
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
if (tv.ty.sentinel(mod)) |sent_val| {
const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
if (byte == 0 and bytes.len > 0) {
return dg.context.constString(
bytes.ptr,
@intCast(c_uint, bytes.len),
.False, // Yes, null terminate.
);
}
var array = std.ArrayList(u8).init(dg.gpa);
defer array.deinit();
try array.ensureUnusedCapacity(bytes.len + 1);
array.appendSliceAssumeCapacity(bytes);
array.appendAssumeCapacity(byte);
return dg.context.constString(
array.items.ptr,
@intCast(c_uint, array.items.len),
.True, // Don't null terminate.
);
} else {
return dg.context.constString(
bytes.ptr,
@intCast(c_uint, bytes.len),
.True, // Don't null terminate. `bytes` has the sentinel, if any.
);
}
},
else => unreachable,
},
.Struct => {
const llvm_struct_ty = try dg.lowerType(tv.ty);
const gpa = dg.gpa;
const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
.anon_struct_type => |tuple| {
var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
defer llvm_fields.deinit(gpa);
try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
var need_unnamed = false;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
if (field_val != .none) continue;
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
// TODO make this and all other padding elsewhere in debug
// builds be 0xaa not undef.
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
const field_llvm_val = try dg.lowerValue(.{
.ty = field_ty.toType(),
.val = try tv.val.fieldValue(mod, i),
});
need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val);
llvm_fields.appendAssumeCapacity(field_llvm_val);
offset += field_ty.toType().abiSize(mod);
}
{
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
}
if (need_unnamed) {
return dg.context.constStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
.False,
);
} else {
return llvm_struct_ty.constNamedStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
);
}
},
.struct_type => |struct_type| struct_type,
else => unreachable,
};
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
const big_bits = struct_obj.backing_int_ty.bitSize(mod);
const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
for (fields, 0..) |field, i| {
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try dg.lowerValue(.{
.ty = field.ty,
.val = try tv.val.fieldValue(mod, i),
});
const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
const small_int_ty = dg.context.intType(ty_bit_size);
const small_int_val = if (field.ty.isPtrAtRuntime(mod))
non_int_val.constPtrToInt(small_int_ty)
else
non_int_val.constBitCast(small_int_ty);
const shift_rhs = int_llvm_ty.constInt(running_bits, .False);
// If the field is as large as the entire packed struct, this
// zext would go from, e.g. i16 to i16. This is legal with
// constZExtOrBitCast but not legal with constZExt.
const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty);
const shifted = extended_int_val.constShl(shift_rhs);
running_int = running_int.constOr(shifted);
running_bits += ty_bit_size;
}
return running_int;
}
const llvm_field_count = llvm_struct_ty.countStructElementTypes();
var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count);
defer llvm_fields.deinit(gpa);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
var need_unnamed = false;
var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
const field_align = field.alignment(mod, struct_obj.layout);
big_align = @max(big_align, field_align);
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
// TODO make this and all other padding elsewhere in debug
// builds be 0xaa not undef.
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
const field_llvm_val = try dg.lowerValue(.{
.ty = field.ty,
.val = try tv.val.fieldValue(mod, field_and_index.index),
});
need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val);
llvm_fields.appendAssumeCapacity(field_llvm_val);
offset += field.ty.abiSize(mod);
}
{
const prev_offset = offset;
offset = std.mem.alignForwardGeneric(u64, offset, big_align);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len));
llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef());
}
}
if (need_unnamed) {
return dg.context.constStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
.False,
);
} else {
return llvm_struct_ty.constNamedStruct(
llvm_fields.items.ptr,
@intCast(c_uint, llvm_fields.items.len),
);
}
},
.Vector => switch (tv.val.tag()) {
.bytes => {
// Note, sentinel is not stored even if the type has a sentinel.
const bytes = tv.val.castTag(.bytes).?.data;
const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
const elem_ty = tv.ty.childType(mod);
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems, 0..) |*elem, i| {
elem.* = try dg.lowerValue(.{
.ty = elem_ty,
.val = try mod.intValue(elem_ty, bytes[i]),
});
}
return llvm.constVector(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
},
.str_lit => {
// Note, sentinel is not stored
const str_lit = tv.val.castTag(.str_lit).?.data;
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
const vector_len = @intCast(usize, tv.ty.arrayLen(mod));
assert(vector_len == bytes.len);
const elem_ty = tv.ty.childType(mod);
const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems, 0..) |*elem, i| {
elem.* = try dg.lowerValue(.{
.ty = elem_ty,
.val = try mod.intValue(elem_ty, bytes[i]),
});
}
return llvm.constVector(
llvm_elems.ptr,
@intCast(c_uint, llvm_elems.len),
);
},
else => unreachable,
},
.Float,
.Union,
.Optional,
.ErrorUnion,
.ErrorSet,
.Int,
.Enum,
.Bool,
.Pointer,
=> unreachable, // handled below
.Frame,
.AnyFrame,
=> return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}),
.Type,
.Void,
.NoReturn,
.ComptimeFloat,
.ComptimeInt,
.Undefined,
.Null,
.Opaque,
.EnumLiteral,
.Fn,
=> unreachable, // comptime-only types
};
switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
@@ -3553,7 +3276,7 @@ pub const DeclGen = struct {
const llvm_payload_value = try dg.lowerValue(.{
.ty = payload_type,
.val = switch (error_union.val) {
.err_name => try mod.intern(.{ .undef = payload_type.ip_index }),
.err_name => try mod.intern(.{ .undef = payload_type.toIntern() }),
.payload => |payload| payload,
}.toValue(),
});
@@ -3700,7 +3423,7 @@ pub const DeclGen = struct {
fields_buf[0] = try dg.lowerValue(.{
.ty = payload_ty,
.val = switch (opt.val) {
.none => try mod.intern(.{ .undef = payload_ty.ip_index }),
.none => try mod.intern(.{ .undef = payload_ty.toIntern() }),
else => |payload| payload,
}.toValue(),
});
@@ -3711,7 +3434,7 @@ pub const DeclGen = struct {
}
return dg.context.constStruct(&fields_buf, llvm_field_count, .False);
},
.aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
.aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) {
.array_type => switch (aggregate.storage) {
.bytes => |bytes| return dg.context.constString(
bytes.ptr,
@@ -3802,7 +3525,7 @@ pub const DeclGen = struct {
const llvm_struct_ty = try dg.lowerType(tv.ty);
const gpa = dg.gpa;
const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) {
const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) {
.anon_struct_type => |tuple| {
var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
defer llvm_fields.deinit(gpa);
@@ -3967,9 +3690,9 @@ pub const DeclGen = struct {
},
.un => {
const llvm_union_ty = try dg.lowerType(tv.ty);
const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) {
const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) {
.none => tv.val.castTag(.@"union").?.data,
else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) {
.un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() },
else => unreachable,
},
@@ -4107,7 +3830,7 @@ pub const DeclGen = struct {
fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
const mod = dg.module;
const target = mod.getTarget();
return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
return switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) {
.int => |int| dg.lowerIntAsPtr(int),
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl),
@@ -4799,7 +4522,6 @@ pub const FuncGen = struct {
.vector_store_elem => try self.airVectorStoreElem(inst),
.constant => unreachable,
.const_ty => unreachable,
.interned => unreachable,
.unreach => self.airUnreach(inst),
@@ -6108,7 +5830,7 @@ pub const FuncGen = struct {
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, "");
const field_ptr_ty = try mod.ptrType(.{
.elem_type = llvm_field.ty.ip_index,
.elem_type = llvm_field.ty.toIntern(),
.alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment),
});
if (isByRef(field_ty, mod)) {
@@ -6984,7 +6706,7 @@ pub const FuncGen = struct {
const struct_llvm_ty = try self.dg.lowerType(struct_ty);
const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, "");
const field_ptr_ty = try mod.ptrType(.{
.elem_type = llvm_field.ty.ip_index,
.elem_type = llvm_field.ty.toIntern(),
.alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment),
});
return self.load(field_ptr, field_ptr_ty);
@@ -8915,7 +8637,7 @@ pub const FuncGen = struct {
fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
const mod = self.dg.module;
const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type;
const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
// TODO: detect when the type changes and re-emit this function.
const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_type.decl);
@@ -8988,7 +8710,7 @@ pub const FuncGen = struct {
fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
const mod = self.dg.module;
const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type;
const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
// TODO: detect when the type changes and re-emit this function.
const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_type.decl);
@@ -10529,7 +10251,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
var offset: u64 = 0;
var big_align: u32 = 0;
const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) {
const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var llvm_field_index: c_uint = 0;
for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| {
@@ -10927,7 +10649,7 @@ const ParamTypeIterator = struct {
.riscv32, .riscv64 => {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.ip_index == .f16_type) {
if (ty.toIntern() == .f16_type) {
return .as_u16;
}
switch (riscv_c_abi.classifyType(ty, mod)) {
@@ -11146,7 +10868,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
.Struct => {
// Packed structs are represented to LLVM as integers.
if (ty.containerLayout(mod) == .Packed) return false;
const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) {
const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var count: usize = 0;
for (tuple.types, tuple.values) |field_ty, field_val| {
@@ -11261,7 +10983,7 @@ fn backendSupportsF128(target: std.Target) bool {
/// LLVM does not support all relevant intrinsics for all targets, so we
/// may need to manually generate a libc call
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
return switch (scalar_ty.ip_index) {
return switch (scalar_ty.toIntern()) {
.f16_type => backendSupportsF16(target),
.f80_type => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
.f128_type => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),


@@ -616,7 +616,7 @@ pub const DeclGen = struct {
const mod = dg.module;
var val = arg_val;
switch (mod.intern_pool.indexToKey(val.ip_index)) {
switch (mod.intern_pool.indexToKey(val.toIntern())) {
.runtime_value => |rt| val = rt.val.toValue(),
else => {},
}
@@ -626,75 +626,7 @@ pub const DeclGen = struct {
return try self.addUndef(size);
}
if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) {
.Array => switch (val.tag()) {
.str_lit => {
const str_lit = val.castTag(.str_lit).?.data;
const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
try self.addBytes(bytes);
if (ty.sentinel(mod)) |sentinel| {
try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod)));
}
},
.bytes => {
const bytes = val.castTag(.bytes).?.data;
try self.addBytes(bytes);
},
else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}),
},
.Struct => {
if (ty.isSimpleTupleOrAnonStruct(mod)) {
unreachable; // TODO
} else {
const struct_ty = mod.typeToStruct(ty).?;
if (struct_ty.layout == .Packed) {
return dg.todo("packed struct constants", .{});
}
const struct_begin = self.size;
const field_vals = val.castTag(.aggregate).?.data;
for (struct_ty.fields.values(), 0..) |field, i| {
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
try self.lower(field.ty, field_vals[i]);
// Add padding if required.
// TODO: Add to type generation as well?
const unpadded_field_end = self.size - struct_begin;
const padded_field_end = ty.structFieldOffset(i + 1, mod);
const padding = padded_field_end - unpadded_field_end;
try self.addUndef(padding);
}
}
},
.Vector,
.Frame,
.AnyFrame,
=> return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}),
.Float,
.Union,
.Optional,
.ErrorUnion,
.ErrorSet,
.Int,
.Enum,
.Bool,
.Pointer,
=> unreachable, // handled below
.Type,
.Void,
.NoReturn,
.ComptimeFloat,
.ComptimeInt,
.Undefined,
.Null,
.Opaque,
.EnumLiteral,
.Fn,
=> unreachable, // comptime-only types
};
switch (mod.intern_pool.indexToKey(val.ip_index)) {
switch (mod.intern_pool.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
@@ -1876,7 +1808,6 @@ pub const DeclGen = struct {
.breakpoint => return,
.cond_br => return self.airCondBr(inst),
.constant => unreachable,
.const_ty => unreachable,
.dbg_stmt => return self.airDbgStmt(inst),
.loop => return self.airLoop(inst),
.ret => return self.airRet(inst),


@@ -95,7 +95,7 @@ const Writer = struct {
for (w.air.instructions.items(.tag), 0..) |tag, i| {
const inst = @intCast(Air.Inst.Index, i);
switch (tag) {
.constant, .const_ty, .interned => {
.constant, .interned => {
try w.writeInst(s, inst);
try s.writeByte('\n');
},
@@ -226,7 +226,6 @@ const Writer = struct {
.save_err_return_trace_index,
=> try w.writeNoOp(s, inst),
.const_ty,
.alloc,
.ret_ptr,
.err_return_trace,

File diff suppressed because it is too large.