diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index a47497d89d..2570343763 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -22,8 +22,6 @@ const IdResultType = spec.IdResultType;
 const StorageClass = spec.StorageClass;
 
 const SpvModule = @import("spirv/Module.zig");
-const CacheRef = SpvModule.CacheRef;
-const CacheString = SpvModule.CacheString;
 
 const SpvSection = @import("spirv/Section.zig");
 const SpvAssembler = @import("spirv/Assembler.zig");
@@ -32,14 +30,11 @@ const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
 
 pub const zig_call_abi_ver = 3;
 
-/// We want to store some extra facts about types as mapped from Zig to SPIR-V.
-/// This structure is used to keep that extra information, as well as
-/// the cached reference to the type.
-const SpvTypeInfo = struct {
-    ty_ref: CacheRef,
-};
-
-const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, SpvTypeInfo);
+const InternMap = std.AutoHashMapUnmanaged(struct { InternPool.Index, DeclGen.Repr }, IdResult);
+const PtrTypeMap = std.AutoHashMapUnmanaged(
+    struct { InternPool.Index, StorageClass },
+    struct { ty_id: IdRef, fwd_emitted: bool },
+);
 
 const ControlFlow = union(enum) {
     const Structured = struct {
@@ -162,14 +157,16 @@ pub const Object = struct {
     /// A map of Zig InternPool indices for anonymous decls to SPIR-V decl indices.
     anon_decl_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{},
 
-    /// A map that maps AIR intern pool indices to SPIR-V cache references (which
-    /// is basically the same thing except for SPIR-V).
-    /// This map is typically only used for structures that are deemed heavy enough
-    /// that it is worth to store them here. The SPIR-V module also interns types,
-    /// and so the main purpose of this map is to avoid recomputation and to
-    /// cache extra information about the type rather than to aid in validity
-    /// of the SPIR-V module.
-    type_map: TypeMap = .{},
+    /// Maps AIR intern pool indices to SPIR-V result-ids.
+    intern_map: InternMap = .{},
+
+    /// This map serves a dual purpose:
+    /// - It keeps track of pointers that are currently being emitted, so that we can tell
+    ///   if they are recursive and need an OpTypeForwardPointer.
+    /// - It caches pointers by child type. This is required because sometimes we rely on
+    ///   ID-equality for pointers, and pointers constructed via `ptrType()` aren't interned
+    ///   via the usual `intern_map` mechanism.
+    ptr_types: PtrTypeMap = .{},
 
     pub fn init(gpa: Allocator) Object {
         return .{
@@ -182,7 +179,8 @@ pub const Object = struct {
         self.spv.deinit();
         self.decl_link.deinit(self.gpa);
         self.anon_decl_link.deinit(self.gpa);
-        self.type_map.deinit(self.gpa);
+        self.intern_map.deinit(self.gpa);
+        self.ptr_types.deinit(self.gpa);
     }
 
     fn genDecl(
@@ -204,7 +202,8 @@
             .decl_index = decl_index,
             .air = air,
             .liveness = liveness,
-            .type_map = &self.type_map,
+            .intern_map = &self.intern_map,
+            .ptr_types = &self.ptr_types,
             .control_flow = switch (structured_cfg) {
                 true => .{ .structured = .{} },
                 false => .{ .unstructured = .{} },
@@ -309,13 +308,12 @@ const DeclGen = struct {
     /// A map keeping track of which instruction generated which result-id.
     inst_results: InstMap = .{},
 
-    /// A map that maps AIR intern pool indices to SPIR-V cache references.
-    /// See Object.type_map
-    type_map: *TypeMap,
+    /// Maps AIR intern pool indices to SPIR-V result-ids.
+    /// See `Object.intern_map`.
+    intern_map: *InternMap,
 
-    /// Child types of pointers that are currently in progress of being resolved. If a pointer
-    /// is already in this map, its recursive.
-    wip_pointers: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, CacheRef) = .{},
+    /// The module's pointer types; see `Object.ptr_types`.
+    ptr_types: *PtrTypeMap,
 
     /// This field keeps track of the current state wrt structured or unstructured control flow.
     control_flow: ControlFlow,
@@ -402,7 +400,6 @@ const DeclGen = struct {
     pub fn deinit(self: *DeclGen) void {
         self.args.deinit(self.gpa);
         self.inst_results.deinit(self.gpa);
-        self.wip_pointers.deinit(self.gpa);
         self.control_flow.deinit(self.gpa);
         self.func.deinit(self.gpa);
     }
@@ -452,7 +449,7 @@ const DeclGen = struct {
         const mod = self.module;
         const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
 
-        const decl_ptr_ty_ref = try self.ptrType(ty, .Generic);
+        const decl_ptr_ty_id = try self.ptrType(ty, .Generic);
 
         const spv_decl_index = blk: {
             const entry = try self.object.anon_decl_link.getOrPut(self.object.gpa, .{ val, .Function });
@@ -460,7 +457,7 @@ const DeclGen = struct {
                 try self.addFunctionDep(entry.value_ptr.*, .Function);
 
                 const result_id = self.spv.declPtr(entry.value_ptr.*).result_id;
-                return try self.castToGeneric(self.typeId(decl_ptr_ty_ref), result_id);
+                return try self.castToGeneric(decl_ptr_ty_id, result_id);
             }
 
             const spv_decl_index = try self.spv.allocDecl(.invocation_global);
@@ -488,19 +485,14 @@ const DeclGen = struct {
         self.func = .{};
         defer self.func.deinit(self.gpa);
 
-        const void_ty_ref = try self.resolveType(Type.void, .direct);
-        const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
-            .return_type = void_ty_ref,
-            .parameters = &.{},
-        } });
+        const initializer_proto_ty_id = try self.functionType(Type.void, &.{});
 
         const initializer_id = self.spv.allocId();
-
         try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
-            .id_result_type = self.typeId(void_ty_ref),
+            .id_result_type = try self.resolveType(Type.void, .direct),
            .id_result = initializer_id,
            .function_control = .{},
-            .function_type = self.typeId(initializer_proto_ty_ref),
+            .function_type = initializer_proto_ty_id,
        });
        const root_block_id = self.spv.allocId();
        try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
@@ -520,9 +512,9 @@ const DeclGen = struct {
         try self.spv.debugNameFmt(initializer_id, "initializer of __anon_{d}", .{@intFromEnum(val)});
 
-            const fn_decl_ptr_ty_ref = try self.ptrType(ty, .Function);
+            const fn_decl_ptr_ty_id = try self.ptrType(ty, .Function);
             try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
-                .id_result_type = self.typeId(fn_decl_ptr_ty_ref),
+                .id_result_type = fn_decl_ptr_ty_id,
                 .id_result = result_id,
                 .set = try self.spv.importInstructionSet(.zig),
                 .instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
             });
         }
 
-        return try self.castToGeneric(self.typeId(decl_ptr_ty_ref), result_id);
+        return try self.castToGeneric(decl_ptr_ty_id, result_id);
     }
 
     fn addFunctionDep(self: *DeclGen, decl_index: SpvModule.Decl.Index, storage_class: StorageClass) !void {
@@ -696,14 +688,25 @@ const DeclGen = struct {
     /// Emits a bool constant in a particular representation.
     fn constBool(self: *DeclGen, value: bool, repr: Repr) !IdRef {
+        // TODO: Cache?
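+        // Note: When this is reached via constant(), the result is already memoized in
+        // intern_map, keyed on the interned bool value and repr; a cache here would only
+        // additionally cover callers that construct bools directly.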
+
+        const section = &self.spv.sections.types_globals_constants;
         switch (repr) {
             .indirect => {
-                const int_ty_ref = try self.intType(.unsigned, 1);
-                return self.constInt(int_ty_ref, @intFromBool(value));
+                return try self.constInt(Type.u1, @intFromBool(value), .indirect);
             },
             .direct => {
-                const bool_ty_ref = try self.resolveType(Type.bool, .direct);
-                return self.spv.constBool(bool_ty_ref, value);
+                const result_ty_id = try self.resolveType(Type.bool, .direct);
+                const result_id = self.spv.allocId();
+                const operands = .{
+                    .id_result_type = result_ty_id,
+                    .id_result = result_id,
+                };
+                switch (value) {
+                    true => try section.emit(self.spv.gpa, .OpConstantTrue, operands),
+                    false => try section.emit(self.spv.gpa, .OpConstantFalse, operands),
+                }
+                return result_id;
             },
         }
     }
@@ -711,68 +714,63 @@ const DeclGen = struct {
     /// Emits an integer constant.
     /// This function, unlike SpvModule.constInt, takes care to bitcast
     /// the value to an unsigned int first for Kernels.
-    fn constInt(self: *DeclGen, ty_ref: CacheRef, value: anytype) !IdRef {
-        switch (self.spv.cache.lookup(ty_ref)) {
-            .vector_type => |vec_type| {
-                const elem_ids = try self.gpa.alloc(IdRef, vec_type.component_count);
-                defer self.gpa.free(elem_ids);
-                const int_value = try self.constInt(vec_type.component_type, value);
-                @memset(elem_ids, int_value);
-
-                const constituents_id = self.spv.allocId();
-                try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
-                    .id_result_type = self.typeId(ty_ref),
-                    .id_result = constituents_id,
-                    .constituents = elem_ids,
-                });
-                return constituents_id;
-            },
-            else => {},
-        }
-
-        if (value < 0) {
-            const ty = self.spv.cache.lookup(ty_ref).int_type;
-            // Manually truncate the value so that the resulting value
-            // fits within the unsigned type.
-            const bits: u64 = @bitCast(@as(i64, @intCast(value)));
-            const truncated_bits = if (ty.bits == 64)
-                bits
-            else
-                bits & (@as(u64, 1) << @intCast(ty.bits)) - 1;
-            return try self.spv.constInt(ty_ref, truncated_bits);
-        } else {
-            return try self.spv.constInt(ty_ref, value);
-        }
-    }
-
-    /// Emits a float constant
-    fn constFloat(self: *DeclGen, ty_ref: CacheRef, value: f128) !IdRef {
-        switch (self.spv.cache.lookup(ty_ref)) {
-            .vector_type => |vec_type| {
-                const elem_ids = try self.gpa.alloc(IdRef, vec_type.component_count);
-                defer self.gpa.free(elem_ids);
-                const int_value = try self.constFloat(vec_type.component_type, value);
-                @memset(elem_ids, int_value);
-
-                const constituents_id = self.spv.allocId();
-                try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
-                    .id_result_type = self.typeId(ty_ref),
-                    .id_result = constituents_id,
-                    .constituents = elem_ids,
-                });
-                return constituents_id;
-            },
-            else => {},
-        }
-
-        const ty = self.spv.cache.lookup(ty_ref).float_type;
-        return switch (ty.bits) {
-            16 => try self.spv.resolveId(.{ .float = .{ .ty = ty_ref, .value = .{ .float16 = @floatCast(value) } } }),
-            32 => try self.spv.resolveId(.{ .float = .{ .ty = ty_ref, .value = .{ .float32 = @floatCast(value) } } }),
-            64 => try self.spv.resolveId(.{ .float = .{ .ty = ty_ref, .value = .{ .float64 = @floatCast(value) } } }),
-            80, 128 => unreachable, // TODO
-            else => unreachable,
+    fn constInt(self: *DeclGen, ty: Type, value: anytype, repr: Repr) !IdRef {
+        // TODO: Cache?
+        const mod = self.module;
+        const scalar_ty = ty.scalarType(mod);
+        const int_info = scalar_ty.intInfo(mod);
+        // Use backing bits so that negatives are sign-extended.
+        const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int
+
+        const bits: u64 = switch (int_info.signedness) {
+            // Intcast needed to silence compile errors when the wrong path is compiled.
+            // Lazy fix.
+            .signed => @bitCast(@as(i64, @intCast(value))),
+            .unsigned => @as(u64, @intCast(value)),
         };
+
+        // Manually truncate the value to the right amount of bits.
+        const truncated_bits = if (backing_bits == 64)
+            bits
+        else
+            bits & (@as(u64, 1) << @intCast(backing_bits)) - 1;
+
+        const result_ty_id = try self.resolveType(scalar_ty, repr);
+        const result_id = self.spv.allocId();
+
+        const section = &self.spv.sections.types_globals_constants;
+        switch (backing_bits) {
+            0 => unreachable, // u0 is comptime
+            1...32 => try section.emit(self.spv.gpa, .OpConstant, .{
+                .id_result_type = result_ty_id,
+                .id_result = result_id,
+                .value = .{ .uint32 = @truncate(truncated_bits) },
+            }),
+            33...64 => try section.emit(self.spv.gpa, .OpConstant, .{
+                .id_result_type = result_ty_id,
+                .id_result = result_id,
+                .value = .{ .uint64 = truncated_bits },
+            }),
+            else => unreachable, // TODO: Large integer constants
+        }
+
+        if (!ty.isVector(mod)) {
+            return result_id;
+        }
+
+        const n = ty.vectorLen(mod);
+        const ids = try self.gpa.alloc(IdRef, n);
+        defer self.gpa.free(ids);
+        @memset(ids, result_id);
+
+        const vec_ty_id = try self.resolveType(ty, repr);
+        const vec_result_id = self.spv.allocId();
+        try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
+            .id_result_type = vec_ty_id,
+            .id_result = vec_result_id,
+            .constituents = ids,
+        });
+        return vec_result_id;
     }
 
     /// Construct a struct at runtime.
@@ -788,8 +786,8 @@ const DeclGen = struct {
         // TODO: Make this OpCompositeConstruct when we can
         const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
         for (constituents, types, 0..) |constitent_id, member_ty, index| {
-            const ptr_member_ty_ref = try self.ptrType(member_ty, .Function);
-            const ptr_id = try self.accessChain(ptr_member_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+            const ptr_member_ty_id = try self.ptrType(member_ty, .Function);
+            const ptr_id = try self.accessChain(ptr_member_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
             try self.func.body.emit(self.spv.gpa, .OpStore, .{
                 .pointer = ptr_id,
                 .object = constitent_id,
@@ -810,9 +808,9 @@ const DeclGen = struct {
         // TODO: Make this OpCompositeConstruct when we can
         const mod = self.module;
         const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
-        const ptr_elem_ty_ref = try self.ptrType(ty.elemType2(mod), .Function);
+        const ptr_elem_ty_id = try self.ptrType(ty.elemType2(mod), .Function);
         for (constituents, 0..) |constitent_id, index| {
-            const ptr_id = try self.accessChain(ptr_elem_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+            const ptr_id = try self.accessChain(ptr_elem_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
             try self.func.body.emit(self.spv.gpa, .OpStore, .{
                 .pointer = ptr_id,
                 .object = constitent_id,
@@ -834,9 +832,9 @@ const DeclGen = struct {
         // TODO: Make this OpCompositeConstruct when we can
         const mod = self.module;
         const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
-        const ptr_elem_ty_ref = try self.ptrType(ty.elemType2(mod), .Function);
+        const ptr_elem_ty_id = try self.ptrType(ty.elemType2(mod), .Function);
         for (constituents, 0..) |constitent_id, index| {
-            const ptr_id = try self.accessChain(ptr_elem_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+            const ptr_id = try self.accessChain(ptr_elem_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
             try self.func.body.emit(self.spv.gpa, .OpStore, .{
                 .pointer = ptr_id,
                 .object = constitent_id,
@@ -852,258 +850,279 @@ const DeclGen = struct {
     /// is done by emitting a sequence of instructions that initialize the value.
     //
     /// This function should only be called during function code generation.
-    fn constant(self: *DeclGen, ty: Type, arg_val: Value, repr: Repr) !IdRef {
+    fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
+        // Note: The intern_map may only be used for constants that DO NOT generate any runtime code!!
+        // Ideally that should be all constants in the future, or it should be cleaned up somehow. For
+        // now, only use the intern_map on a case-by-case basis by breaking to :cache.
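+        // For example, a scalar int or bool only emits into the constants section, so its
+        // case below ends in `break :cache` and the result-id is memoized. Aggregates go
+        // through constructStruct()/constructArray(), which emit OpStores into the function
+        // body, so those cases `return` directly and stay out of the cache.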
+        if (self.intern_map.get(.{ val.toIntern(), repr })) |id| {
+            return id;
+        }
+
         const mod = self.module;
         const target = self.getTarget();
-        const result_ty_ref = try self.resolveType(ty, repr);
+        const result_ty_id = try self.resolveType(ty, repr);
         const ip = &mod.intern_pool;
-        const val = arg_val;
-
-        log.debug("constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
+        log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
         if (val.isUndefDeep(mod)) {
-            return self.spv.constUndef(result_ty_ref);
+            return self.spv.constUndef(result_ty_id);
         }
 
-        switch (ip.indexToKey(val.toIntern())) {
-            .int_type,
-            .ptr_type,
-            .array_type,
-            .vector_type,
-            .opt_type,
-            .anyframe_type,
-            .error_union_type,
-            .simple_type,
-            .struct_type,
-            .anon_struct_type,
-            .union_type,
-            .opaque_type,
-            .enum_type,
-            .func_type,
-            .error_set_type,
-            .inferred_error_set_type,
-            => unreachable, // types, not values
-
-            .undef => unreachable, // handled above
-
-            .variable,
-            .extern_func,
-            .func,
-            .enum_literal,
-            .empty_enum_value,
-            => unreachable, // non-runtime values
-
-            .simple_value => |simple_value| switch (simple_value) {
-                .undefined,
-                .void,
-                .null,
-                .empty_struct,
-                .@"unreachable",
-                .generic_poison,
-                => unreachable, // non-runtime values
-
-                .false, .true => return try self.constBool(val.toBool(), repr),
-            },
-
-            .int => {
-                if (ty.isSignedInt(mod)) {
-                    return try self.constInt(result_ty_ref, val.toSignedInt(mod));
-                } else {
-                    return try self.constInt(result_ty_ref, val.toUnsignedInt(mod));
-                }
-            },
-            .float => return switch (ty.floatBits(target)) {
-                16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16, mod) } } }),
-                32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32, mod) } } }),
-                64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64, mod) } } }),
-                80, 128 => unreachable, // TODO
-                else => unreachable,
-            },
-            .err => |err| {
-                const value = try mod.getErrorValue(err.name);
-                return try self.constInt(result_ty_ref, value);
-            },
-            .error_union => |error_union| {
-                // TODO: Error unions may be constructed with constant instructions if the payload type
-                // allows it. For now, just generate it here regardless.
-                const err_int_ty = try mod.errorIntType();
-                const err_ty = switch (error_union.val) {
-                    .err_name => ty.errorUnionSet(mod),
-                    .payload => err_int_ty,
-                };
-                const err_val = switch (error_union.val) {
-                    .err_name => |err_name| Value.fromInterned((try mod.intern(.{ .err = .{
-                        .ty = ty.errorUnionSet(mod).toIntern(),
-                        .name = err_name,
-                    } }))),
-                    .payload => try mod.intValue(err_int_ty, 0),
-                };
-                const payload_ty = ty.errorUnionPayload(mod);
-                const eu_layout = self.errorUnionLayout(payload_ty);
-                if (!eu_layout.payload_has_bits) {
-                    // We use the error type directly as the type.
-                    return try self.constant(err_ty, err_val, .indirect);
-                }
-
-                const payload_val = Value.fromInterned(switch (error_union.val) {
-                    .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
-                    .payload => |payload| payload,
-                });
-
-                var constituents: [2]IdRef = undefined;
-                var types: [2]Type = undefined;
-                if (eu_layout.error_first) {
-                    constituents[0] = try self.constant(err_ty, err_val, .indirect);
-                    constituents[1] = try self.constant(payload_ty, payload_val, .indirect);
-                    types = .{ err_ty, payload_ty };
-                } else {
-                    constituents[0] = try self.constant(payload_ty, payload_val, .indirect);
-                    constituents[1] = try self.constant(err_ty, err_val, .indirect);
-                    types = .{ payload_ty, err_ty };
-                }
-
-                return try self.constructStruct(ty, &types, &constituents);
-            },
-            .enum_tag => {
-                const int_val = try val.intFromEnum(ty, mod);
-                const int_ty = ty.intTagType(mod);
-                return try self.constant(int_ty, int_val, repr);
-            },
-            .ptr => return self.constantPtr(ty, val),
-            .slice => |slice| {
-                const ptr_ty = ty.slicePtrFieldType(mod);
-                const ptr_id = try self.constantPtr(ptr_ty, Value.fromInterned(slice.ptr));
-                const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect);
-                return self.constructStruct(
-                    ty,
-                    &.{ ptr_ty, Type.usize },
-                    &.{ ptr_id, len_id },
-                );
-            },
-            .opt => {
-                const payload_ty = ty.optionalChild(mod);
-                const maybe_payload_val = val.optionalValue(mod);
-
-                if (!payload_ty.hasRuntimeBits(mod)) {
-                    return try self.constBool(maybe_payload_val != null, .indirect);
-                } else if (ty.optionalReprIsPayload(mod)) {
-                    // Optional representation is a nullable pointer or slice.
-                    if (maybe_payload_val) |payload_val| {
-                        return try self.constant(payload_ty, payload_val, .indirect);
-                    } else {
-                        const ptr_ty_ref = try self.resolveType(ty, .indirect);
-                        return self.spv.constNull(ptr_ty_ref);
-                    }
-                }
-
-                // Optional representation is a structure.
-                // { Payload, Bool }
-
-                const has_pl_id = try self.constBool(maybe_payload_val != null, .indirect);
-                const payload_id = if (maybe_payload_val) |payload_val|
-                    try self.constant(payload_ty, payload_val, .indirect)
-                else
-                    try self.spv.constUndef(try self.resolveType(payload_ty, .indirect));
-
-                return try self.constructStruct(
-                    ty,
-                    &.{ payload_ty, Type.bool },
-                    &.{ payload_id, has_pl_id },
-                );
-            },
-            .aggregate => |aggregate| switch (ip.indexToKey(ty.ip_index)) {
-                inline .array_type, .vector_type => |array_type, tag| {
-                    const elem_ty = Type.fromInterned(array_type.child);
-                    const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
-
-                    const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(mod)));
-                    defer self.gpa.free(constituents);
-
-                    switch (aggregate.storage) {
-                        .bytes => |bytes| {
-                            // TODO: This is really space inefficient, perhaps there is a better
-                            // way to do it?
-                            for (bytes, 0..) |byte, i| {
-                                constituents[i] = try self.constInt(elem_ty_ref, byte);
-                            }
-                        },
-                        .elems => |elems| {
-                            for (0..@as(usize, @intCast(array_type.len))) |i| {
-                                constituents[i] = try self.constant(elem_ty, Value.fromInterned(elems[i]), .indirect);
-                            }
-                        },
-                        .repeated_elem => |elem| {
-                            const val_id = try self.constant(elem_ty, Value.fromInterned(elem), .indirect);
-                            for (0..@as(usize, @intCast(array_type.len))) |i| {
-                                constituents[i] = val_id;
-                            }
-                        },
-                    }
-
-                    switch (tag) {
-                        inline .array_type => {
-                            if (array_type.sentinel != .none) {
-                                const sentinel = Value.fromInterned(array_type.sentinel);
-                                constituents[constituents.len - 1] = try self.constant(elem_ty, sentinel, .indirect);
-                            }
-                            return self.constructArray(ty, constituents);
-                        },
-                        inline .vector_type => return self.constructVector(ty, constituents),
-                        else => unreachable,
-                    }
-                },
-                .struct_type => {
-                    const struct_type = mod.typeToStruct(ty).?;
-                    if (struct_type.layout == .@"packed") {
-                        return self.todo("packed struct constants", .{});
-                    }
-
-                    var types = std.ArrayList(Type).init(self.gpa);
-                    defer types.deinit();
-
-                    var constituents = std.ArrayList(IdRef).init(self.gpa);
-                    defer constituents.deinit();
-
-                    var it = struct_type.iterateRuntimeOrder(ip);
-                    while (it.next()) |field_index| {
-                        const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-                            // This is a zero-bit field - we only needed it for the alignment.
-                            continue;
-                        }
-
-                        // TODO: Padding?
-                        const field_val = try val.fieldValue(mod, field_index);
-                        const field_id = try self.constant(field_ty, field_val, .indirect);
-
-                        try types.append(field_ty);
-                        try constituents.append(field_id);
-                    }
-
-                    return try self.constructStruct(ty, types.items, constituents.items);
-                },
-                .anon_struct_type => unreachable, // TODO
-                else => unreachable,
-            },
-            .un => |un| {
-                const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
-                const union_obj = mod.typeToUnion(ty).?;
-                const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
-                const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
-                    try self.constant(field_ty, Value.fromInterned(un.val), .direct)
-                else
-                    null;
-                return try self.unionInit(ty, active_field, payload);
-            },
-            .memoized_call => unreachable,
-        }
+        const section = &self.spv.sections.types_globals_constants;
+
+        const cacheable_id = cache: {
+            switch (ip.indexToKey(val.toIntern())) {
+                .int_type,
+                .ptr_type,
+                .array_type,
+                .vector_type,
+                .opt_type,
+                .anyframe_type,
+                .error_union_type,
+                .simple_type,
+                .struct_type,
+                .anon_struct_type,
+                .union_type,
+                .opaque_type,
+                .enum_type,
+                .func_type,
+                .error_set_type,
+                .inferred_error_set_type,
+                => unreachable, // types, not values
+
+                .undef => unreachable, // handled above
+
+                .variable,
+                .extern_func,
+                .func,
+                .enum_literal,
+                .empty_enum_value,
+                => unreachable, // non-runtime values
+
+                .simple_value => |simple_value| switch (simple_value) {
+                    .undefined,
+                    .void,
+                    .null,
+                    .empty_struct,
+                    .@"unreachable",
+                    .generic_poison,
+                    => unreachable, // non-runtime values
+
+                    .false, .true => break :cache try self.constBool(val.toBool(), repr),
+                },
+                .int => {
+                    if (ty.isSignedInt(mod)) {
+                        break :cache try self.constInt(ty, val.toSignedInt(mod), repr);
+                    } else {
+                        break :cache try self.constInt(ty, val.toUnsignedInt(mod), repr);
+                    }
+                },
+                .float => {
+                    const lit: spec.LiteralContextDependentNumber = switch (ty.floatBits(target)) {
+                        16 => .{ .uint32 = @as(u16, @bitCast(val.toFloat(f16, mod))) },
+                        32 => .{ .float32 = val.toFloat(f32, mod) },
+                        64 => .{ .float64 = val.toFloat(f64, mod) },
+                        80, 128 => unreachable, // TODO
+                        else => unreachable,
+                    };
+                    const result_id = self.spv.allocId();
+                    try section.emit(self.spv.gpa, .OpConstant, .{
+                        .id_result_type = result_ty_id,
+                        .id_result = result_id,
+                        .value = lit,
+                    });
+                    break :cache result_id;
+                },
+                .err => |err| {
+                    const value = try mod.getErrorValue(err.name);
+                    break :cache try self.constInt(ty, value, repr);
+                },
+                .error_union => |error_union| {
+                    // TODO: Error unions may be constructed with constant instructions if the payload type
+                    // allows it. For now, just generate it here regardless.
+                    const err_int_ty = try mod.errorIntType();
+                    const err_ty = switch (error_union.val) {
+                        .err_name => ty.errorUnionSet(mod),
+                        .payload => err_int_ty,
+                    };
+                    const err_val = switch (error_union.val) {
+                        .err_name => |err_name| Value.fromInterned((try mod.intern(.{ .err = .{
+                            .ty = ty.errorUnionSet(mod).toIntern(),
+                            .name = err_name,
+                        } }))),
+                        .payload => try mod.intValue(err_int_ty, 0),
+                    };
+                    const payload_ty = ty.errorUnionPayload(mod);
+                    const eu_layout = self.errorUnionLayout(payload_ty);
+                    if (!eu_layout.payload_has_bits) {
+                        // We use the error type directly as the type.
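+                        // (This lowers to just an integer constant, which emits no runtime
+                        // code, so the result is safe to memoize via :cache.)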
+                        break :cache try self.constant(err_ty, err_val, .indirect);
+                    }
+
+                    const payload_val = Value.fromInterned(switch (error_union.val) {
+                        .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
+                        .payload => |payload| payload,
+                    });
+
+                    var constituents: [2]IdRef = undefined;
+                    var types: [2]Type = undefined;
+                    if (eu_layout.error_first) {
+                        constituents[0] = try self.constant(err_ty, err_val, .indirect);
+                        constituents[1] = try self.constant(payload_ty, payload_val, .indirect);
+                        types = .{ err_ty, payload_ty };
+                    } else {
+                        constituents[0] = try self.constant(payload_ty, payload_val, .indirect);
+                        constituents[1] = try self.constant(err_ty, err_val, .indirect);
+                        types = .{ payload_ty, err_ty };
+                    }
+
+                    return try self.constructStruct(ty, &types, &constituents);
+                },
+                .enum_tag => {
+                    const int_val = try val.intFromEnum(ty, mod);
+                    const int_ty = ty.intTagType(mod);
+                    break :cache try self.constant(int_ty, int_val, repr);
+                },
+                .ptr => return self.constantPtr(ty, val),
+                .slice => |slice| {
+                    const ptr_ty = ty.slicePtrFieldType(mod);
+                    const ptr_id = try self.constantPtr(ptr_ty, Value.fromInterned(slice.ptr));
+                    const len_id = try self.constant(Type.usize, Value.fromInterned(slice.len), .indirect);
+                    return self.constructStruct(
+                        ty,
+                        &.{ ptr_ty, Type.usize },
+                        &.{ ptr_id, len_id },
+                    );
+                },
+                .opt => {
+                    const payload_ty = ty.optionalChild(mod);
+                    const maybe_payload_val = val.optionalValue(mod);
+
+                    if (!payload_ty.hasRuntimeBits(mod)) {
+                        break :cache try self.constBool(maybe_payload_val != null, .indirect);
+                    } else if (ty.optionalReprIsPayload(mod)) {
+                        // Optional representation is a nullable pointer or slice.
+                        if (maybe_payload_val) |payload_val| {
+                            return try self.constant(payload_ty, payload_val, .indirect);
+                        } else {
+                            break :cache try self.spv.constNull(result_ty_id);
+                        }
+                    }
+
+                    // Optional representation is a structure.
+                    // { Payload, Bool }
+
+                    const has_pl_id = try self.constBool(maybe_payload_val != null, .indirect);
+                    const payload_id = if (maybe_payload_val) |payload_val|
+                        try self.constant(payload_ty, payload_val, .indirect)
+                    else
+                        try self.spv.constUndef(try self.resolveType(payload_ty, .indirect));
+
+                    return try self.constructStruct(
+                        ty,
+                        &.{ payload_ty, Type.bool },
+                        &.{ payload_id, has_pl_id },
+                    );
+                },
+                .aggregate => |aggregate| switch (ip.indexToKey(ty.ip_index)) {
+                    inline .array_type, .vector_type => |array_type, tag| {
+                        const elem_ty = Type.fromInterned(array_type.child);
+
+                        const constituents = try self.gpa.alloc(IdRef, @as(u32, @intCast(ty.arrayLenIncludingSentinel(mod))));
+                        defer self.gpa.free(constituents);
+
+                        switch (aggregate.storage) {
+                            .bytes => |bytes| {
+                                // TODO: This is really space inefficient, perhaps there is a better
+                                // way to do it?
+                                for (bytes, 0..) |byte, i| {
+                                    constituents[i] = try self.constInt(elem_ty, byte, .indirect);
+                                }
+                            },
+                            .elems => |elems| {
+                                for (0..@as(usize, @intCast(array_type.len))) |i| {
+                                    constituents[i] = try self.constant(elem_ty, Value.fromInterned(elems[i]), .indirect);
+                                }
+                            },
+                            .repeated_elem => |elem| {
+                                const val_id = try self.constant(elem_ty, Value.fromInterned(elem), .indirect);
+                                for (0..@as(usize, @intCast(array_type.len))) |i| {
+                                    constituents[i] = val_id;
+                                }
+                            },
+                        }
+
+                        switch (tag) {
+                            inline .array_type => {
+                                if (array_type.sentinel != .none) {
+                                    const sentinel = Value.fromInterned(array_type.sentinel);
+                                    constituents[constituents.len - 1] = try self.constant(elem_ty, sentinel, .indirect);
+                                }
+                                return self.constructArray(ty, constituents);
+                            },
+                            inline .vector_type => return self.constructVector(ty, constituents),
+                            else => unreachable,
+                        }
+                    },
+                    .struct_type => {
+                        const struct_type = mod.typeToStruct(ty).?;
+                        if (struct_type.layout == .@"packed") {
+                            return self.todo("packed struct constants", .{});
+                        }
+
+                        var types = std.ArrayList(Type).init(self.gpa);
+                        defer types.deinit();
+
+                        var constituents = std.ArrayList(IdRef).init(self.gpa);
+                        defer constituents.deinit();
+
+                        var it = struct_type.iterateRuntimeOrder(ip);
+                        while (it.next()) |field_index| {
+                            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+                            if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+                                // This is a zero-bit field - we only needed it for the alignment.
+                                continue;
+                            }
+
+                            // TODO: Padding?
+                            const field_val = try val.fieldValue(mod, field_index);
+                            const field_id = try self.constant(field_ty, field_val, .indirect);
+
+                            try types.append(field_ty);
+                            try constituents.append(field_id);
+                        }
+
+                        return try self.constructStruct(ty, types.items, constituents.items);
+                    },
+                    .anon_struct_type => unreachable, // TODO
+                    else => unreachable,
+                },
+                .un => |un| {
+                    const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), mod).?;
+                    const union_obj = mod.typeToUnion(ty).?;
+                    const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);
+                    const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
+                        try self.constant(field_ty, Value.fromInterned(un.val), .direct)
+                    else
+                        null;
+                    return try self.unionInit(ty, active_field, payload);
+                },
+                .memoized_call => unreachable,
+            }
+        };
+
+        try self.intern_map.putNoClobber(self.gpa, .{ val.toIntern(), repr }, cacheable_id);
+
+        return cacheable_id;
     }
 
     fn constantPtr(self: *DeclGen, ptr_ty: Type, ptr_val: Value) Error!IdRef {
-        const result_ty_ref = try self.resolveType(ptr_ty, .direct);
+        // TODO: Caching??
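+        // Note: Not all pointer constants are cacheable: the .int and .elem cases below may
+        // emit OpConvertUToPtr/OpBitcast into the function body, which would violate the
+        // "no runtime code" rule for intern_map entries documented in constant().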
+ + const result_ty_id = try self.resolveType(ptr_ty, .direct); const mod = self.module; - if (ptr_val.isUndef(mod)) return self.spv.constUndef(result_ty_ref); + if (ptr_val.isUndef(mod)) return self.spv.constUndef(result_ty_id); switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) { .decl => |decl| return try self.constantDeclRef(ptr_ty, decl), @@ -1114,7 +1133,7 @@ const DeclGen = struct { // that is not implemented by Mesa yet. Therefore, just generate it // as a runtime operation. try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ - .id_result_type = self.typeId(result_ty_ref), + .id_result_type = result_ty_id, .id_result = ptr_id, .integer_value = try self.constant(Type.usize, Value.fromInterned(int), .direct), }); @@ -1126,23 +1145,23 @@ const DeclGen = struct { .elem => |elem_ptr| { const parent_ptr_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base)); const parent_ptr_id = try self.constantPtr(parent_ptr_ty, Value.fromInterned(elem_ptr.base)); - const size_ty_ref = try self.sizeType(); - const index_id = try self.constInt(size_ty_ref, elem_ptr.index); + const index_id = try self.constInt(Type.usize, elem_ptr.index, .direct); const elem_ptr_id = try self.ptrElemPtr(parent_ptr_ty, parent_ptr_id, index_id); // TODO: Can we consolidate this in ptrElemPtr? const elem_ty = parent_ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. - const elem_ptr_ty_ref = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod))); + const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod))); - if (elem_ptr_ty_ref == result_ty_ref) { + // TODO: Can we remove this ID comparison? + if (elem_ptr_ty_id == result_ty_id) { return elem_ptr_id; } // This may happen when we have pointer-to-array and the result is // another pointer-to-array instead of a pointer-to-element. const result_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpBitcast, .{ - .id_result_type = self.typeId(result_ty_ref), + .id_result_type = result_ty_id, .id_result = result_id, .operand = elem_ptr_id, }); @@ -1166,7 +1185,7 @@ const DeclGen = struct { const mod = self.module; const ip = &mod.intern_pool; - const ty_ref = try self.resolveType(ty, .direct); + const ty_id = try self.resolveType(ty, .direct); const decl_val = anon_decl.val; const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); @@ -1181,7 +1200,7 @@ const DeclGen = struct { // const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { // Pointer to nothing - return undefoined - return self.spv.constUndef(ty_ref); + return self.spv.constUndef(ty_id); } if (decl_ty.zigTypeTag(mod) == .Fn) { @@ -1190,14 +1209,14 @@ const DeclGen = struct { // Anon decl refs are always generic. assert(ty.ptrAddressSpace(mod) == .generic); - const decl_ptr_ty_ref = try self.ptrType(decl_ty, .Generic); + const decl_ptr_ty_id = try self.ptrType(decl_ty, .Generic); const ptr_id = try self.resolveAnonDecl(decl_val); - if (decl_ptr_ty_ref != ty_ref) { + if (decl_ptr_ty_id != ty_id) { // Differing pointer types, insert a cast. 
const casted_ptr_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpBitcast, .{ - .id_result_type = self.typeId(ty_ref), + .id_result_type = ty_id, .id_result = casted_ptr_id, .operand = ptr_id, }); @@ -1209,15 +1228,14 @@ const DeclGen = struct { fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: InternPool.DeclIndex) !IdRef { const mod = self.module; - const ty_ref = try self.resolveType(ty, .direct); - const ty_id = self.typeId(ty_ref); + const ty_id = try self.resolveType(ty, .direct); const decl = mod.declPtr(decl_index); switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { .func => { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by a pointer to usize. - return try self.spv.constUndef(ty_ref); + return try self.spv.constUndef(ty_id); }, .extern_func => unreachable, // TODO else => {}, @@ -1225,7 +1243,7 @@ const DeclGen = struct { if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(mod)) { // Pointer to nothing - return undefined. - return self.spv.constUndef(ty_ref); + return self.spv.constUndef(ty_id); } const spv_decl_index = try self.object.resolveDecl(mod, decl_index); @@ -1239,14 +1257,14 @@ const DeclGen = struct { const final_storage_class = self.spvStorageClass(decl.@"addrspace"); try self.addFunctionDep(spv_decl_index, final_storage_class); - const decl_ptr_ty_ref = try self.ptrType(decl.typeOf(mod), final_storage_class); + const decl_ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class); const ptr_id = switch (final_storage_class) { - .Generic => try self.castToGeneric(self.typeId(decl_ptr_ty_ref), decl_id), + .Generic => try self.castToGeneric(decl_ptr_ty_id, decl_id), else => decl_id, }; - if (decl_ptr_ty_ref != ty_ref) { + if (decl_ptr_ty_id != ty_id) { // Differing pointer types, insert a cast. const casted_ptr_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpBitcast, .{ @@ -1261,28 +1279,18 @@ const DeclGen = struct { } // Turn a Zig type's name into a cache reference. - fn resolveTypeName(self: *DeclGen, ty: Type) !CacheString { + fn resolveTypeName(self: *DeclGen, ty: Type) ![]const u8 { var name = std.ArrayList(u8).init(self.gpa); defer name.deinit(); try ty.print(name.writer(), self.module); - return try self.spv.resolveString(name.items); - } - - /// Turn a Zig type into a SPIR-V Type, and return its type result-id. - fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType { - const type_ref = try self.resolveType(ty, .direct); - return self.spv.resultId(type_ref); - } - - fn typeId(self: *DeclGen, ty_ref: CacheRef) IdRef { - return self.spv.resultId(ty_ref); + return try name.toOwnedSlice(); } /// Create an integer type suitable for storing at least 'bits' bits. /// The integer type that is returned by this function is the type that is used to perform /// actual operations (as well as store) a Zig type of a particular number of bits. To create /// a type with an exact size, use SpvModule.intType. - fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !CacheRef { + fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !IdRef { const backing_bits = self.backingIntBits(bits) orelse { // TODO: Integers too big for any native type are represented as "composite integers": // An array of largestSupportedIntBits. 
@@ -1297,36 +1305,69 @@ const DeclGen = struct { return self.spv.intType(.unsigned, backing_bits); } - /// Create an integer type that represents 'usize'. - fn sizeType(self: *DeclGen) !CacheRef { - return try self.intType(.unsigned, self.getTarget().ptrBitWidth()); + fn arrayType(self: *DeclGen, len: u32, child_ty: IdRef) !IdRef { + // TODO: Cache?? + const len_id = try self.constInt(Type.u32, len, .direct); + const result_id = self.spv.allocId(); + + try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeArray, .{ + .id_result = result_id, + .element_type = child_ty, + .length = len_id, + }); + return result_id; } - fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !CacheRef { + fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !IdRef { const key = .{ child_ty.toIntern(), storage_class }; - const entry = try self.wip_pointers.getOrPut(self.gpa, key); + const entry = try self.ptr_types.getOrPut(self.gpa, key); if (entry.found_existing) { - const fwd_ref = entry.value_ptr.*; - try self.spv.cache.recursive_ptrs.put(self.spv.gpa, fwd_ref, {}); - return fwd_ref; + const fwd_id = entry.value_ptr.ty_id; + if (!entry.value_ptr.fwd_emitted) { + try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeForwardPointer, .{ + .pointer_type = fwd_id, + .storage_class = storage_class, + }); + entry.value_ptr.fwd_emitted = true; + } + return fwd_id; } - const fwd_ref = try self.spv.resolve(.{ .fwd_ptr_type = .{ - .zig_child_type = child_ty.toIntern(), + const result_id = self.spv.allocId(); + entry.value_ptr.* = .{ + .ty_id = result_id, + .fwd_emitted = false, + }; + + const child_ty_id = try self.resolveType(child_ty, .indirect); + + try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{ + .id_result = result_id, .storage_class = storage_class, - } }); - entry.value_ptr.* = fwd_ref; + .type = child_ty_id, + }); - const child_ty_ref = try self.resolveType(child_ty, .indirect); - _ = try self.spv.resolve(.{ .ptr_type = .{ - .storage_class = storage_class, - .child_type = child_ty_ref, - .fwd = fwd_ref, - } }); + return result_id; + } - assert(self.wip_pointers.remove(key)); + fn functionType(self: *DeclGen, return_ty: Type, param_types: []const Type) !IdRef { + // TODO: Cache?? - return fwd_ref; + const param_ids = try self.gpa.alloc(IdRef, param_types.len); + defer self.gpa.free(param_ids); + + for (param_types, param_ids) |param_ty, *param_id| { + param_id.* = try self.resolveType(param_ty, .direct); + } + + const ty_id = self.spv.allocId(); + try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeFunction, .{ + .id_result = ty_id, + .return_type = try self.resolveFnReturnType(return_ty), + .id_ref_2 = param_ids, + }); + + return ty_id; } /// Generate a union type. Union types are always generated with the @@ -1347,7 +1388,7 @@ const DeclGen = struct { /// padding: [padding_size]u8, /// } /// If any of the fields' size is 0, it will be omitted. 
- fn resolveUnionType(self: *DeclGen, ty: Type) !CacheRef { + fn resolveUnionType(self: *DeclGen, ty: Type) !IdRef { const mod = self.module; const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; @@ -1362,48 +1403,43 @@ const DeclGen = struct { return try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect); } - if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref; + var member_types: [4]IdRef = undefined; + var member_names: [4][]const u8 = undefined; - var member_types: [4]CacheRef = undefined; - var member_names: [4]CacheString = undefined; - - const u8_ty_ref = try self.intType(.unsigned, 8); // TODO: What if Int8Type is not enabled? + const u8_ty_id = try self.resolveType(Type.u8, .direct); // TODO: What if Int8Type is not enabled? if (layout.tag_size != 0) { - const tag_ty_ref = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect); - member_types[layout.tag_index] = tag_ty_ref; - member_names[layout.tag_index] = try self.spv.resolveString("(tag)"); + const tag_ty_id = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect); + member_types[layout.tag_index] = tag_ty_id; + member_names[layout.tag_index] = "(tag)"; } if (layout.payload_size != 0) { - const payload_ty_ref = try self.resolveType(layout.payload_ty, .indirect); - member_types[layout.payload_index] = payload_ty_ref; - member_names[layout.payload_index] = try self.spv.resolveString("(payload)"); + const payload_ty_id = try self.resolveType(layout.payload_ty, .indirect); + member_types[layout.payload_index] = payload_ty_id; + member_names[layout.payload_index] = "(payload)"; } if (layout.payload_padding_size != 0) { - const payload_padding_ty_ref = try self.spv.arrayType(@intCast(layout.payload_padding_size), u8_ty_ref); - member_types[layout.payload_padding_index] = payload_padding_ty_ref; - member_names[layout.payload_padding_index] = try self.spv.resolveString("(payload padding)"); + const payload_padding_ty_id = try self.arrayType(@intCast(layout.payload_padding_size), u8_ty_id); + member_types[layout.payload_padding_index] = payload_padding_ty_id; + member_names[layout.payload_padding_index] = "(payload padding)"; } if (layout.padding_size != 0) { - const padding_ty_ref = try self.spv.arrayType(@intCast(layout.padding_size), u8_ty_ref); - member_types[layout.padding_index] = padding_ty_ref; - member_names[layout.padding_index] = try self.spv.resolveString("(padding)"); + const padding_ty_id = try self.arrayType(@intCast(layout.padding_size), u8_ty_id); + member_types[layout.padding_index] = padding_ty_id; + member_names[layout.padding_index] = "(padding)"; } - const ty_ref = try self.spv.resolve(.{ .struct_type = .{ - .name = try self.resolveTypeName(ty), - .member_types = member_types[0..layout.total_fields], - .member_names = member_names[0..layout.total_fields], - } }); - - try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref }); - return ty_ref; + const result_id = try self.spv.structType(member_types[0..layout.total_fields], member_names[0..layout.total_fields]); + const type_name = try self.resolveTypeName(ty); + defer self.gpa.free(type_name); + try self.spv.debugName(result_id, type_name); + return result_id; } - fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !CacheRef { + fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !IdRef { const mod = self.module; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { // If the return type is an error set or an error union, then we make this @@ -1420,26 +1456,46 @@ const 
DeclGen = struct { } /// Turn a Zig type into a SPIR-V Type, and return a reference to it. - fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef { + fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef { + if (self.intern_map.get(.{ ty.toIntern(), repr })) |id| { + return id; + } + + const id = try self.resolveTypeInner(ty, repr); + try self.intern_map.put(self.gpa, .{ ty.toIntern(), repr }, id); + return id; + } + + fn resolveTypeInner(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef { const mod = self.module; const ip = &mod.intern_pool; log.debug("resolveType: ty = {}", .{ty.fmt(mod)}); const target = self.getTarget(); + + const section = &self.spv.sections.types_globals_constants; + switch (ty.zigTypeTag(mod)) { .NoReturn => { assert(repr == .direct); - return try self.spv.resolve(.void_type); + return try self.spv.voidType(); }, .Void => switch (repr) { - .direct => return try self.spv.resolve(.void_type), + .direct => { + return try self.spv.voidType(); + }, // Pointers to void - .indirect => return try self.spv.resolve(.{ .opaque_type = .{ - .name = try self.spv.resolveString("void"), - } }), + .indirect => { + const result_id = self.spv.allocId(); + try section.emit(self.spv.gpa, .OpTypeOpaque, .{ + .id_result = result_id, + .literal_string = "void", + }); + return result_id; + }, }, .Bool => switch (repr) { - .direct => return try self.spv.resolve(.bool_type), - .indirect => return try self.intType(.unsigned, 1), + .direct => return try self.spv.boolType(), + .indirect => return try self.resolveType(Type.u1, .indirect), }, .Int => { const int_info = ty.intInfo(mod); @@ -1447,15 +1503,18 @@ const DeclGen = struct { // Some times, the backend will be asked to generate a pointer to i0. OpTypeInt // with 0 bits is invalid, so return an opaque type in this case. assert(repr == .indirect); - return try self.spv.resolve(.{ .opaque_type = .{ - .name = try self.spv.resolveString("u0"), - } }); + const result_id = self.spv.allocId(); + try section.emit(self.spv.gpa, .OpTypeOpaque, .{ + .id_result = result_id, + .literal_string = "u0", + }); + return result_id; } return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { const tag_ty = ty.intTagType(mod); - return self.resolveType(tag_ty, repr); + return try self.resolveType(tag_ty, repr); }, .Float => { // We can (and want) not really emulate floating points with other floating point types like with the integer types, @@ -1473,27 +1532,29 @@ const DeclGen = struct { return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); } - return try self.spv.resolve(.{ .float_type = .{ .bits = bits } }); + return try self.spv.floatType(bits); }, .Array => { - if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref; - const elem_ty = ty.childType(mod); - const elem_ty_ref = try self.resolveType(elem_ty, .indirect); + const elem_ty_id = try self.resolveType(elem_ty, .indirect); const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse { return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)}); }; - const ty_ref = if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { + + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) { // The size of the array would be 0, but that is not allowed in SPIR-V. // This path can be reached when the backend is asked to generate a pointer to // an array of some zero-bit type. This should always be an indirect path. 
assert(repr == .indirect); // We cannot use the child type here, so just use an opaque type. - break :blk try self.spv.resolve(.{ .opaque_type = .{ - .name = try self.spv.resolveString("zero-sized array"), - } }); - } else if (total_len == 0) blk: { + const result_id = self.spv.allocId(); + try section.emit(self.spv.gpa, .OpTypeOpaque, .{ + .id_result = result_id, + .literal_string = "zero-sized array", + }); + return result_id; + } else if (total_len == 0) { // The size of the array would be 0, but that is not allowed in SPIR-V. // This path can be reached for example when there is a slicing of a pointer // that produces a zero-length array. In all cases where this type can be generated, @@ -1503,16 +1564,13 @@ const DeclGen = struct { // In this case, we have an array of a non-zero sized type. In this case, // generate an array of 1 element instead, so that ptr_elem_ptr instructions // can be lowered to ptrAccessChain instead of manually performing the math. - break :blk try self.spv.arrayType(1, elem_ty_ref); - } else try self.spv.arrayType(total_len, elem_ty_ref); - - try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref }); - return ty_ref; + return try self.arrayType(1, elem_ty_id); + } else { + return try self.arrayType(total_len, elem_ty_id); + } }, .Fn => switch (repr) { .direct => { - if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref; - const fn_info = mod.typeToFunc(ty).?; comptime assert(zig_call_abi_ver == 3); @@ -1525,75 +1583,67 @@ const DeclGen = struct { if (fn_info.is_var_args) return self.fail("VarArgs functions are unsupported for SPIR-V", .{}); - const param_ty_refs = try self.gpa.alloc(CacheRef, fn_info.param_types.len); - defer self.gpa.free(param_ty_refs); + // Note: Logic is different from functionType(). + const param_ty_ids = try self.gpa.alloc(IdRef, fn_info.param_types.len); + defer self.gpa.free(param_ty_ids); var param_index: usize = 0; for (fn_info.param_types.get(ip)) |param_ty_index| { const param_ty = Type.fromInterned(param_ty_index); if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - param_ty_refs[param_index] = try self.resolveType(param_ty, .direct); + param_ty_ids[param_index] = try self.resolveType(param_ty, .direct); param_index += 1; } - const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type)); - const ty_ref = try self.spv.resolve(.{ .function_type = .{ - .return_type = return_ty_ref, - .parameters = param_ty_refs[0..param_index], - } }); + const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type)); - try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref }); - return ty_ref; + const result_id = self.spv.allocId(); + try section.emit(self.spv.gpa, .OpTypeFunction, .{ + .id_result = result_id, + .return_type = return_ty_id, + .id_ref_2 = param_ty_ids[0..param_index], + }); + + return result_id; }, .indirect => { // TODO: Represent function pointers properly. // For now, just use an usize type. - return try self.sizeType(); + return try self.resolveType(Type.usize, .indirect); }, }, .Pointer => { const ptr_info = ty.ptrInfo(mod); - // Note: Don't cache this pointer type, it would mess up the recursive pointer functionality - // in ptrType()! 
- const storage_class = self.spvStorageClass(ptr_info.flags.address_space); - const ptr_ty_ref = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class); + const ptr_ty_id = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class); if (ptr_info.flags.size != .Slice) { - return ptr_ty_ref; + return ptr_ty_id; } - const size_ty_ref = try self.sizeType(); - return self.spv.resolve(.{ .struct_type = .{ - .member_types = &.{ ptr_ty_ref, size_ty_ref }, - .member_names = &.{ - try self.spv.resolveString("ptr"), - try self.spv.resolveString("len"), - }, - } }); + const size_ty_id = try self.resolveType(Type.usize, .direct); + return self.spv.structType( + &.{ ptr_ty_id, size_ty_id }, + &.{ "ptr", "len" }, + ); }, .Vector => { - if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref; - const elem_ty = ty.childType(mod); - const elem_ty_ref = try self.resolveType(elem_ty, .indirect); + // TODO: Make `.direct`. + const elem_ty_id = try self.resolveType(elem_ty, .indirect); const len = ty.vectorLen(mod); - const ty_ref = if (self.isVector(ty)) - try self.spv.vectorType(len, elem_ty_ref) - else - try self.spv.arrayType(len, elem_ty_ref); - - try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref }); - return ty_ref; + if (self.isVector(ty)) { + return try self.spv.vectorType(len, elem_ty_id); + } else { + return try self.arrayType(len, elem_ty_id); + } }, .Struct => { - if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref; - const struct_type = switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { - const member_types = try self.gpa.alloc(CacheRef, tuple.values.len); + const member_types = try self.gpa.alloc(IdRef, tuple.values.len); defer self.gpa.free(member_types); var member_index: usize = 0; @@ -1604,13 +1654,11 @@ const DeclGen = struct { member_index += 1; } - const ty_ref = try self.spv.resolve(.{ .struct_type = .{ - .name = try self.resolveTypeName(ty), - .member_types = member_types[0..member_index], - } }); - - try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref }); - return ty_ref; + const result_id = try self.spv.structType(member_types[0..member_index], null); + const type_name = try self.resolveTypeName(ty); + defer self.gpa.free(type_name); + try self.spv.debugName(result_id, type_name); + return result_id; }, .struct_type => ip.loadStructType(ty.toIntern()), else => unreachable, @@ -1620,10 +1668,10 @@ const DeclGen = struct { return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct); } - var member_types = std.ArrayList(CacheRef).init(self.gpa); + var member_types = std.ArrayList(IdRef).init(self.gpa); defer member_types.deinit(); - var member_names = std.ArrayList(CacheString).init(self.gpa); + var member_names = std.ArrayList([]const u8).init(self.gpa); defer member_names.deinit(); var it = struct_type.iterateRuntimeOrder(ip); @@ -1637,17 +1685,14 @@ const DeclGen = struct { const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index}); try member_types.append(try self.resolveType(field_ty, .indirect)); - try member_names.append(try self.spv.resolveString(ip.stringToSlice(field_name))); + try member_names.append(ip.stringToSlice(field_name)); } - const ty_ref = try self.spv.resolve(.{ .struct_type = .{ - .name = try self.resolveTypeName(ty), - .member_types = member_types.items, - .member_names = member_names.items, - } }); - - try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref }); - 
return ty_ref; + const result_id = try self.spv.structType(member_types.items, member_names.items); + const type_name = try self.resolveTypeName(ty); + defer self.gpa.free(type_name); + try self.spv.debugName(result_id, type_name); + return result_id; }, .Optional => { const payload_ty = ty.optionalChild(mod); @@ -1658,77 +1703,58 @@ const DeclGen = struct { return try self.resolveType(Type.bool, .indirect); } - const payload_ty_ref = try self.resolveType(payload_ty, .indirect); + const payload_ty_id = try self.resolveType(payload_ty, .indirect); if (ty.optionalReprIsPayload(mod)) { // Optional is actually a pointer or a slice. - return payload_ty_ref; + return payload_ty_id; } - if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref; + const bool_ty_id = try self.resolveType(Type.bool, .indirect); - const bool_ty_ref = try self.resolveType(Type.bool, .indirect); - - const ty_ref = try self.spv.resolve(.{ .struct_type = .{ - .member_types = &.{ payload_ty_ref, bool_ty_ref }, - .member_names = &.{ - try self.spv.resolveString("payload"), - try self.spv.resolveString("valid"), - }, - } }); - - try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref }); - return ty_ref; + return try self.spv.structType( + &.{ payload_ty_id, bool_ty_id }, + &.{ "payload", "valid" }, + ); }, .Union => return try self.resolveUnionType(ty), - .ErrorSet => return try self.intType(.unsigned, 16), + .ErrorSet => return try self.resolveType(Type.u16, repr), .ErrorUnion => { const payload_ty = ty.errorUnionPayload(mod); - const error_ty_ref = try self.resolveType(Type.anyerror, .indirect); + const error_ty_id = try self.resolveType(Type.anyerror, .indirect); const eu_layout = self.errorUnionLayout(payload_ty); if (!eu_layout.payload_has_bits) { - return error_ty_ref; + return error_ty_id; } - if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref; + const payload_ty_id = try self.resolveType(payload_ty, .indirect); - const payload_ty_ref = try self.resolveType(payload_ty, .indirect); - - var member_types: [2]CacheRef = undefined; - var member_names: [2]CacheString = undefined; + var member_types: [2]IdRef = undefined; + var member_names: [2][]const u8 = undefined; if (eu_layout.error_first) { // Put the error first - member_types = .{ error_ty_ref, payload_ty_ref }; - member_names = .{ - try self.spv.resolveString("error"), - try self.spv.resolveString("payload"), - }; + member_types = .{ error_ty_id, payload_ty_id }; + member_names = .{ "error", "payload" }; // TODO: ABI padding? } else { // Put the payload first. - member_types = .{ payload_ty_ref, error_ty_ref }; - member_names = .{ - try self.spv.resolveString("payload"), - try self.spv.resolveString("error"), - }; + member_types = .{ payload_ty_id, error_ty_id }; + member_names = .{ "payload", "error" }; // TODO: ABI padding? 
             }
 
-            const ty_ref = try self.spv.resolve(.{ .struct_type = .{
-                .name = try self.resolveTypeName(ty),
-                .member_types = &member_types,
-                .member_names = &member_names,
-            } });
-
-            try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
-            return ty_ref;
+            return try self.spv.structType(&member_types, &member_names);
         },
         .Opaque => {
-            return try self.spv.resolve(.{
-                .opaque_type = .{
-                    .name = .none, // TODO
-                },
+            const type_name = try self.resolveTypeName(ty);
+            defer self.gpa.free(type_name);
+
+            const result_id = self.spv.allocId();
+            try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+                .id_result = result_id,
+                .literal_string = type_name,
             });
+            return result_id;
         },
         .Null,
@@ -1736,9 +1762,10 @@ const DeclGen = struct {
         .EnumLiteral,
         .ComptimeFloat,
         .ComptimeInt,
+        .Type,
         => unreachable, // Must be comptime.
 
-        else => |tag| return self.todo("Implement zig type '{}'", .{tag}),
+        .Frame, .AnyFrame => unreachable, // TODO
     }
 }
@@ -1887,7 +1914,6 @@ const DeclGen = struct {
         result_ty: Type,
         ty: Type,
         /// Always in direct representation.
-        ty_ref: CacheRef,
         ty_id: IdRef,
         /// True if the input is an array type.
         is_array: bool,
@@ -1947,14 +1973,13 @@ const DeclGen = struct {
         @memset(results, undefined);
 
         const ty = if (is_array) result_ty.scalarType(mod) else result_ty;
-        const ty_ref = try self.resolveType(ty, .direct);
+        const ty_id = try self.resolveType(ty, .direct);
 
         return .{
             .dg = self,
             .result_ty = result_ty,
             .ty = ty,
-            .ty_ref = ty_ref,
-            .ty_id = self.typeId(ty_ref),
+            .ty_id = ty_id,
             .is_array = is_array,
             .results = results,
         };
@@ -1981,16 +2006,13 @@ const DeclGen = struct {
     /// TODO is to also write out the error as a function call parameter, and to somehow fetch
     /// the name of an error in the text executor.
     fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
-        const anyerror_ty_ref = try self.resolveType(Type.anyerror, .direct);
-        const ptr_anyerror_ty_ref = try self.ptrType(Type.anyerror, .CrossWorkgroup);
-        const void_ty_ref = try self.resolveType(Type.void, .direct);
-
-        const kernel_proto_ty_ref = try self.spv.resolve(.{
-            .function_type = .{
-                .return_type = void_ty_ref,
-                .parameters = &.{ptr_anyerror_ty_ref},
-            },
+        const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);
+        const ptr_anyerror_ty = try self.module.ptrType(.{
+            .child = Type.anyerror.toIntern(),
+            .flags = .{ .address_space = .global },
         });
+        const ptr_anyerror_ty_id = try self.resolveType(ptr_anyerror_ty, .direct);
+        const kernel_proto_ty_id = try self.functionType(Type.void, &.{ptr_anyerror_ty});
 
         const test_id = self.spv.declPtr(spv_test_decl_index).result_id;
 
@@ -2002,20 +2024,20 @@ const DeclGen = struct {
 
         const section = &self.spv.sections.functions;
         try section.emit(self.spv.gpa, .OpFunction, .{
-            .id_result_type = self.typeId(void_ty_ref),
+            .id_result_type = try self.resolveType(Type.void, .direct),
             .id_result = kernel_id,
             .function_control = .{},
-            .function_type = self.typeId(kernel_proto_ty_ref),
+            .function_type = kernel_proto_ty_id,
         });
         try section.emit(self.spv.gpa, .OpFunctionParameter, .{
-            .id_result_type = self.typeId(ptr_anyerror_ty_ref),
+            .id_result_type = ptr_anyerror_ty_id,
             .id_result = p_error_id,
         });
         try section.emit(self.spv.gpa, .OpLabel, .{
             .id_result = self.spv.allocId(),
         });
         try section.emit(self.spv.gpa, .OpFunctionCall, .{
-            .id_result_type = self.typeId(anyerror_ty_ref),
+            .id_result_type = anyerror_ty_id,
             .id_result = error_id,
             .function = test_id,
         });
@@ -2047,17 +2069,17 @@ const DeclGen = struct {
             .func => {
                 assert(decl.typeOf(mod).zigTypeTag(mod) == .Fn);
                 const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
-                const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
+                const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
 
-                const prototype_ty_ref = try self.resolveType(decl.typeOf(mod), .direct);
+                const prototype_ty_id = try self.resolveType(decl.typeOf(mod), .direct);
                 try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
-                    .id_result_type = self.typeId(return_ty_ref),
+                    .id_result_type = return_ty_id,
                     .id_result = result_id,
                     .function_control = switch (fn_info.cc) {
                         .Inline => .{ .Inline = true },
                         else => .{},
                     },
-                    .function_type = self.typeId(prototype_ty_ref),
+                    .function_type = prototype_ty_id,
                 });
 
                 comptime assert(zig_call_abi_ver == 3);
@@ -2066,7 +2088,7 @@ const DeclGen = struct {
                     const param_ty = Type.fromInterned(param_ty_index);
                     if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-                    const param_type_id = try self.resolveTypeId(param_ty);
+                    const param_type_id = try self.resolveType(param_ty, .direct);
                     const arg_result_id = self.spv.allocId();
                     try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
                         .id_result_type = param_type_id,
@@ -2122,10 +2144,10 @@ const DeclGen = struct {
                 const final_storage_class = self.spvStorageClass(decl.@"addrspace");
                 assert(final_storage_class != .Generic); // These should be instance globals
 
-                const ptr_ty_ref = try self.ptrType(decl.typeOf(mod), final_storage_class);
+                const ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class);
 
                 try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpVariable, .{
-                    .id_result_type = self.typeId(ptr_ty_ref),
+                    .id_result_type = ptr_ty_id,
                     .id_result = result_id,
                     .storage_class = final_storage_class,
                 });
@@ -2145,22 +2167,18 @@ const DeclGen = struct {
 
                 try self.spv.declareDeclDeps(spv_decl_index, &.{});
 
-                const ptr_ty_ref = try self.ptrType(decl.typeOf(mod), .Function);
+                const ptr_ty_id = try self.ptrType(decl.typeOf(mod), .Function);
 
                 if (maybe_init_val) |init_val| {
                     // TODO: Combine with resolveAnonDecl?
-                    const void_ty_ref = try self.resolveType(Type.void, .direct);
-                    const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
-                        .return_type = void_ty_ref,
-                        .parameters = &.{},
-                    } });
+                    const initializer_proto_ty_id = try self.functionType(Type.void, &.{});
 
                     const initializer_id = self.spv.allocId();
                     try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
-                        .id_result_type = self.typeId(void_ty_ref),
+                        .id_result_type = try self.resolveType(Type.void, .direct),
                         .id_result = initializer_id,
                         .function_control = .{},
-                        .function_type = self.typeId(initializer_proto_ty_ref),
+                        .function_type = initializer_proto_ty_id,
                     });
 
                     const root_block_id = self.spv.allocId();
@@ -2183,7 +2201,7 @@ const DeclGen = struct {
                     try self.spv.debugNameFmt(initializer_id, "initializer of {s}", .{fqn});
 
                     try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
-                        .id_result_type = self.typeId(ptr_ty_ref),
+                        .id_result_type = ptr_ty_id,
                         .id_result = result_id,
                         .set = try self.spv.importInstructionSet(.zig),
                         .instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
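The `functionType` helper that replaces the old `spv.resolve(.{ .function_type = ... })` calls is not itself shown in this diff. A minimal sketch of what it presumably does, assuming a `DeclGen` method next to `resolveType` (the emit shape follows the `OpTypeFunction` emission visible in the Assembler changes below):

// Sketch only, not part of this diff: lower a Zig function type directly
// to an OpTypeFunction in the types/globals/constants section.
fn functionType(self: *DeclGen, return_ty: Type, param_types: []const Type) !IdRef {
    const return_ty_id = try self.resolveType(return_ty, .direct);
    const param_ids = try self.gpa.alloc(IdRef, param_types.len);
    defer self.gpa.free(param_ids);
    for (param_ids, param_types) |*id, ty| {
        id.* = try self.resolveType(ty, .direct);
    }

    const result_id = self.spv.allocId();
    try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeFunction, .{
        .id_result = result_id,
        .return_type = return_ty_id,
        .id_ref_2 = param_ids,
    });
    return result_id;
}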
@@ -2191,7 +2209,7 @@ const DeclGen = struct {
                     });
                 } else {
                     try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
-                        .id_result_type = self.typeId(ptr_ty_ref),
+                        .id_result_type = ptr_ty_id,
                         .id_result = result_id,
                         .set = try self.spv.importInstructionSet(.zig),
                         .instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
@@ -2202,12 +2220,12 @@ const DeclGen = struct {
         }
     }
 
-    fn intFromBool(self: *DeclGen, result_ty_ref: CacheRef, condition_id: IdRef) !IdRef {
-        const zero_id = try self.constInt(result_ty_ref, 0);
-        const one_id = try self.constInt(result_ty_ref, 1);
+    fn intFromBool(self: *DeclGen, ty: Type, condition_id: IdRef) !IdRef {
+        const zero_id = try self.constInt(ty, 0, .direct);
+        const one_id = try self.constInt(ty, 1, .direct);
         const result_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpSelect, .{
-            .id_result_type = self.typeId(result_ty_ref),
+            .id_result_type = try self.resolveType(ty, .direct),
             .id_result = result_id,
             .condition = condition_id,
             .object_1 = one_id,
@@ -2222,15 +2240,12 @@ const DeclGen = struct {
         const mod = self.module;
         return switch (ty.zigTypeTag(mod)) {
             .Bool => blk: {
-                const direct_bool_ty_ref = try self.resolveType(ty, .direct);
-                const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
-                const zero_id = try self.constInt(indirect_bool_ty_ref, 0);
                 const result_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
-                    .id_result_type = self.typeId(direct_bool_ty_ref),
+                    .id_result_type = try self.resolveType(Type.bool, .direct),
                     .id_result = result_id,
                     .operand_1 = operand_id,
-                    .operand_2 = zero_id,
+                    .operand_2 = try self.constBool(false, .indirect),
                 });
                 break :blk result_id;
             },
@@ -2243,20 +2258,17 @@ const DeclGen = struct {
     fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
         const mod = self.module;
         return switch (ty.zigTypeTag(mod)) {
-            .Bool => blk: {
-                const indirect_bool_ty_ref = try self.resolveType(ty, .indirect);
-                break :blk self.intFromBool(indirect_bool_ty_ref, operand_id);
-            },
+            .Bool => try self.intFromBool(Type.u1, operand_id),
             else => operand_id,
         };
     }
 
     fn extractField(self: *DeclGen, result_ty: Type, object: IdRef, field: u32) !IdRef {
-        const result_ty_ref = try self.resolveType(result_ty, .indirect);
+        const result_ty_id = try self.resolveType(result_ty, .indirect);
         const result_id = self.spv.allocId();
         const indexes = [_]u32{field};
         try self.func.body.emit(self.spv.gpa, .OpCompositeExtract, .{
-            .id_result_type = self.typeId(result_ty_ref),
+            .id_result_type = result_ty_id,
             .id_result = result_id,
             .composite = object,
             .indexes = &indexes,
@@ -2270,13 +2282,13 @@ const DeclGen = struct {
     };
 
     fn load(self: *DeclGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef {
-        const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect);
+        const indirect_value_ty_id = try self.resolveType(value_ty, .indirect);
         const result_id = self.spv.allocId();
         const access = spec.MemoryAccess.Extended{
             .Volatile = options.is_volatile,
         };
         try self.func.body.emit(self.spv.gpa, .OpLoad, .{
-            .id_result_type = self.typeId(indirect_value_ty_ref),
+            .id_result_type = indirect_value_ty_id,
             .id_result = result_id,
             .pointer = ptr_id,
             .memory_access = access,
@@ -2488,7 +2500,8 @@ const DeclGen = struct {
 
         const result_ty = self.typeOfIndex(inst);
         const shift_ty = self.typeOf(bin_op.rhs);
-        const shift_ty_ref = try self.resolveType(shift_ty, .direct);
+        const scalar_result_ty_id = try self.resolveType(result_ty.scalarType(mod), .direct);
+        const scalar_shift_ty_id = try self.resolveType(shift_ty.scalarType(mod), .direct);
 
         const info = self.arithmeticTypeInfo(result_ty);
         switch (info.class) {
@@ -2505,7 +2518,7 @@ const DeclGen = struct {
 
             // Sometimes Zig doesn't make both of the arguments the same types here. SPIR-V expects that,
             // so just manually upcast it if required.
-            const shift_id = if (shift_ty_ref != wip.ty_ref) blk: {
+            const shift_id = if (scalar_shift_ty_id != scalar_result_ty_id) blk: {
                 const shift_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
                     .id_result_type = wip.ty_id,
@@ -2529,7 +2542,7 @@ const DeclGen = struct {
                 try self.func.body.emit(self.spv.gpa, unsigned, args);
             }
 
-            result_id.* = try self.normalize(wip.ty_ref, value_id, info);
+            result_id.* = try self.normalize(wip.ty, value_id, info);
         }
         return try wip.finalize();
     }
@@ -2622,7 +2635,7 @@ const DeclGen = struct {
     /// - Signed integers are also sign extended if they are negative.
     /// All other values are returned unmodified (this makes strange integer
     /// wrapping easier to use in generic operations).
-    fn normalize(self: *DeclGen, ty_ref: CacheRef, value_id: IdRef, info: ArithmeticTypeInfo) !IdRef {
+    fn normalize(self: *DeclGen, ty: Type, value_id: IdRef, info: ArithmeticTypeInfo) !IdRef {
        switch (info.class) {
            .integer, .bool, .float => return value_id,
            .composite_integer => unreachable, // TODO
@@ -2630,9 +2643,9 @@ const DeclGen = struct {
                 .unsigned => {
                     const mask_value = if (info.bits == 64) 0xFFFF_FFFF_FFFF_FFFF else (@as(u64, 1) << @as(u6, @intCast(info.bits))) - 1;
                     const result_id = self.spv.allocId();
-                    const mask_id = try self.constInt(ty_ref, mask_value);
+                    const mask_id = try self.constInt(ty, mask_value, .direct);
                     try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
-                        .id_result_type = self.typeId(ty_ref),
+                        .id_result_type = try self.resolveType(ty, .direct),
                         .id_result = result_id,
                         .operand_1 = value_id,
                         .operand_2 = mask_id,
@@ -2641,17 +2654,17 @@ const DeclGen = struct {
                 },
                 .signed => {
                     // Shift left and right so that we can copy the sign bit that way.
-                    const shift_amt_id = try self.constInt(ty_ref, info.backing_bits - info.bits);
+                    const shift_amt_id = try self.constInt(ty, info.backing_bits - info.bits, .direct);
                     const left_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, .OpShiftLeftLogical, .{
-                        .id_result_type = self.typeId(ty_ref),
+                        .id_result_type = try self.resolveType(ty, .direct),
                         .id_result = left_id,
                         .base = value_id,
                         .shift = shift_amt_id,
                     });
                     const right_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, .OpShiftRightArithmetic, .{
-                        .id_result_type = self.typeId(ty_ref),
+                        .id_result_type = try self.resolveType(ty, .direct),
                         .id_result = right_id,
                         .base = left_id,
                         .shift = shift_amt_id,
@@ -2667,13 +2680,13 @@ const DeclGen = struct {
         const lhs_id = try self.resolve(bin_op.lhs);
         const rhs_id = try self.resolve(bin_op.rhs);
         const ty = self.typeOfIndex(inst);
-        const ty_ref = try self.resolveType(ty, .direct);
+        const ty_id = try self.resolveType(ty, .direct);
 
         const info = self.arithmeticTypeInfo(ty);
         switch (info.class) {
             .composite_integer => unreachable, // TODO
             .integer, .strange_integer => {
-                const zero_id = try self.constInt(ty_ref, 0);
-                const one_id = try self.constInt(ty_ref, 1);
+                const zero_id = try self.constInt(ty, 0, .direct);
+                const one_id = try self.constInt(ty, 1, .direct);
 
                 // (a ^ b) > 0
                 const bin_bitwise_id = try self.binOpSimple(ty, lhs_id, rhs_id, .OpBitwiseXor);
@@ -2696,14 +2709,14 @@ const DeclGen = struct {
                 const negative_div_id = try self.arithOp(ty, negative_div_lhs, rhs_abs, .OpFDiv, .OpSDiv, .OpUDiv);
                 const negated_negative_div_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpSNegate, .{
-                    .id_result_type = self.typeId(ty_ref),
+                    .id_result_type = ty_id,
                     .id_result = negated_negative_div_id,
                     .operand = negative_div_id,
                 });
 
                 const result_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpSelect, .{
-                    .id_result_type = self.typeId(ty_ref),
+                    .id_result_type = ty_id,
                     .id_result = result_id,
                     .condition = is_positive_id,
                     .object_1 = positive_div_id,
@@ -2728,7 +2741,7 @@ const DeclGen = struct {
 
     fn floor(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
         const target = self.getTarget();
-        const ty_ref = try self.resolveType(ty, .direct);
+        const ty_id = try self.resolveType(ty, .direct);
         const ext_inst: Word = switch (target.os.tag) {
             .opencl => 25,
             .vulkan => 8,
@@ -2742,7 +2755,7 @@ const DeclGen = struct {
 
         const result_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpExtInst, .{
-            .id_result_type = self.typeId(ty_ref),
+            .id_result_type = ty_id,
             .id_result = result_id,
             .set = set_id,
             .instruction = .{ .inst = ext_inst },
@@ -2819,7 +2832,7 @@ const DeclGen = struct {
             // TODO: Trap on overflow? Probably going to be annoying.
             // TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap.
 
-            result_id.* = try self.normalize(wip.ty_ref, value_id, info);
+            result_id.* = try self.normalize(wip.ty, value_id, info);
         }
 
         return try wip.finalize();
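The strange-integer handling in `normalize` is easiest to see with concrete numbers. A host-side Zig test (illustration only, not backend code) of the mask-for-unsigned and shift-pair-for-signed tricks:

const std = @import("std");

test "normalize: mask unsigned, shift-extend signed strange integers" {
    // unsigned u3 in an 8-bit backing word: AND with (1 << 3) - 1
    const raw: u8 = 0b1111_1101;
    try std.testing.expectEqual(@as(u8, 0b101), raw & ((1 << 3) - 1));

    // signed i3 in an 8-bit backing word: shift left by 8 - 3, then
    // arithmetic-shift right to copy the sign bit back down
    const value: u8 = 0b101; // -3 as an i3, stored zero-extended
    const shift: u3 = 8 - 3;
    const extended = @as(i8, @bitCast(value << shift)) >> shift;
    try std.testing.expectEqual(@as(i8, -3), extended);
}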
@@ -2897,11 +2910,12 @@ const DeclGen = struct {
         const operand_ty = self.typeOf(extra.lhs);
         const ov_ty = result_ty.structFieldType(1, self.module);
 
-        const bool_ty_ref = try self.resolveType(Type.bool, .direct);
-        const cmp_ty_ref = if (self.isVector(operand_ty))
-            try self.spv.vectorType(operand_ty.vectorLen(mod), bool_ty_ref)
+        const bool_ty_id = try self.resolveType(Type.bool, .direct);
+        const cmp_ty_id = if (self.isVector(operand_ty))
+            // TODO: Resolving a vector type with .direct should return a SPIR-V vector
+            try self.spv.vectorType(operand_ty.vectorLen(mod), try self.resolveType(Type.bool, .direct))
         else
-            bool_ty_ref;
+            bool_ty_id;
 
         const info = self.arithmeticTypeInfo(operand_ty);
         switch (info.class) {
@@ -2929,7 +2943,7 @@ const DeclGen = struct {
             });
 
             // Normalize the result so that the comparisons go well
-            result_id.* = try self.normalize(wip_result.ty_ref, value_id, info);
+            result_id.* = try self.normalize(wip_result.ty, value_id, info);
 
             const overflowed_id = switch (info.signedness) {
                 .unsigned => blk: {
@@ -2937,7 +2951,7 @@ const DeclGen = struct {
                     // For subtraction the conditions need to be swapped.
                     const overflowed_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, ucmp, .{
-                        .id_result_type = self.typeId(cmp_ty_ref),
+                        .id_result_type = cmp_ty_id,
                         .id_result = overflowed_id,
                         .operand_1 = result_id.*,
                         .operand_2 = lhs_elem_id,
@@ -2963,9 +2977,9 @@ const DeclGen = struct {
                     //   = (rhs < 0) == (lhs > value)
 
                     const rhs_lt_zero_id = self.spv.allocId();
-                    const zero_id = try self.constInt(wip_result.ty_ref, 0);
+                    const zero_id = try self.constInt(wip_result.ty, 0, .direct);
                     try self.func.body.emit(self.spv.gpa, .OpSLessThan, .{
-                        .id_result_type = self.typeId(cmp_ty_ref),
+                        .id_result_type = cmp_ty_id,
                         .id_result = rhs_lt_zero_id,
                         .operand_1 = rhs_elem_id,
                         .operand_2 = zero_id,
@@ -2973,7 +2987,7 @@ const DeclGen = struct {
 
                     const value_gt_lhs_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, scmp, .{
-                        .id_result_type = self.typeId(cmp_ty_ref),
+                        .id_result_type = cmp_ty_id,
                         .id_result = value_gt_lhs_id,
                         .operand_1 = lhs_elem_id,
                         .operand_2 = result_id.*,
@@ -2981,7 +2995,7 @@ const DeclGen = struct {
 
                     const overflowed_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, .OpLogicalEqual, .{
-                        .id_result_type = self.typeId(cmp_ty_ref),
+                        .id_result_type = cmp_ty_id,
                         .id_result = overflowed_id,
                         .operand_1 = rhs_lt_zero_id,
                         .operand_2 = value_gt_lhs_id,
@@ -2990,7 +3004,7 @@ const DeclGen = struct {
                 },
             };
 
-            ov_id.* = try self.intFromBool(wip_ov.ty_ref, overflowed_id);
+            ov_id.* = try self.intFromBool(wip_ov.ty, overflowed_id);
         }
 
         return try self.constructStruct(
@@ -3022,9 +3036,9 @@ const DeclGen = struct {
         var wip_ov = try self.elementWise(ov_ty, true);
         defer wip_ov.deinit();
 
-        const zero_id = try self.constInt(wip_result.ty_ref, 0);
-        const zero_ov_id = try self.constInt(wip_ov.ty_ref, 0);
-        const one_ov_id = try self.constInt(wip_ov.ty_ref, 1);
+        const zero_id = try self.constInt(wip_result.ty, 0, .direct);
+        const zero_ov_id = try self.constInt(wip_ov.ty, 0, .direct);
+        const one_ov_id = try self.constInt(wip_ov.ty, 1, .direct);
 
         for (wip_result.results, wip_ov.results, 0..) |*result_id, *ov_id, i| {
             const lhs_elem_id = try wip_result.elementAt(operand_ty, lhs, i);
@@ -3065,15 +3079,17 @@ const DeclGen = struct {
         const result_ty = self.typeOfIndex(inst);
         const operand_ty = self.typeOf(extra.lhs);
         const shift_ty = self.typeOf(extra.rhs);
-        const shift_ty_ref = try self.resolveType(shift_ty, .direct);
+        const scalar_shift_ty_id = try self.resolveType(shift_ty.scalarType(mod), .direct);
+        const scalar_operand_ty_id = try self.resolveType(operand_ty.scalarType(mod), .direct);
 
         const ov_ty = result_ty.structFieldType(1, self.module);
 
-        const bool_ty_ref = try self.resolveType(Type.bool, .direct);
-        const cmp_ty_ref = if (self.isVector(operand_ty))
-            try self.spv.vectorType(operand_ty.vectorLen(mod), bool_ty_ref)
+        const bool_ty_id = try self.resolveType(Type.bool, .direct);
+        const cmp_ty_id = if (self.isVector(operand_ty))
+            // TODO: Resolving a vector type with .direct should return a SPIR-V vector
+            try self.spv.vectorType(operand_ty.vectorLen(mod), try self.resolveType(Type.bool, .direct))
         else
-            bool_ty_ref;
+            bool_ty_id;
 
         const info = self.arithmeticTypeInfo(operand_ty);
         switch (info.class) {
@@ -3092,7 +3108,7 @@ const DeclGen = struct {
 
             // Sometimes Zig doesn't make both of the arguments the same types here. SPIR-V expects that,
             // so just manually upcast it if required.
-            const shift_id = if (shift_ty_ref != wip_result.ty_ref) blk: {
+            const shift_id = if (scalar_shift_ty_id != scalar_operand_ty_id) blk: {
                 const shift_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
                     .id_result_type = wip_result.ty_id,
@@ -3109,7 +3125,7 @@ const DeclGen = struct {
                 .base = lhs_elem_id,
                 .shift = shift_id,
             });
-            result_id.* = try self.normalize(wip_result.ty_ref, value_id, info);
+            result_id.* = try self.normalize(wip_result.ty, value_id, info);
 
             const right_shift_id = self.spv.allocId();
             switch (info.signedness) {
@@ -3133,13 +3149,13 @@ const DeclGen = struct {
 
             const overflowed_id = self.spv.allocId();
             try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
-                .id_result_type = self.typeId(cmp_ty_ref),
+                .id_result_type = cmp_ty_id,
                 .id_result = overflowed_id,
                 .operand_1 = lhs_elem_id,
                 .operand_2 = right_shift_id,
            });
 
-            ov_id.* = try self.intFromBool(wip_ov.ty_ref, overflowed_id);
+            ov_id.* = try self.intFromBool(wip_ov.ty, overflowed_id);
         }
 
         return try self.constructStruct(
@@ -3204,8 +3220,7 @@ const DeclGen = struct {
         defer wip.deinit();
 
         const elem_ty = if (wip.is_array) operand_ty.scalarType(mod) else operand_ty;
-        const elem_ty_ref = try self.resolveType(elem_ty, .direct);
-        const elem_ty_id = self.typeId(elem_ty_ref);
+        const elem_ty_id = try self.resolveType(elem_ty, .direct);
 
         for (wip.results, 0..) |*result_id, i| {
             const elem = try wip.elementAt(operand_ty, operand, i);
@@ -3230,6 +3245,8 @@ const DeclGen = struct {
                 .id_ref_4 = &.{elem},
             });
 
+            // TODO: This comparison should be removed.
+            // It's valid because SpvModule caches numeric types.
             if (wip.ty_id == elem_ty_id) {
                 result_id.* = tmp;
                 continue;
@@ -3276,8 +3293,7 @@ const DeclGen = struct {
         const operand = try self.resolve(reduce.operand);
         const operand_ty = self.typeOf(reduce.operand);
         const scalar_ty = operand_ty.scalarType(mod);
-        const scalar_ty_ref = try self.resolveType(scalar_ty, .direct);
-        const scalar_ty_id = self.typeId(scalar_ty_ref);
+        const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
 
         const info = self.arithmeticTypeInfo(operand_ty);
 
@@ -3351,7 +3367,7 @@ const DeclGen = struct {
         for (wip.results, 0..) |*result_id, i| {
             const elem = try mask.elemValue(mod, i);
             if (elem.isUndef(mod)) {
-                result_id.* = try self.spv.constUndef(wip.ty_ref);
+                result_id.* = try self.spv.constUndef(wip.ty_id);
                 continue;
             }
 
@@ -3366,11 +3382,10 @@ const DeclGen = struct {
     }
 
     fn indicesToIds(self: *DeclGen, indices: []const u32) ![]IdRef {
-        const index_ty_ref = try self.intType(.unsigned, 32);
         const ids = try self.gpa.alloc(IdRef, indices.len);
         errdefer self.gpa.free(ids);
         for (indices, ids) |index, *id| {
-            id.* = try self.constInt(index_ty_ref, index);
+            id.* = try self.constInt(Type.u32, index, .direct);
         }
 
         return ids;
@@ -3378,13 +3393,13 @@ const DeclGen = struct {
 
     fn accessChainId(
         self: *DeclGen,
-        result_ty_ref: CacheRef,
+        result_ty_id: IdRef,
         base: IdRef,
         indices: []const IdRef,
     ) !IdRef {
         const result_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpInBoundsAccessChain, .{
-            .id_result_type = self.typeId(result_ty_ref),
+            .id_result_type = result_ty_id,
             .id_result = result_id,
             .base = base,
             .indexes = indices,
@@ -3398,18 +3413,18 @@ const DeclGen = struct {
     /// is the latter and PtrAccessChain is the former.
     fn accessChain(
         self: *DeclGen,
-        result_ty_ref: CacheRef,
+        result_ty_id: IdRef,
         base: IdRef,
         indices: []const u32,
     ) !IdRef {
         const ids = try self.indicesToIds(indices);
         defer self.gpa.free(ids);
-        return try self.accessChainId(result_ty_ref, base, ids);
+        return try self.accessChainId(result_ty_id, base, ids);
     }
 
     fn ptrAccessChain(
         self: *DeclGen,
-        result_ty_ref: CacheRef,
+        result_ty_id: IdRef,
         base: IdRef,
         element: IdRef,
         indices: []const u32,
@@ -3419,7 +3434,7 @@ const DeclGen = struct {
 
         const result_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
-            .id_result_type = self.typeId(result_ty_ref),
+            .id_result_type = result_ty_id,
             .id_result = result_id,
             .base = base,
             .element = element,
@@ -3430,21 +3445,21 @@ const DeclGen = struct {
 
     fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
         const mod = self.module;
-        const result_ty_ref = try self.resolveType(result_ty, .direct);
+        const result_ty_id = try self.resolveType(result_ty, .direct);
 
         switch (ptr_ty.ptrSize(mod)) {
             .One => {
                 // Pointer to array
                 // TODO: Is this correct?
-                return try self.accessChainId(result_ty_ref, ptr_id, &.{offset_id});
+                return try self.accessChainId(result_ty_id, ptr_id, &.{offset_id});
             },
             .C, .Many => {
-                return try self.ptrAccessChain(result_ty_ref, ptr_id, offset_id, &.{});
+                return try self.ptrAccessChain(result_ty_id, ptr_id, offset_id, &.{});
             },
             .Slice => {
                 // TODO: This is probably incorrect. A slice should be returned here, though this is what llvm does.
                 const slice_ptr_id = try self.extractField(result_ty, ptr_id, 0);
-                return try self.ptrAccessChain(result_ty_ref, slice_ptr_id, offset_id, &.{});
+                return try self.ptrAccessChain(result_ty_id, slice_ptr_id, offset_id, &.{});
             },
         }
     }
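The distinction the `accessChain` doc comment draws maps onto familiar pointer arithmetic: OpAccessChain navigates within the pointed-to object, while OpPtrAccessChain's leading `element` operand first steps the base pointer itself. A rough Zig-level analogy for the `ptrAdd` cases above (illustration only, not backend code):

// Illustration only: the Zig-level operations the SPIR-V chains correspond to.
fn manyPtrAdd(comptime T: type, base: [*]T, offset: usize) [*]T {
    // .C / .Many: OpInBoundsPtrAccessChain with element = offset
    return base + offset;
}

fn arrayElemPtr(comptime N: usize, comptime T: type, base: *[N]T, offset: usize) *T {
    // .One (pointer-to-array): OpInBoundsAccessChain with a single index
    return &base[offset];
}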
@@ -3467,12 +3482,12 @@ const DeclGen = struct {
         const ptr_ty = self.typeOf(bin_op.lhs);
         const offset_id = try self.resolve(bin_op.rhs);
         const offset_ty = self.typeOf(bin_op.rhs);
-        const offset_ty_ref = try self.resolveType(offset_ty, .direct);
+        const offset_ty_id = try self.resolveType(offset_ty, .direct);
         const result_ty = self.typeOfIndex(inst);
 
         const negative_offset_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpSNegate, .{
-            .id_result_type = self.typeId(offset_ty_ref),
+            .id_result_type = offset_ty_id,
             .id_result = negative_offset_id,
             .operand = offset_id,
         });
@@ -3490,7 +3505,7 @@ const DeclGen = struct {
         const mod = self.module;
         var cmp_lhs_id = lhs_id;
         var cmp_rhs_id = rhs_id;
-        const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+        const bool_ty_id = try self.resolveType(Type.bool, .direct);
         const op_ty = switch (ty.zigTypeTag(mod)) {
             .Int, .Bool, .Float => ty,
             .Enum => ty.intTagType(mod),
@@ -3502,7 +3517,7 @@ const DeclGen = struct {
                 cmp_lhs_id = self.spv.allocId();
                 cmp_rhs_id = self.spv.allocId();
 
-                const usize_ty_id = self.typeId(try self.sizeType());
+                const usize_ty_id = try self.resolveType(Type.usize, .direct);
 
                 try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
                     .id_result_type = usize_ty_id,
@@ -3564,20 +3579,20 @@ const DeclGen = struct {
                     const pl_eq_id = try self.cmp(op, Type.bool, payload_ty, lhs_pl_id, rhs_pl_id);
                     const lhs_not_valid_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, .OpLogicalNot, .{
-                        .id_result_type = self.typeId(bool_ty_ref),
+                        .id_result_type = bool_ty_id,
                         .id_result = lhs_not_valid_id,
                         .operand = lhs_valid_id,
                     });
                     const impl_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, .OpLogicalOr, .{
-                        .id_result_type = self.typeId(bool_ty_ref),
+                        .id_result_type = bool_ty_id,
                         .id_result = impl_id,
                         .operand_1 = lhs_not_valid_id,
                         .operand_2 = pl_eq_id,
                     });
                     const result_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, .OpLogicalAnd, .{
-                        .id_result_type = self.typeId(bool_ty_ref),
+                        .id_result_type = bool_ty_id,
                         .id_result = result_id,
                         .operand_1 = valid_eq_id,
                         .operand_2 = impl_id,
@@ -3590,14 +3605,14 @@ const DeclGen = struct {
 
                     const impl_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, .OpLogicalAnd, .{
-                        .id_result_type = self.typeId(bool_ty_ref),
+                        .id_result_type = bool_ty_id,
                         .id_result = impl_id,
                         .operand_1 = lhs_valid_id,
                         .operand_2 = pl_neq_id,
                     });
                     const result_id = self.spv.allocId();
                     try self.func.body.emit(self.spv.gpa, .OpLogicalOr, .{
-                        .id_result_type = self.typeId(bool_ty_ref),
+                        .id_result_type = bool_ty_id,
                         .id_result = result_id,
                         .operand_1 = valid_neq_id,
                         .operand_2 = impl_id,
@@ -3665,7 +3680,7 @@ const DeclGen = struct {
 
         const result_id = self.spv.allocId();
         try self.func.body.emitRaw(self.spv.gpa, opcode, 4);
-        self.func.body.writeOperand(spec.IdResultType, self.typeId(bool_ty_ref));
+        self.func.body.writeOperand(spec.IdResultType, bool_ty_id);
         self.func.body.writeOperand(spec.IdResult, result_id);
         self.func.body.writeOperand(spec.IdResultType, cmp_lhs_id);
         self.func.body.writeOperand(spec.IdResultType, cmp_rhs_id);
@@ -3698,6 +3713,7 @@ const DeclGen = struct {
         return try self.cmp(op, result_ty, ty, lhs_id, rhs_id);
     }
 
+    /// Bitcast one type to another. Note: both the input and output types are expected in **direct** representation.
     fn bitCast(
         self: *DeclGen,
         dst_ty: Type,
@@ -3705,13 +3721,11 @@ const DeclGen = struct {
         src_id: IdRef,
     ) !IdRef {
         const mod = self.module;
-        const src_ty_ref = try self.resolveType(src_ty, .direct);
-        const dst_ty_ref = try self.resolveType(dst_ty, .direct);
-        const src_key = self.spv.cache.lookup(src_ty_ref);
-        const dst_key = self.spv.cache.lookup(dst_ty_ref);
+        const src_ty_id = try self.resolveType(src_ty, .direct);
+        const dst_ty_id = try self.resolveType(dst_ty, .direct);
 
         const result_id = blk: {
-            if (src_ty_ref == dst_ty_ref) {
+            if (src_ty_id == dst_ty_id) {
                 break :blk src_id;
             }
 
@@ -3721,7 +3735,7 @@ const DeclGen = struct {
             if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
                 const result_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
-                    .id_result_type = self.typeId(dst_ty_ref),
+                    .id_result_type = dst_ty_id,
                     .id_result = result_id,
                     .integer_value = src_id,
                 });
@@ -3731,10 +3745,11 @@ const DeclGen = struct {
             // We can only use OpBitcast for specific conversions: between numerical types, and
             // between pointers. If the resolved spir-v types fall into this category then emit OpBitcast,
             // otherwise use a temporary and perform a pointer cast.
-            if ((src_key.isNumericalType() and dst_key.isNumericalType()) or (src_key == .ptr_type and dst_key == .ptr_type)) {
+            const can_bitcast = (src_ty.isNumeric(mod) and dst_ty.isNumeric(mod)) or (src_ty.isPtrAtRuntime(mod) and dst_ty.isPtrAtRuntime(mod));
+            if (can_bitcast) {
                 const result_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
-                    .id_result_type = self.typeId(dst_ty_ref),
+                    .id_result_type = dst_ty_id,
                     .id_result = result_id,
                     .operand = src_id,
                 });
@@ -3742,13 +3757,13 @@ const DeclGen = struct {
                 break :blk result_id;
             }
 
-            const dst_ptr_ty_ref = try self.ptrType(dst_ty, .Function);
+            const dst_ptr_ty_id = try self.ptrType(dst_ty, .Function);
 
             const tmp_id = try self.alloc(src_ty, .{ .storage_class = .Function });
             try self.store(src_ty, tmp_id, src_id, .{});
             const casted_ptr_id = self.spv.allocId();
             try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
-                .id_result_type = self.typeId(dst_ptr_ty_ref),
+                .id_result_type = dst_ptr_ty_id,
                 .id_result = casted_ptr_id,
                 .operand = tmp_id,
             });
@@ -3761,7 +3776,7 @@ const DeclGen = struct {
         // should we change the representation of strange integers?
         if (dst_ty.zigTypeTag(mod) == .Int) {
             const info = self.arithmeticTypeInfo(dst_ty);
-            return try self.normalize(dst_ty_ref, result_id, info);
+            return try self.normalize(dst_ty, result_id, info);
         }
 
         return result_id;
@@ -3811,7 +3826,7 @@ const DeclGen = struct {
             // type, we don't need to normalize when growing the type. The
             // representation is already the same.
             if (dst_info.bits < src_info.bits) {
-                result_id.* = try self.normalize(wip.ty_ref, value_id, dst_info);
+                result_id.* = try self.normalize(wip.ty, value_id, dst_info);
             } else {
                 result_id.* = value_id;
            }
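The fallback at the end of `bitCast` is the classic store-and-reload cast: spill the value to a Function-storage temporary, bitcast the pointer, and load back through it. In host terms it is roughly this (illustration only; assumes @sizeOf(Dst) <= @sizeOf(Src)):

fn bitCastViaMemory(comptime Dst: type, comptime Src: type, src: Src) Dst {
    var tmp: Src = src; // OpVariable (Function storage) + OpStore
    const casted: *align(@alignOf(Src)) const Dst = @ptrCast(&tmp); // OpBitcast of the pointer
    return casted.*; // OpLoad through the casted pointer
}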
@@ -3820,7 +3835,7 @@ const DeclGen = struct {
     }
 
     fn intFromPtr(self: *DeclGen, operand_id: IdRef) !IdRef {
-        const result_type_id = try self.resolveTypeId(Type.usize);
+        const result_type_id = try self.resolveType(Type.usize, .direct);
         const result_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
             .id_result_type = result_type_id,
@@ -3841,21 +3856,21 @@ const DeclGen = struct {
         const operand_ty = self.typeOf(ty_op.operand);
         const operand_id = try self.resolve(ty_op.operand);
         const result_ty = self.typeOfIndex(inst);
-        const result_ty_ref = try self.resolveType(result_ty, .direct);
-        return try self.floatFromInt(result_ty_ref, operand_ty, operand_id);
+        return try self.floatFromInt(result_ty, operand_ty, operand_id);
     }
 
-    fn floatFromInt(self: *DeclGen, result_ty_ref: CacheRef, operand_ty: Type, operand_id: IdRef) !IdRef {
+    fn floatFromInt(self: *DeclGen, result_ty: Type, operand_ty: Type, operand_id: IdRef) !IdRef {
         const operand_info = self.arithmeticTypeInfo(operand_ty);
         const result_id = self.spv.allocId();
+        const result_ty_id = try self.resolveType(result_ty, .direct);
         switch (operand_info.signedness) {
             .signed => try self.func.body.emit(self.spv.gpa, .OpConvertSToF, .{
-                .id_result_type = self.typeId(result_ty_ref),
+                .id_result_type = result_ty_id,
                 .id_result = result_id,
                 .signed_value = operand_id,
             }),
             .unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertUToF, .{
-                .id_result_type = self.typeId(result_ty_ref),
+                .id_result_type = result_ty_id,
                 .id_result = result_id,
                 .unsigned_value = operand_id,
             }),
@@ -3872,16 +3887,16 @@ const DeclGen = struct {
 
     fn intFromFloat(self: *DeclGen, result_ty: Type, operand_id: IdRef) !IdRef {
         const result_info = self.arithmeticTypeInfo(result_ty);
-        const result_ty_ref = try self.resolveType(result_ty, .direct);
+        const result_ty_id = try self.resolveType(result_ty, .direct);
         const result_id = self.spv.allocId();
         switch (result_info.signedness) {
             .signed => try self.func.body.emit(self.spv.gpa, .OpConvertFToS, .{
-                .id_result_type = self.typeId(result_ty_ref),
+                .id_result_type = result_ty_id,
                 .id_result = result_id,
                 .float_value = operand_id,
             }),
             .unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertFToU, .{
-                .id_result_type = self.typeId(result_ty_ref),
+                .id_result_type = result_ty_id,
                 .id_result = result_id,
                 .float_value = operand_id,
             }),
@@ -3898,7 +3913,7 @@ const DeclGen = struct {
         defer wip.deinit();
         for (wip.results, 0..) |*result_id, i| {
             const elem_id = try wip.elementAt(Type.bool, operand_id, i);
-            result_id.* = try self.intFromBool(wip.ty_ref, elem_id);
+            result_id.* = try self.intFromBool(wip.ty, elem_id);
         }
         return try wip.finalize();
     }
@@ -3907,7 +3922,7 @@ const DeclGen = struct {
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
         const dest_ty = self.typeOfIndex(inst);
-        const dest_ty_id = try self.resolveTypeId(dest_ty);
+        const dest_ty_id = try self.resolveType(dest_ty, .direct);
 
         const result_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpFConvert, .{
@@ -3957,18 +3972,17 @@ const DeclGen = struct {
 
         const slice_ty = self.typeOfIndex(inst);
         const elem_ptr_ty = slice_ty.slicePtrFieldType(mod);
-        const elem_ptr_ty_ref = try self.resolveType(elem_ptr_ty, .direct);
-        const size_ty_ref = try self.sizeType();
+        const elem_ptr_ty_id = try self.resolveType(elem_ptr_ty, .direct);
 
         const array_ptr_id = try self.resolve(ty_op.operand);
-        const len_id = try self.constInt(size_ty_ref, array_ty.arrayLen(mod));
+        const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct);
 
         const elem_ptr_id = if (!array_ty.hasRuntimeBitsIgnoreComptime(mod))
             // Note: The pointer is something like *opaque{}, so we need to bitcast it to the element type.
             try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id)
         else
             // Convert the pointer-to-array to a pointer to the first element.
-            try self.accessChain(elem_ptr_ty_ref, array_ptr_id, &.{0});
+            try self.accessChain(elem_ptr_ty_id, array_ptr_id, &.{0});
 
         return try self.constructStruct(
             slice_ty,
@@ -4092,8 +4106,8 @@ const DeclGen = struct {
                 const array_ty = ty.childType(mod);
                 const elem_ty = array_ty.childType(mod);
                 const abi_size = elem_ty.abiSize(mod);
-                const usize_ty_ref = try self.resolveType(Type.usize, .direct);
-                return self.spv.constInt(usize_ty_ref, array_ty.arrayLenIncludingSentinel(mod) * abi_size);
+                const size = array_ty.arrayLenIncludingSentinel(mod) * abi_size;
+                return try self.constInt(Type.usize, size, .direct);
             },
             .Many, .C => unreachable,
         }
@@ -4142,10 +4156,10 @@ const DeclGen = struct {
         const index_id = try self.resolve(bin_op.rhs);
 
         const ptr_ty = self.typeOfIndex(inst);
-        const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
+        const ptr_ty_id = try self.resolveType(ptr_ty, .direct);
 
         const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
-        return try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
+        return try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
     }
 
     fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -4158,10 +4172,10 @@ const DeclGen = struct {
         const index_id = try self.resolve(bin_op.rhs);
 
         const ptr_ty = slice_ty.slicePtrFieldType(mod);
-        const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
+        const ptr_ty_id = try self.resolveType(ptr_ty, .direct);
 
         const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
-        const elem_ptr = try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
+        const elem_ptr = try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
         return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) });
     }
 
@@ -4169,14 +4183,14 @@ const DeclGen = struct {
         const mod = self.module;
         // Construct new pointer type for the resulting pointer
         const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
-        const elem_ptr_ty_ref = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
+        const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
 
         if (ptr_ty.isSinglePointer(mod)) {
             // Pointer-to-array. In this case, the resulting pointer is not of the same type
             // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
-            return try self.accessChainId(elem_ptr_ty_ref, ptr_id, &.{index_id});
+            return try self.accessChainId(elem_ptr_ty_id, ptr_id, &.{index_id});
         } else {
             // Resulting pointer type is the same as the ptr_ty, so use ptrAccessChain
-            return try self.ptrAccessChain(elem_ptr_ty_ref, ptr_id, index_id, &.{});
+            return try self.ptrAccessChain(elem_ptr_ty_id, ptr_id, index_id, &.{});
         }
     }
 
@@ -4209,11 +4223,11 @@ const DeclGen = struct {
 
         // For now, just generate a temporary and use that.
        // TODO: This backend probably also should use isByRef from llvm...
-        const elem_ptr_ty_ref = try self.ptrType(elem_ty, .Function);
+        const elem_ptr_ty_id = try self.ptrType(elem_ty, .Function);
 
         const tmp_id = try self.alloc(array_ty, .{ .storage_class = .Function });
         try self.store(array_ty, tmp_id, array_id, .{});
 
-        const elem_ptr_id = try self.accessChainId(elem_ptr_ty_ref, tmp_id, &.{index_id});
+        const elem_ptr_id = try self.accessChainId(elem_ptr_ty_id, tmp_id, &.{index_id});
         return try self.load(elem_ty, elem_ptr_id, .{});
     }
 
@@ -4238,13 +4252,13 @@ const DeclGen = struct {
         const scalar_ty = vector_ty.scalarType(mod);
 
         const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(mod));
-        const scalar_ptr_ty_ref = try self.ptrType(scalar_ty, storage_class);
+        const scalar_ptr_ty_id = try self.ptrType(scalar_ty, storage_class);
 
         const vector_ptr = try self.resolve(data.vector_ptr);
         const index = try self.resolve(extra.lhs);
         const operand = try self.resolve(extra.rhs);
 
-        const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_ref, vector_ptr, &.{index});
+        const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index});
         try self.store(scalar_ty, elem_ptr_id, operand, .{
             .is_volatile = vector_ptr_ty.isVolatilePtr(mod),
         });
@@ -4260,7 +4274,7 @@ const DeclGen = struct {
         if (layout.tag_size == 0) return;
 
         const tag_ty = un_ty.unionTagTypeSafety(mod).?;
-        const tag_ptr_ty_ref = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
+        const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
 
         const union_ptr_id = try self.resolve(bin_op.lhs);
         const new_tag_id = try self.resolve(bin_op.rhs);
@@ -4268,7 +4282,7 @@ const DeclGen = struct {
         if (!layout.has_payload) {
             try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
         } else {
-            const ptr_id = try self.accessChain(tag_ptr_ty_ref, union_ptr_id, &.{layout.tag_index});
+            const ptr_id = try self.accessChain(tag_ptr_ty_id, union_ptr_id, &.{layout.tag_index});
             try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
         }
     }
@@ -4298,6 +4312,8 @@ const DeclGen = struct {
         // union type, then get the field pointer and pointer-cast it to the
         // right type to store it. Finally load the entire union.
 
+        // Note: The result here is not cached, because it generates runtime code.
+
         const mod = self.module;
         const ip = &mod.intern_pool;
         const union_ty = mod.typeToUnion(ty).?;
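The strategy that comment describes reuses the same temporary-variable trick seen in `bitCast`: write the tag field, reinterpret the payload slot through a pointer cast, then read the whole union back. A host-side illustration (names and layout invented for the example, not backend code):

const UnionRepr = extern struct {
    payload: u64, // payload storage area; u64 stands in for layout.payload_ty
    tag: u32, // tag storage; position mirrors layout.tag_index
};

fn initUnionRepr(tag: u32, value: f64) UnionRepr {
    var tmp: UnionRepr = undefined; // alloc(..., .Function)
    tmp.tag = tag; // accessChain to the tag field + store
    const active_pl_ptr: *f64 = @ptrCast(&tmp.payload); // OpBitcast of the payload pointer
    active_pl_ptr.* = value; // store through the active payload pointer
    return tmp; // load the entire union
}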
@@ -4316,28 +4332,26 @@ const DeclGen = struct {
         } else 0;
 
         if (!layout.has_payload) {
-            const tag_ty_ref = try self.resolveType(tag_ty, .direct);
-            return try self.constInt(tag_ty_ref, tag_int);
+            return try self.constInt(tag_ty, tag_int, .direct);
         }
 
         const tmp_id = try self.alloc(ty, .{ .storage_class = .Function });
 
         if (layout.tag_size != 0) {
-            const tag_ty_ref = try self.resolveType(tag_ty, .direct);
-            const tag_ptr_ty_ref = try self.ptrType(tag_ty, .Function);
-            const ptr_id = try self.accessChain(tag_ptr_ty_ref, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
-            const tag_id = try self.constInt(tag_ty_ref, tag_int);
+            const tag_ptr_ty_id = try self.ptrType(tag_ty, .Function);
+            const ptr_id = try self.accessChain(tag_ptr_ty_id, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
+            const tag_id = try self.constInt(tag_ty, tag_int, .direct);
             try self.store(tag_ty, ptr_id, tag_id, .{});
         }
 
         const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
         if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-            const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
-            const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
-            const active_pl_ptr_ty_ref = try self.ptrType(payload_ty, .Function);
+            const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
+            const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
+            const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function);
             const active_pl_ptr_id = self.spv.allocId();
             try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
-                .id_result_type = self.typeId(active_pl_ptr_ty_ref),
+                .id_result_type = active_pl_ptr_ty_id,
                 .id_result = active_pl_ptr_id,
                 .operand = pl_ptr_id,
             });
@@ -4396,13 +4410,13 @@ const DeclGen = struct {
         const tmp_id = try self.alloc(object_ty, .{ .storage_class = .Function });
         try self.store(object_ty, tmp_id, object_id, .{});
 
-        const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
-        const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
+        const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
+        const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
 
-        const active_pl_ptr_ty_ref = try self.ptrType(field_ty, .Function);
+        const active_pl_ptr_ty_id = try self.ptrType(field_ty, .Function);
         const active_pl_ptr_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
-            .id_result_type = self.typeId(active_pl_ptr_ty_ref),
+            .id_result_type = active_pl_ptr_ty_id,
             .id_result = active_pl_ptr_id,
             .operand = pl_ptr_id,
         });
@@ -4419,9 +4433,7 @@ const DeclGen = struct {
         const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
         const parent_ty = ty_pl.ty.toType().childType(mod);
-        const res_ty = try self.resolveType(ty_pl.ty.toType(), .indirect);
-        const usize_ty = Type.usize;
-        const usize_ty_ref = try self.resolveType(usize_ty, .direct);
+        const result_ty_id = try self.resolveType(ty_pl.ty.toType(), .indirect);
 
         const field_ptr = try self.resolve(extra.field_ptr);
         const field_ptr_int = try self.intFromPtr(field_ptr);
@@ -4430,13 +4442,13 @@ const DeclGen = struct {
 
         const base_ptr_int = base_ptr_int: {
             if (field_offset == 0) break :base_ptr_int field_ptr_int;
-            const field_offset_id = try self.constInt(usize_ty_ref, field_offset);
-            break :base_ptr_int try self.binOpSimple(usize_ty, field_ptr_int, field_offset_id, .OpISub);
+            const field_offset_id = try self.constInt(Type.usize, field_offset, .direct);
+            break :base_ptr_int try self.binOpSimple(Type.usize, field_ptr_int, field_offset_id, .OpISub);
         };
 
         const base_ptr = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
-            .id_result_type = self.spv.resultId(res_ty),
+            .id_result_type = result_ty_id,
             .id_result = base_ptr,
             .integer_value = base_ptr_int,
         });
@@ -4451,7 +4463,7 @@ const DeclGen = struct {
         object_ptr: IdRef,
         field_index: u32,
     ) !IdRef {
-        const result_ty_ref = try self.resolveType(result_ptr_ty, .direct);
+        const result_ty_id = try self.resolveType(result_ptr_ty, .direct);
         const mod = self.module;
         const object_ty = object_ptr_ty.childType(mod);
 
@@ -4459,7 +4471,7 @@ const DeclGen = struct {
             .Struct => switch (object_ty.containerLayout(mod)) {
                 .@"packed" => unreachable, // TODO
                 else => {
-                    return try self.accessChain(result_ty_ref, object_ptr, &.{field_index});
+                    return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
                 },
             },
             .Union => switch (object_ty.containerLayout(mod)) {
@@ -4469,16 +4481,16 @@ const DeclGen = struct {
                     if (!layout.has_payload) {
                         // Asked to get a pointer to a zero-sized field. Just lower this
                         // to undefined, there is no reason to make it be a valid pointer.
-                        return try self.spv.constUndef(result_ty_ref);
+                        return try self.spv.constUndef(result_ty_id);
                    }
 
                    const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(mod));
-                    const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, storage_class);
-                    const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, object_ptr, &.{layout.payload_index});
+                    const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class);
+                    const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
 
                    const active_pl_ptr_id = self.spv.allocId();
                    try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
-                        .id_result_type = self.typeId(result_ty_ref),
+                        .id_result_type = result_ty_id,
                        .id_result = active_pl_ptr_id,
                        .operand = pl_ptr_id,
                    });
@@ -4506,7 +4518,7 @@ const DeclGen = struct {
     };
 
     // Allocate a function-local variable, with possible initializer.
-    // This function returns a pointer to a variable of type `ty_ref`,
+    // This function returns a pointer to a variable of type `ty`,
     // which is in the Generic address space. The variable is actually
     // placed in the Function address space.
     fn alloc(
@@ -4514,13 +4526,13 @@ const DeclGen = struct {
         ty: Type,
         options: AllocOptions,
     ) !IdRef {
-        const ptr_fn_ty_ref = try self.ptrType(ty, .Function);
+        const ptr_fn_ty_id = try self.ptrType(ty, .Function);
 
         // SPIR-V requires that OpVariable declarations for locals go into the first block, so we are just going to
         // directly generate them into func.prologue instead of the body.
         const var_id = self.spv.allocId();
         try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
-            .id_result_type = self.typeId(ptr_fn_ty_ref),
+            .id_result_type = ptr_fn_ty_id,
             .id_result = var_id,
             .storage_class = .Function,
             .initializer = options.initializer,
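`airFieldParentPtr` above lowers `@fieldParentPtr` as plain integer arithmetic on the pointer. The host-level equivalent (illustration only, not backend code):

const Parent = struct { a: u32, b: u64 };

fn parentFromFieldPtr(field_ptr: *u64) *Parent {
    const field_offset = @offsetOf(Parent, "b");
    const field_int = @intFromPtr(field_ptr); // OpConvertPtrToU
    const base_int = field_int - field_offset; // OpISub, skipped when the offset is 0
    return @ptrFromInt(base_int); // OpConvertUToPtr
}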
@@ -4533,9 +4545,9 @@ const DeclGen = struct {
 
         switch (options.storage_class) {
             .Generic => {
-                const ptr_gn_ty_ref = try self.ptrType(ty, .Generic);
+                const ptr_gn_ty_id = try self.ptrType(ty, .Generic);
                 // Convert to a generic pointer
-                return self.castToGeneric(self.typeId(ptr_gn_ty_ref), var_id);
+                return self.castToGeneric(ptr_gn_ty_id, var_id);
             },
             .Function => return var_id,
             else => unreachable,
@@ -4563,9 +4575,9 @@ const DeclGen = struct {
         assert(self.control_flow == .structured);
 
         const result_id = self.spv.allocId();
-        const block_id_ty_ref = try self.intType(.unsigned, 32);
+        const block_id_ty_id = try self.resolveType(Type.u32, .direct);
         try self.func.body.emitRaw(self.spv.gpa, .OpPhi, @intCast(2 + incoming.len * 2)); // result type + result + variable/parent...
-        self.func.body.writeOperand(spec.IdResultType, self.typeId(block_id_ty_ref));
+        self.func.body.writeOperand(spec.IdResultType, block_id_ty_id);
         self.func.body.writeOperand(spec.IdRef, result_id);
 
         for (incoming) |incoming_block| {
@@ -4663,8 +4675,8 @@ const DeclGen = struct {
             // Make sure that we are still in a block when exiting the function.
             // TODO: Can we get rid of that?
             try self.beginSpvBlock(self.spv.allocId());
-            const block_id_ty_ref = try self.intType(.unsigned, 32);
-            return try self.spv.constUndef(block_id_ty_ref);
+            const block_id_ty_id = try self.resolveType(Type.u32, .direct);
+            return try self.spv.constUndef(block_id_ty_id);
         }
 
         // The top-most merge actually only has a single source, the
@@ -4745,7 +4757,7 @@ const DeclGen = struct {
         assert(block.label != null);
 
         const result_id = self.spv.allocId();
-        const result_type_id = try self.resolveTypeId(ty);
+        const result_type_id = try self.resolveType(ty, .direct);
 
         try self.func.body.emitRaw(
             self.spv.gpa,
@@ -4781,12 +4793,11 @@ const DeclGen = struct {
                 assert(cf.block_stack.items.len > 0);
 
                 // Check if the target of the branch was this current block.
-                const block_id_ty_ref = try self.intType(.unsigned, 32);
-                const this_block = try self.constInt(block_id_ty_ref, @intFromEnum(inst));
+                const this_block = try self.constInt(Type.u32, @intFromEnum(inst), .direct);
                 const jump_to_this_block_id = self.spv.allocId();
-                const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+                const bool_ty_id = try self.resolveType(Type.bool, .direct);
                 try self.func.body.emit(self.spv.gpa, .OpIEqual, .{
-                    .id_result_type = self.typeId(bool_ty_ref),
+                    .id_result_type = bool_ty_id,
                     .id_result = jump_to_this_block_id,
                     .operand_1 = next_block,
                     .operand_2 = this_block,
@@ -4862,8 +4873,7 @@ const DeclGen = struct {
                     try self.store(operand_ty, block_result_var_id, operand_id, .{});
                 }
 
-                const block_id_ty_ref = try self.intType(.unsigned, 32);
-                const next_block = try self.constInt(block_id_ty_ref, @intFromEnum(br.block_inst));
+                const next_block = try self.constInt(Type.u32, @intFromEnum(br.block_inst), .direct);
                 try self.structuredBreak(next_block);
             },
             .unstructured => |cf| {
@@ -5026,8 +5036,7 @@ const DeclGen = struct {
             // Functions with an empty error set are emitted with an error code
            // return type and return zero so they can be function pointers coerced
            // to functions that return anyerror.
-            const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
-            const no_err_id = try self.constInt(err_ty_ref, 0);
+            const no_err_id = try self.constInt(Type.anyerror, 0, .direct);
            return try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = no_err_id });
        } else {
            return try self.func.body.emit(self.spv.gpa, .OpReturn, {});
@@ -5051,8 +5060,7 @@ const DeclGen = struct {
            // Functions with an empty error set are emitted with an error code
            // return type and return zero so they can be function pointers coerced
            // to functions that return anyerror.
-            const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
-            const no_err_id = try self.constInt(err_ty_ref, 0);
+            const no_err_id = try self.constInt(Type.anyerror, 0, .direct);
            return try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = no_err_id });
        } else {
            return try self.func.body.emit(self.spv.gpa, .OpReturn, {});
@@ -5076,8 +5084,7 @@ const DeclGen = struct {
         const err_union_ty = self.typeOf(pl_op.operand);
         const payload_ty = self.typeOfIndex(inst);
 
-        const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
-        const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+        const bool_ty_id = try self.resolveType(Type.bool, .direct);
 
         const eu_layout = self.errorUnionLayout(payload_ty);
 
@@ -5087,10 +5094,10 @@ const DeclGen = struct {
         else
             err_union_id;
 
-        const zero_id = try self.constInt(err_ty_ref, 0);
+        const zero_id = try self.constInt(Type.anyerror, 0, .direct);
         const is_err_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
-            .id_result_type = self.typeId(bool_ty_ref),
+            .id_result_type = bool_ty_id,
             .id_result = is_err_id,
             .operand_1 = err_id,
             .operand_2 = zero_id,
@@ -5142,11 +5149,11 @@ const DeclGen = struct {
         const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
         const err_union_ty = self.typeOf(ty_op.operand);
-        const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
+        const err_ty_id = try self.resolveType(Type.anyerror, .direct);
 
         if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
             // No error possible, so just return undefined.
-            return try self.spv.constUndef(err_ty_ref);
+            return try self.spv.constUndef(err_ty_id);
         }
 
         const payload_ty = err_union_ty.errorUnionPayload(mod);
@@ -5185,11 +5192,11 @@ const DeclGen = struct {
             return operand_id;
         }
 
-        const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
+        const payload_ty_id = try self.resolveType(payload_ty, .indirect);
 
         var members: [2]IdRef = undefined;
         members[eu_layout.errorFieldIndex()] = operand_id;
-        members[eu_layout.payloadFieldIndex()] = try self.spv.constUndef(payload_ty_ref);
+        members[eu_layout.payloadFieldIndex()] = try self.spv.constUndef(payload_ty_id);
 
         var types: [2]Type = undefined;
         types[eu_layout.errorFieldIndex()] = Type.anyerror;
@@ -5203,15 +5210,14 @@ const DeclGen = struct {
         const err_union_ty = self.typeOfIndex(inst);
         const operand_id = try self.resolve(ty_op.operand);
         const payload_ty = self.typeOf(ty_op.operand);
-        const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
         const eu_layout = self.errorUnionLayout(payload_ty);
 
         if (!eu_layout.payload_has_bits) {
-            return try self.constInt(err_ty_ref, 0);
+            return try self.constInt(Type.anyerror, 0, .direct);
         }
 
         var members: [2]IdRef = undefined;
-        members[eu_layout.errorFieldIndex()] = try self.constInt(err_ty_ref, 0);
+        members[eu_layout.errorFieldIndex()] = try self.constInt(Type.anyerror, 0, .direct);
         members[eu_layout.payloadFieldIndex()] = try self.convertToIndirect(payload_ty, operand_id);
 
         var types: [2]Type = undefined;
@@ -5229,7 +5235,7 @@ const DeclGen = struct {
         const optional_ty = if (is_pointer) operand_ty.childType(mod) else operand_ty;
         const payload_ty = optional_ty.optionalChild(mod);
 
-        const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+        const bool_ty_id = try self.resolveType(Type.bool, .direct);
 
         if (optional_ty.optionalReprIsPayload(mod)) {
             // Pointer payload represents nullability: pointer or slice.
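Several hunks above depend on the convention that an error value of 0 means "no error": empty-error-set functions return the constant 0, and `is_err` lowers to an integer compare against 0. A host-side illustration (not backend code):

const std = @import("std");

test "error codes are integers and 0 is reserved for success" {
    const eu: anyerror!u32 = error.OutOfMemory;
    const code: u16 = if (eu) |_| 0 else |err| @intFromError(err);
    try std.testing.expect(code != 0); // the OpINotEqual check above
}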
@@ -5248,8 +5254,8 @@ const DeclGen = struct {
             else
                 loaded_id;
 
-            const payload_ty_ref = try self.resolveType(ptr_ty, .direct);
-            const null_id = try self.spv.constNull(payload_ty_ref);
+            const payload_ty_id = try self.resolveType(ptr_ty, .direct);
+            const null_id = try self.spv.constNull(payload_ty_id);
             const op: std.math.CompareOperator = switch (pred) {
                 .is_null => .eq,
                 .is_non_null => .neq,
@@ -5261,8 +5267,8 @@ const DeclGen = struct {
             if (is_pointer) {
                 if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod));
-                    const bool_ptr_ty = try self.ptrType(Type.bool, storage_class);
-                    const tag_ptr_id = try self.accessChain(bool_ptr_ty, operand_id, &.{1});
+                    const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class);
+                    const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1});
                     break :blk try self.load(Type.bool, tag_ptr_id, .{});
                 }
 
@@ -5283,7 +5289,7 @@ const DeclGen = struct {
                 // Invert condition
                 const result_id = self.spv.allocId();
                 try self.func.body.emit(self.spv.gpa, .OpLogicalNot, .{
-                    .id_result_type = self.typeId(bool_ty_ref),
+                    .id_result_type = bool_ty_id,
                     .id_result = result_id,
                     .operand = is_non_null_id,
                 });
@@ -5305,8 +5311,7 @@ const DeclGen = struct {
         const payload_ty = err_union_ty.errorUnionPayload(mod);
         const eu_layout = self.errorUnionLayout(payload_ty);
 
-        const bool_ty_ref = try self.resolveType(Type.bool, .direct);
-        const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
+        const bool_ty_id = try self.resolveType(Type.bool, .direct);
 
         const error_id = if (!eu_layout.payload_has_bits)
             operand_id
@@ -5315,10 +5320,10 @@ const DeclGen = struct {
 
         const result_id = self.spv.allocId();
         const operands = .{
-            .id_result_type = self.typeId(bool_ty_ref),
+            .id_result_type = bool_ty_id,
             .id_result = result_id,
             .operand_1 = error_id,
-            .operand_2 = try self.constInt(err_ty_ref, 0),
+            .operand_2 = try self.constInt(Type.anyerror, 0, .direct),
         };
         switch (pred) {
             .is_err => try self.func.body.emit(self.spv.gpa, .OpINotEqual, operands),
@@ -5351,7 +5356,7 @@ const DeclGen = struct {
         const optional_ty = operand_ty.childType(mod);
         const payload_ty = optional_ty.optionalChild(mod);
         const result_ty = self.typeOfIndex(inst);
-        const result_ty_ref = try self.resolveType(result_ty, .direct);
+        const result_ty_id = try self.resolveType(result_ty, .direct);
 
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             // There is no payload, but we still need to return a valid pointer.
@@ -5364,7 +5369,7 @@ const DeclGen = struct {
             return try self.bitCast(result_ty, operand_ty, operand_id);
         }
 
-        return try self.accessChain(result_ty_ref, operand_id, &.{0});
+        return try self.accessChain(result_ty_id, operand_id, &.{0});
     }
 
     fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -5440,7 +5445,7 @@ const DeclGen = struct {
         };
 
         // First, pre-allocate the labels for the cases.
-        const first_case_label = self.spv.allocIds(num_cases);
+        const case_labels = self.spv.allocIds(num_cases);
 
         // We always need the default case - if zig has none, we will generate unreachable there.
         const default = self.spv.allocId();
@@ -5471,7 +5476,7 @@ const DeclGen = struct {
             const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
             extra_index = case.end + case.data.items_len + case_body.len;
 
-            const label: IdRef = @enumFromInt(@intFromEnum(first_case_label) + case_i);
+            const label = case_labels.at(case_i);
 
             for (items) |item| {
                 const value = (try self.air.value(item, mod)) orelse unreachable;
@@ -5511,7 +5516,7 @@ const DeclGen = struct {
             const case_body: []const Air.Inst.Index = @ptrCast(self.air.extra[case.end + items.len ..][0..case.data.body_len]);
             extra_index = case.end + case.data.items_len + case_body.len;
 
-            const label: IdResult = @enumFromInt(@intFromEnum(first_case_label) + case_i);
+            const label = case_labels.at(case_i);
 
             try self.beginSpvBlock(label);
 
@@ -5566,9 +5571,8 @@ const DeclGen = struct {
         const mod = self.module;
         const decl = mod.declPtr(self.decl_index);
         const path = decl.getFileScope(mod).sub_file_path;
-        const src_fname_id = try self.spv.resolveSourceFileName(path);
         try self.func.body.emit(self.spv.gpa, .OpLine, .{
-            .file = src_fname_id,
+            .file = try self.spv.resolveString(path),
             .line = self.base_line + dbg_stmt.line + 1,
             .column = dbg_stmt.column + 1,
         });
@@ -5737,7 +5741,7 @@ const DeclGen = struct {
         const fn_info = mod.typeToFunc(zig_fn_ty).?;
         const return_type = fn_info.return_type;
 
-        const result_type_ref = try self.resolveFnReturnType(Type.fromInterned(return_type));
+        const result_type_id = try self.resolveFnReturnType(Type.fromInterned(return_type));
         const result_id = self.spv.allocId();
         const callee_id = try self.resolve(pl_op.operand);
 
@@ -5758,7 +5762,7 @@ const DeclGen = struct {
         }
 
         try self.func.body.emit(self.spv.gpa, .OpFunctionCall, .{
-            .id_result_type = self.typeId(result_type_ref),
+            .id_result_type = result_type_id,
             .id_result = result_id,
             .function = callee_id,
             .id_ref_3 = params[0..n_params],
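`allocIds(num_cases)` now returns a contiguous id range, and `case_labels.at(case_i)` replaces the manual `@enumFromInt(@intFromEnum(first_case_label) + case_i)` arithmetic. A sketch of what such a range type presumably looks like (assumption; the real definition lives in spirv/Module.zig and may differ):

// Assumed shape of the range returned by allocIds; illustration only.
const IdRange = struct {
    base: u32, // first result-id in the range
    len: u32,

    pub fn at(range: IdRange, i: usize) IdResult {
        std.debug.assert(i < range.len);
        return @enumFromInt(range.base + @as(u32, @intCast(i)));
    }
};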
fn processTypeInstruction(self: *Assembler) !AsmValue { const operands = self.inst.operands.items; - const ref = switch (self.inst.opcode) { - .OpTypeVoid => try self.spv.resolve(.void_type), - .OpTypeBool => try self.spv.resolve(.bool_type), + const section = &self.spv.sections.types_globals_constants; + const id = switch (self.inst.opcode) { + .OpTypeVoid => try self.spv.voidType(), + .OpTypeBool => try self.spv.boolType(), .OpTypeInt => blk: { const signedness: std.builtin.Signedness = switch (operands[2].literal32) { 0 => .unsigned, @@ -317,43 +317,49 @@ fn processTypeInstruction(self: *Assembler) !AsmValue { return self.fail(0, "{} is not a valid bit count for floats (expected 16, 32 or 64)", .{bits}); }, } - break :blk try self.spv.resolve(.{ .float_type = .{ .bits = @intCast(bits) } }); + break :blk try self.spv.floatType(@intCast(bits)); + }, + .OpTypeVector => blk: { + const child_type = try self.resolveRefId(operands[1].ref_id); + break :blk try self.spv.vectorType(operands[2].literal32, child_type); }, - .OpTypeVector => try self.spv.resolve(.{ .vector_type = .{ - .component_type = try self.resolveTypeRef(operands[1].ref_id), - .component_count = operands[2].literal32, - } }), .OpTypeArray => { // TODO: The length of an OpTypeArray is determined by a constant (which may be a spec constant), // and so some consideration must be taken when entering this in the type system. return self.todo("process OpTypeArray", .{}); }, .OpTypePointer => blk: { - break :blk try self.spv.resolve(.{ - .ptr_type = .{ - .storage_class = @enumFromInt(operands[1].value), - .child_type = try self.resolveTypeRef(operands[2].ref_id), - // TODO: This should be a proper reference resolved via OpTypeForwardPointer - .fwd = @enumFromInt(std.math.maxInt(u32)), - }, + const storage_class: StorageClass = @enumFromInt(operands[1].value); + const child_type = try self.resolveRefId(operands[2].ref_id); + const result_id = self.spv.allocId(); + try section.emit(self.spv.gpa, .OpTypePointer, .{ + .id_result = result_id, + .storage_class = storage_class, + .type = child_type, }); + break :blk result_id; }, .OpTypeFunction => blk: { const param_operands = operands[2..]; - const param_types = try self.spv.gpa.alloc(CacheRef, param_operands.len); + const return_type = try self.resolveRefId(operands[1].ref_id); + + const param_types = try self.spv.gpa.alloc(IdRef, param_operands.len); defer self.spv.gpa.free(param_types); - for (param_types, 0..) 
|*param, i| { - param.* = try self.resolveTypeRef(param_operands[i].ref_id); + for (param_types, param_operands) |*param, operand| { + param.* = try self.resolveRefId(operand.ref_id); } - break :blk try self.spv.resolve(.{ .function_type = .{ - .return_type = try self.resolveTypeRef(operands[1].ref_id), - .parameters = param_types, - } }); + const result_id = self.spv.allocId(); + try section.emit(self.spv.gpa, .OpTypeFunction, .{ + .id_result = result_id, + .return_type = return_type, + .id_ref_2 = param_types, + }); + break :blk result_id; }, else => return self.todo("process type instruction {s}", .{@tagName(self.inst.opcode)}), }; - return AsmValue{ .ty = ref }; + return AsmValue{ .ty = id }; } /// Emit `self.inst` into `self.spv` and `self.func`, and return the AsmValue @@ -410,7 +416,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue { .ref_id => |index| { const result = try self.resolveRef(index); try section.ensureUnusedCapacity(self.spv.gpa, 1); - section.writeOperand(spec.IdRef, result.resultId(self.spv)); + section.writeOperand(spec.IdRef, result.resultId()); }, .string => |offset| { const text = std.mem.sliceTo(self.inst.string_bytes.items[offset..], 0); @@ -459,18 +465,9 @@ fn resolveRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue { } } -/// Resolve a value reference as type. -fn resolveTypeRef(self: *Assembler, ref: AsmValue.Ref) !CacheRef { +fn resolveRefId(self: *Assembler, ref: AsmValue.Ref) !IdRef { const value = try self.resolveRef(ref); - switch (value) { - .just_declared, .unresolved_forward_reference => unreachable, - .ty => |ty_ref| return ty_ref, - else => { - const name = self.value_map.keys()[ref]; - // TODO: Improve source location. - return self.fail(0, "expected operand %{s} to refer to a type", .{name}); - }, - } + return value.resultId(); } /// Attempt to parse an instruction into `self.inst`. @@ -709,22 +706,41 @@ fn parseContextDependentNumber(self: *Assembler) !void { assert(self.inst.opcode == .OpConstant or self.inst.opcode == .OpSpecConstant); const tok = self.currentToken(); - const result_type_ref = try self.resolveTypeRef(self.inst.operands.items[0].ref_id); - const result_type = self.spv.cache.lookup(result_type_ref); - switch (result_type) { - .int_type => |int| { - try self.parseContextDependentInt(int.signedness, int.bits); - }, - .float_type => |float| { - switch (float.bits) { + const result = try self.resolveRef(self.inst.operands.items[0].ref_id); + const result_id = result.resultId(); + // We are going to cheat a little bit: The types we are interested in, int and float, + // are added to the module and cached via self.spv.intType and self.spv.floatType. Therefore, + // we can determine the width of these types by directly checking the cache. + // This only works if the Assembler and codegen both use spv.intType and spv.floatType though. + // We don't expect there to be many of these types, so just look it up every time. + // TODO: Could be improved to be a little bit more efficient.
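+ // + // For example, given the (illustrative) assembly + // %u32 = OpTypeInt 32 0 + // %c = OpConstant %u32 42 + // the OpTypeInt goes through spv.intType() and so lands in spv.cache.int_types; + // scanning that map for the id of %u32 recovers .{ .signedness = .unsigned, .bits = 32 }, + // and the literal 42 is then parsed as a 32-bit unsigned integer.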
+ + { + var it = self.spv.cache.int_types.iterator(); + while (it.next()) |entry| { + const id = entry.value_ptr.*; + if (id != result_id) continue; + const info = entry.key_ptr.*; + return try self.parseContextDependentInt(info.signedness, info.bits); + } + } + + { + var it = self.spv.cache.float_types.iterator(); + while (it.next()) |entry| { + const id = entry.value_ptr.*; + if (id != result_id) continue; + const info = entry.key_ptr.*; + switch (info.bits) { 16 => try self.parseContextDependentFloat(16), 32 => try self.parseContextDependentFloat(32), 64 => try self.parseContextDependentFloat(64), - else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{float.bits}), + else => return self.fail(tok.start, "cannot parse {}-bit float literal", .{info.bits}), } - }, - else => return self.fail(tok.start, "cannot parse literal constant", .{}), + } } + + return self.fail(tok.start, "cannot parse literal constant", .{}); } fn parseContextDependentInt(self: *Assembler, signedness: std.builtin.Signedness, width: u32) !void { diff --git a/src/codegen/spirv/Cache.zig b/src/codegen/spirv/Cache.zig deleted file mode 100644 index e8460e1d79..0000000000 --- a/src/codegen/spirv/Cache.zig +++ /dev/null @@ -1,1125 +0,0 @@ -//! This file implements an InternPool-like structure that caches -//! SPIR-V types and constants. Instead of generating type and -//! constant instructions directly, we first keep a representation -//! in a compressed database. This is then only later turned into -//! actual SPIR-V instructions. -//! Note: This cache is insertion-ordered. This means that we -//! can materialize the SPIR-V instructions in the proper order, -//! as SPIR-V requires that the type is emitted before use. -//! Note: According to SPIR-V spec section 2.8, Types and Variables, -//! non-pointer non-aggrerate types (which includes matrices and -//! vectors) must have a _unique_ representation in the final binary. - -const std = @import("std"); -const assert = std.debug.assert; -const Allocator = std.mem.Allocator; - -const Section = @import("Section.zig"); -const Module = @import("Module.zig"); - -const spec = @import("spec.zig"); -const Opcode = spec.Opcode; -const IdResult = spec.IdResult; -const StorageClass = spec.StorageClass; - -const InternPool = @import("../../InternPool.zig"); - -const Self = @This(); - -map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, -items: std.MultiArrayList(Item) = .{}, -extra: std.ArrayListUnmanaged(u32) = .{}, - -string_bytes: std.ArrayListUnmanaged(u8) = .{}, -strings: std.AutoArrayHashMapUnmanaged(void, u32) = .{}, - -recursive_ptrs: std.AutoHashMapUnmanaged(Ref, void) = .{}, - -const Item = struct { - tag: Tag, - /// The result-id that this item uses. - result_id: IdResult, - /// The Tag determines how this should be interpreted. - data: u32, -}; - -const Tag = enum { - // -- Types - /// Simple type that has no additional data. - /// data is SimpleType.
- type_simple, - /// Signed integer type - /// data is number of bits - type_int_signed, - /// Unsigned integer type - /// data is number of bits - type_int_unsigned, - /// Floating point type - /// data is number of bits - type_float, - /// Vector type - /// data is payload to VectorType - type_vector, - /// Array type - /// data is payload to ArrayType - type_array, - /// Function (proto)type - /// data is payload to FunctionType - type_function, - // /// Pointer type in the CrossWorkgroup storage class - // /// data is child type - // type_ptr_generic, - // /// Pointer type in the CrossWorkgroup storage class - // /// data is child type - // type_ptr_crosswgp, - // /// Pointer type in the Function storage class - // /// data is child type - // type_ptr_function, - /// Simple pointer type that does not have any decorations. - /// data is payload to SimplePointerType - type_ptr_simple, - /// A forward declaration for a pointer. - /// data is ForwardPointerType - type_fwd_ptr, - /// Simple structure type that does not have any decorations. - /// data is payload to SimpleStructType - type_struct_simple, - /// Simple structure type that does not have any decorations, but does - /// have member names trailing. - /// data is payload to SimpleStructType - type_struct_simple_with_member_names, - /// Opaque type. - /// data is name string. - type_opaque, - - // -- Values - /// Value of type u8 - /// data is value - uint8, - /// Value of type u32 - /// data is value - uint32, - // TODO: More specialized tags here. - /// Integer value for signed values that are smaller than 32 bits. - /// data is pointer to Int32 - int_small, - /// Integer value for unsigned values that are smaller than 32 bits. - /// data is pointer to UInt32 - uint_small, - /// Integer value for signed values that are beteen 32 and 64 bits. - /// data is pointer to Int64 - int_large, - /// Integer value for unsinged values that are beteen 32 and 64 bits. - /// data is pointer to UInt64 - uint_large, - /// Value of type f16 - /// data is value - float16, - /// Value of type f32 - /// data is value - float32, - /// Value of type f64 - /// data is payload to Float16 - float64, - /// Undefined value - /// data is type - undef, - /// Null value - /// data is type - null, - /// Bool value that is true - /// data is (bool) type - bool_true, - /// Bool value that is false - /// data is (bool) type - bool_false, - - const SimpleType = enum { - void, - bool, - }; - - const VectorType = Key.VectorType; - const ArrayType = Key.ArrayType; - - // Trailing: - // - [param_len]Ref: parameter types. - const FunctionType = struct { - param_len: u32, - return_type: Ref, - }; - - const SimplePointerType = struct { - storage_class: StorageClass, - child_type: Ref, - fwd: Ref, - }; - - const ForwardPointerType = struct { - storage_class: StorageClass, - zig_child_type: InternPool.Index, - }; - - /// Trailing: - /// - [members_len]Ref: Member types. - /// - [members_len]String: Member names, -- ONLY if the tag is type_struct_simple_with_member_names - const SimpleStructType = struct { - /// (optional) The name of the struct. - name: String, - /// Number of members that this struct has. - members_len: u32, - }; - - const Float64 = struct { - // Low-order 32 bits of the value. - low: u32, - // High-order 32 bits of the value. 
- high: u32, - - fn encode(value: f64) Float64 { - const bits = @as(u64, @bitCast(value)); - return .{ - .low = @truncate(bits), - .high = @truncate(bits >> 32), - }; - } - - fn decode(self: Float64) f64 { - const bits = @as(u64, self.low) | (@as(u64, self.high) << 32); - return @bitCast(bits); - } - }; - - const Int32 = struct { - ty: Ref, - value: i32, - }; - - const UInt32 = struct { - ty: Ref, - value: u32, - }; - - const UInt64 = struct { - ty: Ref, - low: u32, - high: u32, - - fn encode(ty: Ref, value: u64) Int64 { - return .{ - .ty = ty, - .low = @truncate(value), - .high = @truncate(value >> 32), - }; - } - - fn decode(self: UInt64) u64 { - return @as(u64, self.low) | (@as(u64, self.high) << 32); - } - }; - - const Int64 = struct { - ty: Ref, - low: u32, - high: u32, - - fn encode(ty: Ref, value: i64) Int64 { - return .{ - .ty = ty, - .low = @truncate(@as(u64, @bitCast(value))), - .high = @truncate(@as(u64, @bitCast(value)) >> 32), - }; - } - - fn decode(self: Int64) i64 { - return @as(i64, @bitCast(@as(u64, self.low) | (@as(u64, self.high) << 32))); - } - }; -}; - -pub const Ref = enum(u32) { _ }; - -/// This union represents something that can be interned. This includes -/// types and constants. This structure is used for interfacing with the -/// database: Values described for this structure are ephemeral and stored -/// in a more memory-efficient manner internally. -pub const Key = union(enum) { - // -- Types - void_type, - bool_type, - int_type: IntType, - float_type: FloatType, - vector_type: VectorType, - array_type: ArrayType, - function_type: FunctionType, - ptr_type: PointerType, - fwd_ptr_type: ForwardPointerType, - struct_type: StructType, - opaque_type: OpaqueType, - - // -- values - int: Int, - float: Float, - undef: Undef, - null: Null, - bool: Bool, - - pub const IntType = std.builtin.Type.Int; - pub const FloatType = std.builtin.Type.Float; - - pub const VectorType = struct { - component_type: Ref, - component_count: u32, - }; - - pub const ArrayType = struct { - /// Child type of this array. - element_type: Ref, - /// Reference to a constant. - length: Ref, - /// Type has the 'ArrayStride' decoration. - /// If zero, no stride is present. - stride: u32 = 0, - }; - - pub const FunctionType = struct { - return_type: Ref, - parameters: []const Ref, - }; - - pub const PointerType = struct { - storage_class: StorageClass, - child_type: Ref, - /// Ref to a .fwd_ptr_type. - fwd: Ref, - // TODO: Decorations: - // - Alignment - // - ArrayStride - // - MaxByteOffset - }; - - pub const ForwardPointerType = struct { - zig_child_type: InternPool.Index, - storage_class: StorageClass, - }; - - pub const StructType = struct { - // TODO: Decorations. - /// The name of the structure. Can be `.none`. - name: String = .none, - /// The type of each member. - member_types: []const Ref, - /// Name for each member. May be omitted. - member_names: ?[]const String = null, - - fn memberNames(self: @This()) []const String { - return if (self.member_names) |member_names| member_names else &.{}; - } - }; - - pub const OpaqueType = struct { - name: String = .none, - }; - - pub const Int = struct { - /// The type: any bitness integer. - ty: Ref, - /// The actual value. Only uint64 and int64 types - /// are available here: Smaller types should use these - /// fields. - value: Value, - - pub const Value = union(enum) { - uint64: u64, - int64: i64, - }; - - /// Turns this value into the corresponding 32-bit literal, 2s complement signed. 
- fn toBits32(self: Int) u32 { - return switch (self.value) { - .uint64 => |val| @intCast(val), - .int64 => |val| if (val < 0) @bitCast(@as(i32, @intCast(val))) else @intCast(val), - }; - } - - fn toBits64(self: Int) u64 { - return switch (self.value) { - .uint64 => |val| val, - .int64 => |val| @bitCast(val), - }; - } - - fn to(self: Int, comptime T: type) T { - return switch (self.value) { - inline else => |val| @intCast(val), - }; - } - }; - - /// Represents a numberic value of some type. - pub const Float = struct { - /// The type: 16, 32, or 64-bit float. - ty: Ref, - /// The actual value. - value: Value, - - pub const Value = union(enum) { - float16: f16, - float32: f32, - float64: f64, - }; - }; - - pub const Undef = struct { - ty: Ref, - }; - - pub const Null = struct { - ty: Ref, - }; - - pub const Bool = struct { - ty: Ref, - value: bool, - }; - - fn hash(self: Key) u32 { - var hasher = std.hash.Wyhash.init(0); - switch (self) { - .float => |float| { - std.hash.autoHash(&hasher, float.ty); - switch (float.value) { - .float16 => |value| std.hash.autoHash(&hasher, @as(u16, @bitCast(value))), - .float32 => |value| std.hash.autoHash(&hasher, @as(u32, @bitCast(value))), - .float64 => |value| std.hash.autoHash(&hasher, @as(u64, @bitCast(value))), - } - }, - .function_type => |func| { - std.hash.autoHash(&hasher, func.return_type); - for (func.parameters) |param_type| { - std.hash.autoHash(&hasher, param_type); - } - }, - .struct_type => |struct_type| { - std.hash.autoHash(&hasher, struct_type.name); - for (struct_type.member_types) |member_type| { - std.hash.autoHash(&hasher, member_type); - } - for (struct_type.memberNames()) |member_name| { - std.hash.autoHash(&hasher, member_name); - } - }, - inline else => |key| std.hash.autoHash(&hasher, key), - } - return @truncate(hasher.final()); - } - - fn eql(a: Key, b: Key) bool { - const KeyTag = @typeInfo(Key).Union.tag_type.?; - const a_tag: KeyTag = a; - const b_tag: KeyTag = b; - if (a_tag != b_tag) { - return false; - } - return switch (a) { - .function_type => |a_func| { - const b_func = b.function_type; - return a_func.return_type == b_func.return_type and - std.mem.eql(Ref, a_func.parameters, b_func.parameters); - }, - .struct_type => |a_struct| { - const b_struct = b.struct_type; - return a_struct.name == b_struct.name and - std.mem.eql(Ref, a_struct.member_types, b_struct.member_types) and - std.mem.eql(String, a_struct.memberNames(), b_struct.memberNames()); - }, - // TODO: Unroll? - else => std.meta.eql(a, b), - }; - } - - pub const Adapter = struct { - self: *const Self, - - pub fn eql(ctx: @This(), a: Key, b_void: void, b_index: usize) bool { - _ = b_void; - return ctx.self.lookup(@enumFromInt(b_index)).eql(a); - } - - pub fn hash(ctx: @This(), a: Key) u32 { - _ = ctx; - return a.hash(); - } - }; - - fn toSimpleType(self: Key) Tag.SimpleType { - return switch (self) { - .void_type => .void, - .bool_type => .bool, - else => unreachable, - }; - } - - pub fn isNumericalType(self: Key) bool { - return switch (self) { - .int_type, .float_type => true, - else => false, - }; - } -}; - -pub fn deinit(self: *Self, spv: *const Module) void { - self.map.deinit(spv.gpa); - self.items.deinit(spv.gpa); - self.extra.deinit(spv.gpa); - self.string_bytes.deinit(spv.gpa); - self.strings.deinit(spv.gpa); - self.recursive_ptrs.deinit(spv.gpa); -} - -/// Actually materialize the database into spir-v instructions. -/// This function returns a spir-v section of (only) constant and type instructions. 
-/// Additionally, decorations, debug names, etc, are all directly emitted into the -/// `spv` module. The section is allocated with `spv.gpa`. -pub fn materialize(self: *const Self, spv: *Module) !Section { - var section = Section{}; - errdefer section.deinit(spv.gpa); - for (self.items.items(.result_id), 0..) |result_id, index| { - try self.emit(spv, result_id, @enumFromInt(index), §ion); - } - return section; -} - -fn emit( - self: *const Self, - spv: *Module, - result_id: IdResult, - ref: Ref, - section: *Section, -) !void { - const key = self.lookup(ref); - const Lit = spec.LiteralContextDependentNumber; - switch (key) { - .void_type => { - try section.emit(spv.gpa, .OpTypeVoid, .{ .id_result = result_id }); - try spv.debugName(result_id, "void"); - }, - .bool_type => { - try section.emit(spv.gpa, .OpTypeBool, .{ .id_result = result_id }); - try spv.debugName(result_id, "bool"); - }, - .int_type => |int| { - try section.emit(spv.gpa, .OpTypeInt, .{ - .id_result = result_id, - .width = int.bits, - .signedness = switch (int.signedness) { - .unsigned => @as(spec.Word, 0), - .signed => 1, - }, - }); - const ui: []const u8 = switch (int.signedness) { - .unsigned => "u", - .signed => "i", - }; - try spv.debugNameFmt(result_id, "{s}{}", .{ ui, int.bits }); - }, - .float_type => |float| { - try section.emit(spv.gpa, .OpTypeFloat, .{ - .id_result = result_id, - .width = float.bits, - }); - try spv.debugNameFmt(result_id, "f{}", .{float.bits}); - }, - .vector_type => |vector| { - try section.emit(spv.gpa, .OpTypeVector, .{ - .id_result = result_id, - .component_type = self.resultId(vector.component_type), - .component_count = vector.component_count, - }); - }, - .array_type => |array| { - try section.emit(spv.gpa, .OpTypeArray, .{ - .id_result = result_id, - .element_type = self.resultId(array.element_type), - .length = self.resultId(array.length), - }); - if (array.stride != 0) { - try spv.decorate(result_id, .{ .ArrayStride = .{ .array_stride = array.stride } }); - } - }, - .function_type => |function| { - try section.emitRaw(spv.gpa, .OpTypeFunction, 2 + function.parameters.len); - section.writeOperand(IdResult, result_id); - section.writeOperand(IdResult, self.resultId(function.return_type)); - for (function.parameters) |param_type| { - section.writeOperand(IdResult, self.resultId(param_type)); - } - }, - .ptr_type => |ptr| { - try section.emit(spv.gpa, .OpTypePointer, .{ - .id_result = result_id, - .storage_class = ptr.storage_class, - .type = self.resultId(ptr.child_type), - }); - // TODO: Decorations? - }, - .fwd_ptr_type => |fwd| { - // Only emit the OpTypeForwardPointer if its actually required. - if (self.recursive_ptrs.contains(ref)) { - try section.emit(spv.gpa, .OpTypeForwardPointer, .{ - .pointer_type = result_id, - .storage_class = fwd.storage_class, - }); - } - }, - .struct_type => |struct_type| { - try section.emitRaw(spv.gpa, .OpTypeStruct, 1 + struct_type.member_types.len); - section.writeOperand(IdResult, result_id); - for (struct_type.member_types) |member_type| { - section.writeOperand(IdResult, self.resultId(member_type)); - } - if (self.getString(struct_type.name)) |name| { - try spv.debugName(result_id, name); - } - for (struct_type.memberNames(), 0..) |member_name, i| { - if (self.getString(member_name)) |name| { - try spv.memberDebugName(result_id, @intCast(i), name); - } - } - // TODO: Decorations? 
- }, - .opaque_type => |opaque_type| { - const name = if (self.getString(opaque_type.name)) |name| name else ""; - try section.emit(spv.gpa, .OpTypeOpaque, .{ - .id_result = result_id, - .literal_string = name, - }); - }, - .int => |int| { - const int_type = self.lookup(int.ty).int_type; - const ty_id = self.resultId(int.ty); - const lit: Lit = switch (int_type.bits) { - 1...32 => .{ .uint32 = int.toBits32() }, - 33...64 => .{ .uint64 = int.toBits64() }, - else => unreachable, - }; - - try section.emit(spv.gpa, .OpConstant, .{ - .id_result_type = ty_id, - .id_result = result_id, - .value = lit, - }); - }, - .float => |float| { - const ty_id = self.resultId(float.ty); - const lit: Lit = switch (float.value) { - .float16 => |value| .{ .uint32 = @as(u16, @bitCast(value)) }, - .float32 => |value| .{ .float32 = value }, - .float64 => |value| .{ .float64 = value }, - }; - try section.emit(spv.gpa, .OpConstant, .{ - .id_result_type = ty_id, - .id_result = result_id, - .value = lit, - }); - }, - .undef => |undef| { - try section.emit(spv.gpa, .OpUndef, .{ - .id_result_type = self.resultId(undef.ty), - .id_result = result_id, - }); - }, - .null => |null_info| { - try section.emit(spv.gpa, .OpConstantNull, .{ - .id_result_type = self.resultId(null_info.ty), - .id_result = result_id, - }); - }, - .bool => |bool_info| switch (bool_info.value) { - true => { - try section.emit(spv.gpa, .OpConstantTrue, .{ - .id_result_type = self.resultId(bool_info.ty), - .id_result = result_id, - }); - }, - false => { - try section.emit(spv.gpa, .OpConstantFalse, .{ - .id_result_type = self.resultId(bool_info.ty), - .id_result = result_id, - }); - }, - }, - } -} - -/// Add a key to this cache. Returns a reference to the key that -/// was added. The corresponding result-id can be queried using -/// self.resultId with the result. 
-pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref { - const adapter: Key.Adapter = .{ .self = self }; - const entry = try self.map.getOrPutAdapted(spv.gpa, key, adapter); - if (entry.found_existing) { - return @enumFromInt(entry.index); - } - const item: Item = switch (key) { - inline .void_type, .bool_type => .{ - .tag = .type_simple, - .result_id = spv.allocId(), - .data = @intFromEnum(key.toSimpleType()), - }, - .int_type => |int| blk: { - const t: Tag = switch (int.signedness) { - .signed => .type_int_signed, - .unsigned => .type_int_unsigned, - }; - break :blk .{ - .tag = t, - .result_id = spv.allocId(), - .data = int.bits, - }; - }, - .float_type => |float| .{ - .tag = .type_float, - .result_id = spv.allocId(), - .data = float.bits, - }, - .vector_type => |vector| .{ - .tag = .type_vector, - .result_id = spv.allocId(), - .data = try self.addExtra(spv, vector), - }, - .array_type => |array| .{ - .tag = .type_array, - .result_id = spv.allocId(), - .data = try self.addExtra(spv, array), - }, - .function_type => |function| blk: { - const extra = try self.addExtra(spv, Tag.FunctionType{ - .param_len = @intCast(function.parameters.len), - .return_type = function.return_type, - }); - try self.extra.appendSlice(spv.gpa, @ptrCast(function.parameters)); - break :blk .{ - .tag = .type_function, - .result_id = spv.allocId(), - .data = extra, - }; - }, - // .ptr_type => |ptr| switch (ptr.storage_class) { - // .Generic => Item{ - // .tag = .type_ptr_generic, - // .result_id = spv.allocId(), - // .data = @intFromEnum(ptr.child_type), - // }, - // .CrossWorkgroup => Item{ - // .tag = .type_ptr_crosswgp, - // .result_id = spv.allocId(), - // .data = @intFromEnum(ptr.child_type), - // }, - // .Function => Item{ - // .tag = .type_ptr_function, - // .result_id = spv.allocId(), - // .data = @intFromEnum(ptr.child_type), - // }, - // else => |storage_class| Item{ - // .tag = .type_ptr_simple, - // .result_id = spv.allocId(), - // .data = try self.addExtra(spv, Tag.SimplePointerType{ - // .storage_class = storage_class, - // .child_type = ptr.child_type, - // }), - // }, - // }, - .ptr_type => |ptr| Item{ - .tag = .type_ptr_simple, - // For this variant we need to steal the ID of the forward-declaration, instead - // of allocating one manually. This will make sure that we get a single result-id - // any possibly forward declared pointer type. 
- .result_id = self.resultId(ptr.fwd), - .data = try self.addExtra(spv, Tag.SimplePointerType{ - .storage_class = ptr.storage_class, - .child_type = ptr.child_type, - .fwd = ptr.fwd, - }), - }, - .fwd_ptr_type => |fwd| Item{ - .tag = .type_fwd_ptr, - .result_id = spv.allocId(), - .data = try self.addExtra(spv, Tag.ForwardPointerType{ - .zig_child_type = fwd.zig_child_type, - .storage_class = fwd.storage_class, - }), - }, - .struct_type => |struct_type| blk: { - const extra = try self.addExtra(spv, Tag.SimpleStructType{ - .name = struct_type.name, - .members_len = @intCast(struct_type.member_types.len), - }); - try self.extra.appendSlice(spv.gpa, @ptrCast(struct_type.member_types)); - - if (struct_type.member_names) |member_names| { - try self.extra.appendSlice(spv.gpa, @ptrCast(member_names)); - break :blk Item{ - .tag = .type_struct_simple_with_member_names, - .result_id = spv.allocId(), - .data = extra, - }; - } else { - break :blk Item{ - .tag = .type_struct_simple, - .result_id = spv.allocId(), - .data = extra, - }; - } - }, - .opaque_type => |opaque_type| Item{ - .tag = .type_opaque, - .result_id = spv.allocId(), - .data = @intFromEnum(opaque_type.name), - }, - .int => |int| blk: { - const int_type = self.lookup(int.ty).int_type; - if (int_type.signedness == .unsigned and int_type.bits == 8) { - break :blk .{ - .tag = .uint8, - .result_id = spv.allocId(), - .data = int.to(u8), - }; - } else if (int_type.signedness == .unsigned and int_type.bits == 32) { - break :blk .{ - .tag = .uint32, - .result_id = spv.allocId(), - .data = int.to(u32), - }; - } - - switch (int.value) { - inline else => |val| { - if (val >= 0 and val <= std.math.maxInt(u32)) { - break :blk .{ - .tag = .uint_small, - .result_id = spv.allocId(), - .data = try self.addExtra(spv, Tag.UInt32{ - .ty = int.ty, - .value = @intCast(val), - }), - }; - } else if (val >= std.math.minInt(i32) and val <= std.math.maxInt(i32)) { - break :blk .{ - .tag = .int_small, - .result_id = spv.allocId(), - .data = try self.addExtra(spv, Tag.Int32{ - .ty = int.ty, - .value = @intCast(val), - }), - }; - } else if (val < 0) { - break :blk .{ - .tag = .int_large, - .result_id = spv.allocId(), - .data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @intCast(val))), - }; - } else { - break :blk .{ - .tag = .uint_large, - .result_id = spv.allocId(), - .data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @intCast(val))), - }; - } - }, - } - }, - .float => |float| switch (self.lookup(float.ty).float_type.bits) { - 16 => .{ - .tag = .float16, - .result_id = spv.allocId(), - .data = @as(u16, @bitCast(float.value.float16)), - }, - 32 => .{ - .tag = .float32, - .result_id = spv.allocId(), - .data = @as(u32, @bitCast(float.value.float32)), - }, - 64 => .{ - .tag = .float64, - .result_id = spv.allocId(), - .data = try self.addExtra(spv, Tag.Float64.encode(float.value.float64)), - }, - else => unreachable, - }, - .undef => |undef| .{ - .tag = .undef, - .result_id = spv.allocId(), - .data = @intFromEnum(undef.ty), - }, - .null => |null_info| .{ - .tag = .null, - .result_id = spv.allocId(), - .data = @intFromEnum(null_info.ty), - }, - .bool => |bool_info| .{ - .tag = switch (bool_info.value) { - true => Tag.bool_true, - false => Tag.bool_false, - }, - .result_id = spv.allocId(), - .data = @intFromEnum(bool_info.ty), - }, - }; - try self.items.append(spv.gpa, item); - - return @enumFromInt(entry.index); -} - -/// Turn a Ref back into a Key. -/// The Key is valid until the next call to resolve(). 
-pub fn lookup(self: *const Self, ref: Ref) Key { - const item = self.items.get(@intFromEnum(ref)); - const data = item.data; - return switch (item.tag) { - .type_simple => switch (@as(Tag.SimpleType, @enumFromInt(data))) { - .void => .void_type, - .bool => .bool_type, - }, - .type_int_signed => .{ .int_type = .{ - .signedness = .signed, - .bits = @intCast(data), - } }, - .type_int_unsigned => .{ .int_type = .{ - .signedness = .unsigned, - .bits = @intCast(data), - } }, - .type_float => .{ .float_type = .{ - .bits = @intCast(data), - } }, - .type_vector => .{ .vector_type = self.extraData(Tag.VectorType, data) }, - .type_array => .{ .array_type = self.extraData(Tag.ArrayType, data) }, - .type_function => { - const payload = self.extraDataTrail(Tag.FunctionType, data); - return .{ - .function_type = .{ - .return_type = payload.data.return_type, - .parameters = @ptrCast(self.extra.items[payload.trail..][0..payload.data.param_len]), - }, - }; - }, - .type_ptr_simple => { - const payload = self.extraData(Tag.SimplePointerType, data); - return .{ - .ptr_type = .{ - .storage_class = payload.storage_class, - .child_type = payload.child_type, - .fwd = payload.fwd, - }, - }; - }, - .type_fwd_ptr => { - const payload = self.extraData(Tag.ForwardPointerType, data); - return .{ - .fwd_ptr_type = .{ - .zig_child_type = payload.zig_child_type, - .storage_class = payload.storage_class, - }, - }; - }, - .type_struct_simple => { - const payload = self.extraDataTrail(Tag.SimpleStructType, data); - const member_types: []const Ref = @ptrCast(self.extra.items[payload.trail..][0..payload.data.members_len]); - return .{ - .struct_type = .{ - .name = payload.data.name, - .member_types = member_types, - .member_names = null, - }, - }; - }, - .type_struct_simple_with_member_names => { - const payload = self.extraDataTrail(Tag.SimpleStructType, data); - const trailing = self.extra.items[payload.trail..]; - const member_types: []const Ref = @ptrCast(trailing[0..payload.data.members_len]); - const member_names: []const String = @ptrCast(trailing[payload.data.members_len..][0..payload.data.members_len]); - return .{ - .struct_type = .{ - .name = payload.data.name, - .member_types = member_types, - .member_names = member_names, - }, - }; - }, - .type_opaque => .{ - .opaque_type = .{ - .name = @enumFromInt(data), - }, - }, - .float16 => .{ .float = .{ - .ty = self.get(.{ .float_type = .{ .bits = 16 } }), - .value = .{ .float16 = @bitCast(@as(u16, @intCast(data))) }, - } }, - .float32 => .{ .float = .{ - .ty = self.get(.{ .float_type = .{ .bits = 32 } }), - .value = .{ .float32 = @bitCast(data) }, - } }, - .float64 => .{ .float = .{ - .ty = self.get(.{ .float_type = .{ .bits = 64 } }), - .value = .{ .float64 = self.extraData(Tag.Float64, data).decode() }, - } }, - .uint8 => .{ .int = .{ - .ty = self.get(.{ .int_type = .{ .signedness = .unsigned, .bits = 8 } }), - .value = .{ .uint64 = data }, - } }, - .uint32 => .{ .int = .{ - .ty = self.get(.{ .int_type = .{ .signedness = .unsigned, .bits = 32 } }), - .value = .{ .uint64 = data }, - } }, - .int_small => { - const payload = self.extraData(Tag.Int32, data); - return .{ .int = .{ - .ty = payload.ty, - .value = .{ .int64 = payload.value }, - } }; - }, - .uint_small => { - const payload = self.extraData(Tag.UInt32, data); - return .{ .int = .{ - .ty = payload.ty, - .value = .{ .uint64 = payload.value }, - } }; - }, - .int_large => { - const payload = self.extraData(Tag.Int64, data); - return .{ .int = .{ - .ty = payload.ty, - .value = .{ .int64 = payload.decode() }, - } }; - 
}, - .uint_large => { - const payload = self.extraData(Tag.UInt64, data); - return .{ .int = .{ - .ty = payload.ty, - .value = .{ .uint64 = payload.decode() }, - } }; - }, - .undef => .{ .undef = .{ - .ty = @enumFromInt(data), - } }, - .null => .{ .null = .{ - .ty = @enumFromInt(data), - } }, - .bool_true => .{ .bool = .{ - .ty = @enumFromInt(data), - .value = true, - } }, - .bool_false => .{ .bool = .{ - .ty = @enumFromInt(data), - .value = false, - } }, - }; -} - -/// Look op the result-id that corresponds to a particular -/// ref. -pub fn resultId(self: Self, ref: Ref) IdResult { - return self.items.items(.result_id)[@intFromEnum(ref)]; -} - -/// Get the ref for a key that has already been added to the cache. -fn get(self: *const Self, key: Key) Ref { - const adapter: Key.Adapter = .{ .self = self }; - const index = self.map.getIndexAdapted(key, adapter).?; - return @enumFromInt(index); -} - -fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 { - const fields = @typeInfo(@TypeOf(extra)).Struct.fields; - try self.extra.ensureUnusedCapacity(spv.gpa, fields.len); - return try self.addExtraAssumeCapacity(extra); -} - -fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 { - const payload_offset: u32 = @intCast(self.extra.items.len); - inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { - const field_val = @field(extra, field.name); - const word: u32 = switch (field.type) { - u32 => field_val, - i32 => @bitCast(field_val), - Ref => @intFromEnum(field_val), - StorageClass => @intFromEnum(field_val), - String => @intFromEnum(field_val), - InternPool.Index => @intFromEnum(field_val), - else => @compileError("Invalid type: " ++ @typeName(field.type)), - }; - self.extra.appendAssumeCapacity(word); - } - return payload_offset; -} - -fn extraData(self: Self, comptime T: type, offset: u32) T { - return self.extraDataTrail(T, offset).data; -} - -fn extraDataTrail(self: Self, comptime T: type, offset: u32) struct { data: T, trail: u32 } { - var result: T = undefined; - const fields = @typeInfo(T).Struct.fields; - inline for (fields, 0..) |field, i| { - const word = self.extra.items[offset + i]; - @field(result, field.name) = switch (field.type) { - u32 => word, - i32 => @bitCast(word), - Ref => @enumFromInt(word), - StorageClass => @enumFromInt(word), - String => @enumFromInt(word), - InternPool.Index => @enumFromInt(word), - else => @compileError("Invalid type: " ++ @typeName(field.type)), - }; - } - return .{ - .data = result, - .trail = offset + @as(u32, @intCast(fields.len)), - }; -} - -/// Represents a reference to some null-terminated string. -pub const String = enum(u32) { - none = std.math.maxInt(u32), - _, - - pub const Adapter = struct { - self: *const Self, - - pub fn eql(ctx: @This(), a: []const u8, _: void, b_index: usize) bool { - const offset = ctx.self.strings.values()[b_index]; - const b = std.mem.sliceTo(ctx.self.string_bytes.items[offset..], 0); - return std.mem.eql(u8, a, b); - } - - pub fn hash(ctx: @This(), a: []const u8) u32 { - _ = ctx; - var hasher = std.hash.Wyhash.init(0); - hasher.update(a); - return @truncate(hasher.final()); - } - }; -}; - -/// Add a string to the cache. Must not contain any 0 values. 
-pub fn addString(self: *Self, spv: *Module, str: []const u8) !String { - assert(std.mem.indexOfScalar(u8, str, 0) == null); - const adapter = String.Adapter{ .self = self }; - const entry = try self.strings.getOrPutAdapted(spv.gpa, str, adapter); - if (!entry.found_existing) { - const offset = self.string_bytes.items.len; - try self.string_bytes.ensureUnusedCapacity(spv.gpa, 1 + str.len); - self.string_bytes.appendSliceAssumeCapacity(str); - self.string_bytes.appendAssumeCapacity(0); - entry.value_ptr.* = @intCast(offset); - } - - return @enumFromInt(entry.index); -} - -pub fn getString(self: *const Self, ref: String) ?[]const u8 { - return switch (ref) { - .none => null, - else => std.mem.sliceTo(self.string_bytes.items[self.strings.values()[@intFromEnum(ref)]..], 0), - }; -} diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index 92b400d438..88fe677345 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -20,11 +20,6 @@ const IdResultType = spec.IdResultType; const Section = @import("Section.zig"); -const Cache = @import("Cache.zig"); -pub const CacheKey = Cache.Key; -pub const CacheRef = Cache.Ref; -pub const CacheString = Cache.String; - /// This structure represents a function that is in-progress of being emitted. /// Commonly, the contents of this structure will be merged with the appropriate /// sections of the module and re-used. Note that the SPIR-V module system makes @@ -98,7 +93,7 @@ pub const EntryPoint = struct { /// The declaration that should be exported. decl_index: Decl.Index, /// The name of the kernel to be exported. - name: CacheString, + name: []const u8, /// Calling Convention execution_model: spec.ExecutionModel, }; @@ -106,6 +101,9 @@ pub const EntryPoint = struct { /// A general-purpose allocator which may be used to allocate resources for this module gpa: Allocator, +/// Arena for things that need to live for the lifetime of this module. +arena: std.heap.ArenaAllocator, + /// Module layout, according to SPIR-V Spec section 2.4, "Logical Layout of a Module". sections: struct { /// Capability instructions @@ -143,14 +141,21 @@ sections: struct { /// SPIR-V instructions return result-ids. This variable holds the module-wide counter for these. next_result_id: Word, -/// Cache for results of OpString instructions for module file names fed to OpSource. -/// Since OpString is pretty much only used for those, we don't need to keep track of all strings, -/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource. -source_file_names: std.AutoArrayHashMapUnmanaged(CacheString, IdRef) = .{}, +/// Cache for results of OpString instructions. +strings: std.StringArrayHashMapUnmanaged(IdRef) = .{}, -/// SPIR-V type- and constant cache. This structure is used to store information about these in a more -/// efficient manner. -cache: Cache = .{}, +/// Some types shouldn't be emitted more than once, but cannot be caught by +/// the `intern_map` during codegen. Sometimes, IDs are compared to check if +/// types are the same, so we can't delay until the dedup pass. Therefore, +/// this is an ad-hoc structure to cache types where required. +/// According to the SPIR-V specification, section 2.8, this includes all non-aggregate +/// non-pointer types.
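+/// +/// For example, two calls to `intType(.unsigned, 32)` must return the same result-id, +/// because codegen compares type ids directly; aggregates such as structs may be emitted +/// more than once and are only merged later by the linker's deduplication pass.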
+cache: struct { + bool_type: ?IdRef = null, + void_type: ?IdRef = null, + int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, IdRef) = .{}, + float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, IdRef) = .{}, +} = .{}, /// Set of Decls, referred to by Decl.Index. decls: std.ArrayListUnmanaged(Decl) = .{}, @@ -168,6 +173,7 @@ extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = pub fn init(gpa: Allocator) Module { return .{ .gpa = gpa, + .arena = std.heap.ArenaAllocator.init(gpa), .next_result_id = 1, // 0 is an invalid SPIR-V result id, so start counting at 1. }; } @@ -184,8 +190,10 @@ pub fn deinit(self: *Module) void { self.sections.types_globals_constants.deinit(self.gpa); self.sections.functions.deinit(self.gpa); - self.source_file_names.deinit(self.gpa); - self.cache.deinit(self); + self.strings.deinit(self.gpa); + + self.cache.int_types.deinit(self.gpa); + self.cache.float_types.deinit(self.gpa); self.decls.deinit(self.gpa); self.decl_deps.deinit(self.gpa); @@ -193,40 +201,37 @@ pub fn deinit(self: *Module) void { self.entry_points.deinit(self.gpa); self.extended_instruction_set.deinit(self.gpa); + self.arena.deinit(); self.* = undefined; } -pub fn allocId(self: *Module) spec.IdResult { - defer self.next_result_id += 1; - return @enumFromInt(self.next_result_id); +pub const IdRange = struct { + base: u32, + len: u32, + + pub fn at(range: IdRange, i: usize) IdResult { + assert(i < range.len); + return @enumFromInt(range.base + i); + } +}; + +pub fn allocIds(self: *Module, n: u32) IdRange { + defer self.next_result_id += n; + return .{ + .base = self.next_result_id, + .len = n, + }; } -pub fn allocIds(self: *Module, n: u32) spec.IdResult { - defer self.next_result_id += n; - return @enumFromInt(self.next_result_id); +pub fn allocId(self: *Module) IdResult { + return self.allocIds(1).at(0); } pub fn idBound(self: Module) Word { return self.next_result_id; } -pub fn resolve(self: *Module, key: CacheKey) !CacheRef { - return self.cache.resolve(self, key); -} - -pub fn resultId(self: *const Module, ref: CacheRef) IdResult { - return self.cache.resultId(ref); -} - -pub fn resolveId(self: *Module, key: CacheKey) !IdResult { - return self.resultId(try self.resolve(key)); -} - -pub fn resolveString(self: *Module, str: []const u8) !CacheString { - return try self.cache.addString(self, str); -} - fn addEntryPointDeps( self: *Module, decl_index: Decl.Index, @@ -271,7 +276,7 @@ fn entryPoints(self: *Module) !Section { try entry_points.emit(self.gpa, .OpEntryPoint, .{ .execution_model = entry_point.execution_model, .entry_point = entry_point_id, - .name = self.cache.getString(entry_point.name).?, + .name = entry_point.name, .interface = interface.items, }); } @@ -286,9 +291,6 @@ pub fn finalize(self: *Module, a: Allocator, target: std.Target) ![]Word { var entry_points = try self.entryPoints(); defer entry_points.deinit(self.gpa); - var types_constants = try self.cache.materialize(self); - defer types_constants.deinit(self.gpa); - const header = [_]Word{ spec.magic_number, // TODO: From cpu features @@ -331,7 +333,6 @@ pub fn finalize(self: *Module, a: Allocator, target: std.Target) ![]Word { self.sections.debug_strings.toWords(), self.sections.debug_names.toWords(), self.sections.annotations.toWords(), - types_constants.toWords(), self.sections.types_globals_constants.toWords(), self.sections.functions.toWords(), }; @@ -376,83 +377,126 @@ pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef { return result_id; } -/// Fetch the 
result-id of an OpString instruction that encodes the path of the source -/// file of the decl. This function may also emit an OpSource with source-level information regarding -/// the decl. -pub fn resolveSourceFileName(self: *Module, path: []const u8) !IdRef { - const path_ref = try self.resolveString(path); - const result = try self.source_file_names.getOrPut(self.gpa, path_ref); - if (!result.found_existing) { - const file_result_id = self.allocId(); - result.value_ptr.* = file_result_id; - try self.sections.debug_strings.emit(self.gpa, .OpString, .{ - .id_result = file_result_id, - .string = path, - }); +/// Fetch the result-id of an instruction corresponding to a string. +pub fn resolveString(self: *Module, string: []const u8) !IdRef { + if (self.strings.get(string)) |id| { + return id; } - return result.value_ptr.*; + const id = self.allocId(); + try self.strings.put(self.gpa, try self.arena.allocator().dupe(u8, string), id); + + try self.sections.debug_strings.emit(self.gpa, .OpString, .{ + .id_result = id, + .string = string, + }); + + return id; } -pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !CacheRef { - return try self.resolve(.{ .int_type = .{ - .signedness = signedness, - .bits = bits, - } }); -} - -pub fn vectorType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef { - return try self.resolve(.{ .vector_type = .{ - .component_type = elem_ty_ref, - .component_count = len, - } }); -} - -pub fn arrayType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef { - const len_ty_ref = try self.resolve(.{ .int_type = .{ - .signedness = .unsigned, - .bits = 32, - } }); - const len_ref = try self.resolve(.{ .int = .{ - .ty = len_ty_ref, - .value = .{ .uint64 = len }, - } }); - return try self.resolve(.{ .array_type = .{ - .element_type = elem_ty_ref, - .length = len_ref, - } }); -} - -pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef { - const ty = self.cache.lookup(ty_ref).int_type; - const Value = Cache.Key.Int.Value; - return try self.resolveId(.{ .int = .{ - .ty = ty_ref, - .value = switch (ty.signedness) { - .signed => Value{ .int64 = @intCast(value) }, - .unsigned => Value{ .uint64 = @intCast(value) }, - }, - } }); -} - -pub fn constUndef(self: *Module, ty_ref: CacheRef) !IdRef { - return try self.resolveId(.{ .undef = .{ .ty = ty_ref } }); -} - -pub fn constNull(self: *Module, ty_ref: CacheRef) !IdRef { - return try self.resolveId(.{ .null = .{ .ty = ty_ref } }); -} - -pub fn constBool(self: *Module, ty_ref: CacheRef, value: bool) !IdRef { - return try self.resolveId(.{ .bool = .{ .ty = ty_ref, .value = value } }); -} - -pub fn constComposite(self: *Module, ty_ref: CacheRef, members: []const IdRef) !IdRef { +pub fn structType(self: *Module, types: []const IdRef, maybe_names: ?[]const []const u8) !IdRef { const result_id = self.allocId(); - try self.sections.types_globals_constants.emit(self.gpa, .OpSpecConstantComposite, .{ - .id_result_type = self.resultId(ty_ref), + + try self.sections.types_globals_constants.emit(self.gpa, .OpTypeStruct, .{ + .id_result = result_id, + .id_ref = types, + }); + + if (maybe_names) |names| { + assert(names.len == types.len); + for (names, 0..) 
|name, i| { + try self.memberDebugName(result_id, @intCast(i), name); + } + } + + return result_id; +} + +pub fn boolType(self: *Module) !IdRef { + if (self.cache.bool_type) |id| return id; + + const result_id = self.allocId(); + try self.sections.types_globals_constants.emit(self.gpa, .OpTypeBool, .{ + .id_result = result_id, + }); + self.cache.bool_type = result_id; + return result_id; +} + +pub fn voidType(self: *Module) !IdRef { + if (self.cache.void_type) |id| return id; + + const result_id = self.allocId(); + try self.sections.types_globals_constants.emit(self.gpa, .OpTypeVoid, .{ + .id_result = result_id, + }); + self.cache.void_type = result_id; + try self.debugName(result_id, "void"); + return result_id; +} + +pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !IdRef { + assert(bits > 0); + const entry = try self.cache.int_types.getOrPut(self.gpa, .{ .signedness = signedness, .bits = bits }); + if (!entry.found_existing) { + const result_id = self.allocId(); + entry.value_ptr.* = result_id; + try self.sections.types_globals_constants.emit(self.gpa, .OpTypeInt, .{ + .id_result = result_id, + .width = bits, + .signedness = switch (signedness) { + .signed => 1, + .unsigned => 0, + }, + }); + + switch (signedness) { + .signed => try self.debugNameFmt(result_id, "i{}", .{bits}), + .unsigned => try self.debugNameFmt(result_id, "u{}", .{bits}), + } + } + return entry.value_ptr.*; +} + +pub fn floatType(self: *Module, bits: u16) !IdRef { + assert(bits > 0); + const entry = try self.cache.float_types.getOrPut(self.gpa, .{ .bits = bits }); + if (!entry.found_existing) { + const result_id = self.allocId(); + entry.value_ptr.* = result_id; + try self.sections.types_globals_constants.emit(self.gpa, .OpTypeFloat, .{ + .id_result = result_id, + .width = bits, + }); + try self.debugNameFmt(result_id, "f{}", .{bits}); + } + return entry.value_ptr.*; +} + +pub fn vectorType(self: *Module, len: u32, child_id: IdRef) !IdRef { + const result_id = self.allocId(); + try self.sections.types_globals_constants.emit(self.gpa, .OpTypeVector, .{ + .id_result = result_id, + .component_type = child_id, + .component_count = len, + }); + return result_id; +} + +pub fn constUndef(self: *Module, ty_id: IdRef) !IdRef { + const result_id = self.allocId(); + try self.sections.types_globals_constants.emit(self.gpa, .OpUndef, .{ + .id_result_type = ty_id, + .id_result = result_id, + }); + return result_id; +} + +pub fn constNull(self: *Module, ty_id: IdRef) !IdRef { + const result_id = self.allocId(); + try self.sections.types_globals_constants.emit(self.gpa, .OpConstantNull, .{ + .id_result_type = ty_id, .id_result = result_id, - .constituents = members, }); return result_id; } @@ -520,7 +564,7 @@ pub fn declareEntryPoint( ) !void { try self.entry_points.append(self.gpa, .{ .decl_index = decl_index, - .name = try self.resolveString(name), + .name = try self.arena.allocator().dupe(u8, name), .execution_model = execution_model, }); } diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 728db2d848..950e0375f0 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -245,7 +245,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node const module = try spv.finalize(arena, target); errdefer arena.free(module); - const linked_module = self.linkModule(arena, module) catch |err| switch (err) { + const linked_module = self.linkModule(arena, module, &sub_prog_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => |other| { 
log.err("error while linking: {s}\n", .{@errorName(other)}); @@ -256,7 +256,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node try self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module)); } -fn linkModule(self: *SpirV, a: Allocator, module: []Word) ![]Word { +fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: *std.Progress.Node) ![]Word { _ = self; const lower_invocation_globals = @import("SpirV/lower_invocation_globals.zig"); @@ -267,9 +267,9 @@ fn linkModule(self: *SpirV, a: Allocator, module: []Word) ![]Word { defer parser.deinit(); var binary = try parser.parse(module); - try lower_invocation_globals.run(&parser, &binary); - try prune_unused.run(&parser, &binary); - try dedup.run(&parser, &binary); + try lower_invocation_globals.run(&parser, &binary, progress); + try prune_unused.run(&parser, &binary, progress); + try dedup.run(&parser, &binary, progress); return binary.finalize(a); } diff --git a/src/link/SpirV/BinaryModule.zig b/src/link/SpirV/BinaryModule.zig index e150890315..648e55f2ca 100644 --- a/src/link/SpirV/BinaryModule.zig +++ b/src/link/SpirV/BinaryModule.zig @@ -116,7 +116,8 @@ pub const Instruction = struct { const instruction_len = self.words[self.offset] >> 16; defer self.offset += instruction_len; defer self.index += 1; - assert(instruction_len != 0 and self.offset < self.words.len); // Verified in BinaryModule.parse. + assert(instruction_len != 0); + assert(self.offset < self.words.len); return Instruction{ .opcode = @enumFromInt(self.words[self.offset] & 0xFFFF), diff --git a/src/link/SpirV/deduplicate.zig b/src/link/SpirV/deduplicate.zig index 4a73276a9a..4cf5ebf65a 100644 --- a/src/link/SpirV/deduplicate.zig +++ b/src/link/SpirV/deduplicate.zig @@ -47,6 +47,10 @@ const ModuleInfo = struct { result_id_index: u16, /// The first decoration in `self.decorations`. first_decoration: u32, + + fn operands(self: Entity, binary: *const BinaryModule) []const Word { + return binary.instructions[self.first_operand..][0..self.num_operands]; + } }; /// Maps result-id to Entity's @@ -210,10 +214,41 @@ const EntityContext = struct { const entity = self.info.entities.values()[index]; + // If the current pointer is recursive, don't immediately add it to the map. This ensures that + // a recursive pointer gets the same hash as a pointer that points to the + // same child but has a different result-id. if (entity.kind == .OpTypePointer) { // This may be either a pointer that is forward-referenced in the future, // or a forward reference to a pointer. - const entry = try self.ptr_map_a.getOrPut(self.a, id); + // Note: We use the **struct** here instead of the pointer itself, to avoid an edge case like this: + // + // A - C*' + // \ + // C - C*' + // / + // B - C*" + // + // In this case, hashing A goes like + // A -> C*' -> C -> C*' recursion + // And hashing B goes like + // B -> C*" -> C -> C*' -> C -> C*' recursion + // There are several calls to ptrType in codegen that may cause C*' and C*" to be generated as separate + // types. This is not a problem for C itself though - this can only be generated through resolveType() + // and so ensures equality by Zig's type system. Technically the above problem is still present, but it + // would only be present in a structure such as + // + // A - C*' - C' + // \ + // C*" - C - C* + // / + // B + // + // where there is a duplicate definition of struct C.
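+ // (Such a duplicate definition of C would need two structurally identical OpTypeStructs + // to be emitted separately, which resolveType() rules out, as noted above.)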
Resolving this requires a much more time-consuming + // algorithm though, and because we don't expect any correctness issues with it, we leave that for now. + + // TODO: Do we need to mind the storage class here? It's going to be recursive regardless, right? + const struct_id: ResultId = @enumFromInt(entity.operands(self.binary)[2]); + const entry = try self.ptr_map_a.getOrPut(self.a, struct_id); if (entry.found_existing) { // Pointer already seen. Hash the index instead of recursing into its children. std.hash.autoHash(hasher, entry.index); @@ -228,12 +263,17 @@ const EntityContext = struct { for (decorations) |decoration| { try self.hashEntity(hasher, decoration); } + + if (entity.kind == .OpTypePointer) { + const struct_id: ResultId = @enumFromInt(entity.operands(self.binary)[2]); + assert(self.ptr_map_a.swapRemove(struct_id)); + } } fn hashEntity(self: *EntityContext, hasher: *std.hash.Wyhash, entity: ModuleInfo.Entity) !void { std.hash.autoHash(hasher, entity.kind); // Process operands - const operands = self.binary.instructions[entity.first_operand..][0..entity.num_operands]; + const operands = entity.operands(self.binary); for (operands, 0..) |operand, i| { if (i == entity.result_id_index) { // Not relevant, skip... @@ -273,12 +313,19 @@ const EntityContext = struct { const entity_a = self.info.entities.values()[index_a]; const entity_b = self.info.entities.values()[index_b]; + if (entity_a.kind != entity_b.kind) { + return false; + } + if (entity_a.kind == .OpTypePointer) { // May be a forward reference, or should be saved as a potential // forward reference in the future. Whatever the case, it should // be the same for both a and b. - const entry_a = try self.ptr_map_a.getOrPut(self.a, id_a); - const entry_b = try self.ptr_map_b.getOrPut(self.a, id_b); + const struct_id_a: ResultId = @enumFromInt(entity_a.operands(self.binary)[2]); + const struct_id_b: ResultId = @enumFromInt(entity_b.operands(self.binary)[2]); + + const entry_a = try self.ptr_map_a.getOrPut(self.a, struct_id_a); + const entry_b = try self.ptr_map_b.getOrPut(self.a, struct_id_b); if (entry_a.found_existing != entry_b.found_existing) return false; if (entry_a.index != entry_b.index) return false; @@ -306,6 +353,14 @@ const EntityContext = struct { } } + if (entity_a.kind == .OpTypePointer) { + const struct_id_a: ResultId = @enumFromInt(entity_a.operands(self.binary)[2]); + const struct_id_b: ResultId = @enumFromInt(entity_b.operands(self.binary)[2]); + + assert(self.ptr_map_a.swapRemove(struct_id_a)); + assert(self.ptr_map_b.swapRemove(struct_id_b)); + } + return true; } @@ -316,8 +371,8 @@ const EntityContext = struct { return false; } - const operands_a = self.binary.instructions[entity_a.first_operand..][0..entity_a.num_operands]; - const operands_b = self.binary.instructions[entity_b.first_operand..][0..entity_b.num_operands]; + const operands_a = entity_a.operands(self.binary); + const operands_b = entity_b.operands(self.binary); // Note: returns false for operands that have explicit defaults in optional operands...
oh well if (operands_a.len != operands_b.len) { @@ -363,7 +418,11 @@ const EntityHashContext = struct { } }; -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void { +pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void { + var sub_node = progress.start("deduplicate", 0); + sub_node.activate(); + defer sub_node.end(); + var arena = std.heap.ArenaAllocator.init(parser.a); defer arena.deinit(); const a = arena.allocator(); @@ -376,6 +435,7 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void { .info = &info, .binary = binary, }; + for (info.entities.keys()) |id| { _ = try ctx.hash(id); } @@ -395,6 +455,8 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void { } } + sub_node.setEstimatedTotalItems(binary.instructions.len); + // Now process the module, and replace instructions where needed. var section = Section{}; var it = binary.iterateInstructions(); @@ -402,6 +464,8 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void { var new_operands = std.ArrayList(u32).init(a); var emitted_ptrs = std.AutoHashMap(ResultId, void).init(a); while (it.next()) |inst| { + defer sub_node.setCompletedItems(inst.offset); + // Result-id can only be the first or second operand const inst_spec = parser.getInstSpec(inst.opcode).?; @@ -454,7 +518,7 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void { if (entity.kind == .OpTypePointer and !emitted_ptrs.contains(id)) { // Grab the pointer's storage class from its operands in the original // module. - const storage_class: spec.StorageClass = @enumFromInt(binary.instructions[entity.first_operand + 1]); + const storage_class: spec.StorageClass = @enumFromInt(entity.operands(binary)[1]); try section.emit(a, .OpTypeForwardPointer, .{ .pointer_type = id, .storage_class = storage_class, diff --git a/src/link/SpirV/lower_invocation_globals.zig b/src/link/SpirV/lower_invocation_globals.zig index e6de48ff93..ee992112c8 100644 --- a/src/link/SpirV/lower_invocation_globals.zig +++ b/src/link/SpirV/lower_invocation_globals.zig @@ -682,7 +682,11 @@ const ModuleBuilder = struct { } }; -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void { +pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void { + var sub_node = progress.start("Lower invocation globals", 6); + sub_node.activate(); + defer sub_node.end(); + var arena = std.heap.ArenaAllocator.init(parser.a); defer arena.deinit(); const a = arena.allocator(); @@ -691,10 +695,16 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void { try info.resolve(a); var builder = try ModuleBuilder.init(a, binary.*, info); + sub_node.completeOne(); try builder.deriveNewFnInfo(info); + sub_node.completeOne(); try builder.processPreamble(binary.*, info); + sub_node.completeOne(); try builder.emitFunctionTypes(info); + sub_node.completeOne(); try builder.rewriteFunctions(parser, binary.*, info); + sub_node.completeOne(); try builder.emitNewEntryPoints(info); + sub_node.completeOne(); try builder.finalize(parser.a, binary); } diff --git a/src/link/SpirV/prune_unused.zig b/src/link/SpirV/prune_unused.zig index a6d0ceaea7..cefdaddd93 100644 --- a/src/link/SpirV/prune_unused.zig +++ b/src/link/SpirV/prune_unused.zig @@ -255,7 +255,11 @@ fn removeIdsFromMap(a: Allocator, map: anytype, info: ModuleInfo, alive_marker: } } -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void { +pub fn run(parser: *BinaryModule.Parser, 
diff --git a/src/link/SpirV/prune_unused.zig b/src/link/SpirV/prune_unused.zig
index a6d0ceaea7..cefdaddd93 100644
--- a/src/link/SpirV/prune_unused.zig
+++ b/src/link/SpirV/prune_unused.zig
@@ -255,7 +255,11 @@ fn removeIdsFromMap(a: Allocator, map: anytype, info: ModuleInfo, alive_marker:
     }
 }

-pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void {
+pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void {
+    var sub_node = progress.start("Prune unused IDs", 0);
+    sub_node.activate();
+    defer sub_node.end();
+
     var arena = std.heap.ArenaAllocator.init(parser.a);
     defer arena.deinit();
     const a = arena.allocator();
@@ -285,9 +289,13 @@ pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule) !void {

     var section = Section{};

+    sub_node.setEstimatedTotalItems(binary.instructions.len);
+
     var new_functions_section: ?usize = null;
     var it = binary.iterateInstructions();
     skip: while (it.next()) |inst| {
+        defer sub_node.setCompletedItems(inst.offset);
+
         const inst_spec = parser.getInstSpec(inst.opcode).?;

         reemit: {
diff --git a/test/behavior/destructure.zig b/test/behavior/destructure.zig
index 78ee999ddb..43ddbb7a4d 100644
--- a/test/behavior/destructure.zig
+++ b/test/behavior/destructure.zig
@@ -23,8 +23,6 @@ test "simple destructure" {
 }

 test "destructure with comptime syntax" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const S = struct {
         fn doTheTest() !void {
             {
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index 17acdd8f1a..8a008d987d 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -181,7 +181,6 @@ test "function with complex callconv and return type expressions" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     try expect(fComplexCallconvRet(3).x == 9);
 }
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index e1961695aa..9786ea5d06 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -447,7 +447,6 @@ test "return type of generic function is function pointer" {

 test "coerced function body has inequal value with its uncoerced body" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = struct {
         const A = B(i32, c);
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 370e7465db..efc698c128 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -12,7 +12,6 @@ const math = std.math;
 test "assignment operators" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     var i: u32 = 0;
     i += 5;
@@ -188,7 +187,6 @@ test "@ctz vectors" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
         // This regressed with LLVM 14:
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 2e3e8365ba..c36b4a520d 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -850,8 +850,6 @@ test "inline switch range that includes the maximum value of the switched type"
 }

 test "nested break ignores switch conditions and breaks instead" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const S = struct {
         fn register_to_address(ident: []const u8) !u8 {
             const reg: u8 = if (std.mem.eql(u8, ident, "zero")) 0x00 else blk: {
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index ffdf2305d2..43131ae2d4 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -1750,7 +1750,6 @@ test "reinterpret extern union" {
         // https://github.com/ziglang/zig/issues/19389
         return error.SkipZigTest;
     }
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const U = extern union {
         foo: u8,
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 5d61f471aa..eaf09db628 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -76,7 +76,6 @@ test "vector int operators" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {
@@ -1037,7 +1036,6 @@ test "multiplication-assignment operator with an array operand" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = struct {
         fn doTheTest() !void {