mirror of https://github.com/ziglang/zig.git
synced 2026-02-03 05:03:38 +00:00
spirv: basic setup for using new type constant cache
This commit is contained in:
parent
96a66d14a1
commit
b2a984cda6
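
This commit wires a new TypeConstantCache into the SPIR-V backend: DeclGen gains intType2/resolveType2, which build cache keys, and Module gains the resolve, resultId, and resolveId wrappers around the cache. Below is a rough sketch of the intended call pattern, pieced together from those wrappers; the exampleUsage helper and the surrounding DeclGen context are assumptions for illustration, not part of the commit.

// Illustrative sketch only; not part of the diff below.
fn exampleUsage(self: *DeclGen) !void {
    // Ask the cache for a type; resolve() interns the key and returns a stable Ref.
    const u32_ty: SpvRef = try self.spv.resolve(.{ .int_type = .{
        .signedness = .unsigned,
        .bits = 32,
    } });
    // Resolving an identical key yields the same Ref, so only one OpTypeInt is ever emitted.
    const again = try self.spv.resolve(.{ .int_type = .{ .signedness = .unsigned, .bits = 32 } });
    std.debug.assert(u32_ty == again);
    // When the type is needed as an instruction operand, turn the Ref into a result-id.
    const type_id = self.spv.resultId(u32_ty);
    _ = type_id;
}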
@@ -22,6 +22,8 @@ const IdResultType = spec.IdResultType;
const StorageClass = spec.StorageClass;

const SpvModule = @import("spirv/Module.zig");
const SpvRef = SpvModule.TypeConstantCache.Ref;

const SpvSection = @import("spirv/Section.zig");
const SpvType = @import("spirv/type.zig").Type;
const SpvAssembler = @import("spirv/Assembler.zig");
@@ -1158,6 +1160,18 @@ pub const DeclGen = struct {
        return try self.spv.resolveType(try SpvType.int(self.spv.arena, signedness, backing_bits));
    }

    fn intType2(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !SpvRef {
        const backing_bits = self.backingIntBits(bits) orelse {
            // TODO: Integers too big for any native type are represented as "composite integers":
            // An array of largestSupportedIntBits.
            return self.todo("Implement {s} composite int type of {} bits", .{ @tagName(signedness), bits });
        };
        return try self.spv.resolve(.{ .int_type = .{
            .signedness = signedness,
            .bits = backing_bits,
        } });
    }

    /// Create an integer type that represents 'usize'.
    fn sizeType(self: *DeclGen) !SpvType.Ref {
        return try self.intType(.unsigned, self.getTarget().ptrBitWidth());
@@ -1238,9 +1252,61 @@ pub const DeclGen = struct {
        return try self.spv.simpleStructType(members.slice());
    }

    fn resolveType2(self: *DeclGen, ty: Type, repr: Repr) !SpvRef {
        const target = self.getTarget();
        switch (ty.zigTypeTag()) {
            .Void, .NoReturn => return try self.spv.resolve(.void_type),
            .Bool => switch (repr) {
                .direct => return try self.spv.resolve(.bool_type),
                .indirect => return try self.intType2(.unsigned, 1),
            },
            .Int => {
                const int_info = ty.intInfo(target);
                return try self.intType2(int_info.signedness, int_info.bits);
            },
            .Enum => {
                var buffer: Type.Payload.Bits = undefined;
                const tag_ty = ty.intTagType(&buffer);
                return self.resolveType2(tag_ty, repr);
            },
            .Float => {
                // We can't (and don't want to) emulate floating point types with other floating point types
                // like we do with integers, so if the float width is not supported, just return an error.
                const bits = ty.floatBits(target);
                const supported = switch (bits) {
                    16 => Target.spirv.featureSetHas(target.cpu.features, .Float16),
                    // 32-bit floats are always supported (see spec, 2.16.1, Data rules).
                    32 => true,
                    64 => Target.spirv.featureSetHas(target.cpu.features, .Float64),
                    else => false,
                };

                if (!supported) {
                    return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
                }

                return try self.spv.resolve(.{ .float_type = .{ .bits = bits } });
            },
            .Array => {
                const elem_ty = ty.childType();
                const elem_ty_ref = try self.resolveType2(elem_ty, .direct);
                const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel()) orelse {
                    return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel()});
                };
                _ = total_len;
                return self.spv.resolve(.{ .array_type = .{
                    .element_type = elem_ty_ref,
                    .length = @intToEnum(SpvRef, 0),
                } });
            },
            else => unreachable, // TODO
        }
    }

    /// Turn a Zig type into a SPIR-V Type, and return a reference to it.
    fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!SpvType.Ref {
        log.debug("resolveType: ty = {}", .{ty.fmt(self.module)});
        _ = try self.resolveType2(ty, repr);
        const target = self.getTarget();
        switch (ty.zigTypeTag()) {
            .Void, .NoReturn => return try self.spv.resolveType(SpvType.initTag(.void)),
@@ -21,6 +21,7 @@ const IdResultType = spec.IdResultType;

const Section = @import("Section.zig");
const Type = @import("type.zig").Type;
pub const TypeConstantCache = @import("TypeConstantCache.zig");

const TypeCache = std.ArrayHashMapUnmanaged(Type, IdResultType, Type.ShallowHashContext32, true);
@@ -125,8 +126,16 @@ sections: struct {
    // OpModuleProcessed - skip for now.
    /// Annotation instructions (OpDecorate etc).
    annotations: Section = .{},
    /// Type and constant declarations that are generated by the TypeConstantCache.
    types_and_constants: Section = .{},
    /// Global variable declarations
    /// From this section, OpLine and OpNoLine are allowed.
    /// According to the SPIR-V documentation, this section normally
    /// also holds type and constant instructions. These are managed
    /// via the tc_cache instead, which is the sole structure that
    /// manages that section. These will be inserted between this and
    /// the previous section when emitting the final binary.
    /// TODO: Do we need this section? Globals are also managed with another mechanism.
    /// The only thing that needs to be kept here is OpUndef
    globals: Section = .{},
    /// Type declarations, constants, global variables
    /// Below this section, OpLine and OpNoLine are allowed.
    types_globals_constants: Section = .{},
@@ -143,11 +152,10 @@ next_result_id: Word,
/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
source_file_names: std.StringHashMapUnmanaged(IdRef) = .{},

/// SPIR-V type cache. Note that according to SPIR-V spec section 2.8, Types and Variables, non-pointer
/// non-aggregate types (which includes matrices and vectors) must have a _unique_ representation in
/// the final binary.
/// Note: Uses ArrayHashMap which is insertion ordered, so that we may refer to other types by index (Type.Ref).
type_cache: TypeCache = .{},
/// SPIR-V type- and constant cache. This structure is used to store information about these in a more
/// efficient manner.
tc_cache: TypeConstantCache = .{},

/// Set of Decls, referred to by Decl.Index.
decls: std.ArrayListUnmanaged(Decl) = .{},
@@ -165,7 +173,7 @@ globals: struct {
    globals: std.AutoArrayHashMapUnmanaged(Decl.Index, Global) = .{},
    /// This pseudo-section contains the initialization code for all the globals. Instructions from
    /// here are reordered when flushing the module. Its contents should be part of the
    /// `types_globals_constants` SPIR-V section.
    /// `types_globals_constants` SPIR-V section when the module is emitted.
    section: Section = .{},
} = .{},
@@ -184,12 +192,11 @@ pub fn deinit(self: *Module) void {
    self.sections.debug_strings.deinit(self.gpa);
    self.sections.debug_names.deinit(self.gpa);
    self.sections.annotations.deinit(self.gpa);
    self.sections.types_and_constants.deinit(self.gpa);
    self.sections.types_globals_constants.deinit(self.gpa);
    self.sections.globals.deinit(self.gpa);
    self.sections.functions.deinit(self.gpa);

    self.source_file_names.deinit(self.gpa);
    self.type_cache.deinit(self.gpa);
    self.tc_cache.deinit(self);

    self.decls.deinit(self.gpa);
    self.decl_deps.deinit(self.gpa);
@@ -216,6 +223,18 @@ pub fn idBound(self: Module) Word {
    return self.next_result_id;
}

pub fn resolve(self: *Module, key: TypeConstantCache.Key) !TypeConstantCache.Ref {
    return self.tc_cache.resolve(self, key);
}

pub fn resultId(self: *Module, ref: TypeConstantCache.Ref) IdResult {
    return self.tc_cache.resultId(ref);
}

pub fn resolveId(self: *Module, key: TypeConstantCache.Key) !IdResult {
    return self.resultId(try self.resolve(key));
}

fn orderGlobalsInto(
    self: *Module,
    decl_index: Decl.Index,
@@ -327,6 +346,9 @@ pub fn flush(self: *Module, file: std.fs.File) !void {
    var entry_points = try self.entryPoints();
    defer entry_points.deinit(self.gpa);

    var types_constants = try self.tc_cache.materialize(self);
    defer types_constants.deinit(self.gpa);

    // Note: needs to be kept in order according to section 2.3!
    const buffers = &[_][]const Word{
        &header,
@@ -337,8 +359,8 @@ pub fn flush(self: *Module, file: std.fs.File) !void {
        self.sections.debug_strings.toWords(),
        self.sections.debug_names.toWords(),
        self.sections.annotations.toWords(),
        self.sections.types_constants.toWords(),
        self.sections.types_globals_constants.toWords(),
        types_constants.toWords(),
        self.sections.globals.toWords(),
        globals.toWords(),
        self.sections.functions.toWords(),
    };
@@ -891,8 +913,8 @@ pub fn declareEntryPoint(self: *Module, decl_index: Decl.Index, name: []const u8
pub fn debugName(self: *Module, target: IdResult, comptime fmt: []const u8, args: anytype) !void {
    const name = try std.fmt.allocPrint(self.gpa, fmt, args);
    defer self.gpa.free(name);
    try debug.emit(self.gpa, .OpName, .{
        .target = result_id,
    try self.sections.debug_names.emit(self.gpa, .OpName, .{
        .target = target,
        .name = name,
    });
}
@@ -1,16 +1,19 @@
//! This file implements an InternPool-like structure that caches
//! SPIR-V types and constants.
//! In the case of SPIR-V, the type- and constant instructions
//! describe the type and constant fully. This means we can save
//! memory by representing these items directly in spir-v code,
//! and decoding that when required.
//! This does not work for OpDecorate instructions though, and for
//! those we keep some additional metadata.
//! SPIR-V types and constants. Instead of generating type and
//! constant instructions directly, we first keep a representation
//! in a compressed database. This is then only later turned into
//! actual SPIR-V instructions.
//! Note: This cache is insertion-ordered. This means that we
//! can materialize the SPIR-V instructions in the proper order,
//! as SPIR-V requires that the type is emitted before use.
//! Note: According to SPIR-V spec section 2.8, Types and Variables,
//! non-pointer non-aggregate types (which includes matrices and
//! vectors) must have a _unique_ representation in the final binary.

const std = @import("std");
const Allocator = std.mem.Allocator;

const Section = @import("section.zig");
const Section = @import("Section.zig");
const Module = @import("Module.zig");

const spec = @import("spec.zig");
@@ -21,7 +24,7 @@ const Self = @This();

map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
items: std.MultiArrayList(Item) = .{},
extra: std.ArrayHashMapUnmanaged(u32) = .{},
extra: std.ArrayListUnmanaged(u32) = .{},

const Item = struct {
    tag: Tag,
@@ -32,6 +35,7 @@ const Item = struct {
};

const Tag = enum {
    // -- Types
    /// Simple type that has no additional data.
    /// data is SimpleType.
    type_simple,
@@ -45,13 +49,18 @@ const Tag = enum {
    /// data is number of bits
    type_float,
    /// Vector type
    /// data is payload to Key.VectorType
    /// data is payload to VectorType
    type_vector,
    /// Array type
    /// data is payload to ArrayType
    type_array,

    const SimpleType = enum {
        void,
        bool,
    };
    // -- Values

    const SimpleType = enum { void, bool };

    const VectorType = Key.VectorType;
    const ArrayType = Key.ArrayType;
};

pub const Ref = enum(u32) { _ };
@@ -61,11 +70,15 @@ pub const Ref = enum(u32) { _ };
/// database: Values described for this structure are ephemeral and stored
/// in a more memory-efficient manner internally.
pub const Key = union(enum) {
    void_ty,
    bool_ty,
    int_ty: IntType,
    float_ty: FloatType,
    vector_ty: VectorType,
    // -- Types
    void_type,
    bool_type,
    int_type: IntType,
    float_type: FloatType,
    vector_type: VectorType,
    array_type: ArrayType,

    // -- values

    pub const IntType = std.builtin.Type.Int;
    pub const FloatType = std.builtin.Type.Float;
@@ -75,55 +88,66 @@ pub const Key = union(enum) {
        component_count: u32,
    };

    pub const ArrayType = struct {
        /// Child type of this array.
        element_type: Ref,
        /// Reference to a constant.
        length: Ref,
        /// Type has the 'ArrayStride' decoration.
        /// If zero, no stride is present.
        stride: u32 = 0,
    };

    fn hash(self: Key) u32 {
        var hasher = std.hash.Wyhash.init(0);
        std.hash.autoHash(&hasher, self);
        return @truncate(u32, hasher.final());
    }

    fn eql(a: Key, b: Key) u32 {
    fn eql(a: Key, b: Key) bool {
        return std.meta.eql(a, b);
    }

    pub const Adapter = struct {
        self: *const Self,

        pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: u32) bool {
        pub fn eql(ctx: @This(), a: Key, b_void: void, b_index: usize) bool {
            _ = b_void;
            return ctx.self.lookup(@intToEnum(Ref, b_map_index)).eql(a);
            return ctx.self.lookup(@intToEnum(Ref, b_index)).eql(a);
        }

        pub fn hash(ctx: @This(), a: Key) u32 {
            return ctx.self.hash(a);
            _ = ctx;
            return a.hash();
        }
    };

    fn toSimpleType(self: Key) Tag.SimpleType {
        return switch (self) {
            .void_ty => .void,
            .bool_ty => .bool,
            .void_type => .void,
            .bool_type => .bool,
            else => unreachable,
        };
    }
};
pub fn deinit(self: *Self, spv: Module) void {
pub fn deinit(self: *Self, spv: *const Module) void {
    self.map.deinit(spv.gpa);
    self.items.deinit(spv.gpa);
    self.extra.deinit(spv.gpa);
}

/// Actually materialize the database into spir-v instructions.
// TODO: This should generate decorations as well as regular instructions.
// Important is that these are generated in-order, but that should be fine.
pub fn finalize(self: *Self, spv: *Module) !void {
    // This function should really be the only one that modifies spv.types_and_constants.
    // TODO: Make this function return the section instead.
    std.debug.assert(spv.sections.types_and_constants.instructions.items.len == 0);

/// This function returns a spir-v section of (only) constant and type instructions.
/// Additionally, decorations, debug names, etc, are all directly emitted into the
/// `spv` module. The section is allocated with `spv.gpa`.
pub fn materialize(self: *Self, spv: *Module) !Section {
    var section = Section{};
    errdefer section.deinit(spv.gpa);
    for (self.items.items(.result_id), 0..) |result_id, index| {
        try self.emit(spv, result_id, @intToEnum(Ref, index));
        try self.emit(spv, result_id, @intToEnum(Ref, index), &section);
    }
    return section;
}

fn emit(
@@ -131,97 +155,104 @@ fn emit(
    spv: *Module,
    result_id: IdResult,
    ref: Ref,
    section: *Section,
) !void {
    const tc = &spv.sections.types_and_constants;
    const key = self.lookup(ref);
    switch (key) {
        .void_ty => {
            try tc.emit(spv.gpa, .OpTypeVoid, .{ .id_result = result_id });
        .void_type => {
            try section.emit(spv.gpa, .OpTypeVoid, .{ .id_result = result_id });
            try spv.debugName(result_id, "void", .{});
        },
        .bool_ty => {
            try tc.emit(spv.gpa, .OpTypeBool, .{ .id_result = result_id });
        .bool_type => {
            try section.emit(spv.gpa, .OpTypeBool, .{ .id_result = result_id });
            try spv.debugName(result_id, "bool", .{});
        },
        .int_ty => |int| {
            try tc.emit(spv.gpa, .OpTypeInt, .{
        .int_type => |int| {
            try section.emit(spv.gpa, .OpTypeInt, .{
                .id_result = result_id,
                .width = int.bits,
                .signedness = switch (int.signedness) {
                    .unsigned => 0,
                    .unsigned => @as(spec.Word, 0),
                    .signed => 1,
                },
            });
            const ui: []const u8 = switch (int.signedness) {
                0 => "u",
                1 => "i",
                else => unreachable,
                .unsigned => "u",
                .signed => "i",
            };
            try spv.debugName(result_id, "{s}{}", .{ ui, int.bits });
        },
        .float_ty => |float| {
            try tc.emit(spv.gpa, .OpTypeFloat, .{
        .float_type => |float| {
            try section.emit(spv.gpa, .OpTypeFloat, .{
                .id_result = result_id,
                .width = float.bits,
            });
            try spv.debugName(result_id, "f{}", .{float.bits});
        },
        .vector_ty => |vector| {
            try tc.emit(spv.gpa, .OpTypeVector, .{
        .vector_type => |vector| {
            try section.emit(spv.gpa, .OpTypeVector, .{
                .id_result = result_id,
                .component_type = self.resultId(vector.component_type),
                .component_count = vector.component_count,
            });
        },
        .array_type => |array| {
            try section.emit(spv.gpa, .OpTypeArray, .{
                .id_result = result_id,
                .element_type = self.resultId(array.element_type),
                .length = self.resultId(array.length),
            });
            if (array.stride != 0) {
                try spv.decorate(result_id, .{ .ArrayStride = .{ .array_stride = array.stride } });
            }
        },
    }
}
/// Add a key to this cache. Returns a reference to the key that
/// was added. The corresponding result-id can be queried using
/// self.resultId with the result.
pub fn add(self: *Self, spv: *Module, key: Key) !Ref {
pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
    const adapter: Key.Adapter = .{ .self = self };
    const entry = try self.map.getOrPutAdapted(spv.gpa, key, adapter);
    if (entry.found_existing) {
        return @intToEnum(Ref, entry.index);
    }
    const result_id = spv.allocId();
    try self.items.ensureUnusedCapacity(spv.gpa, 1);
    switch (key) {
        inline .void_ty, .bool_ty => {
            self.items.appendAssumeCapacity(.{
                .tag = .type_simple,
                .result_id = result_id,
                .data = @enumToInt(key.toSimpleType()),
            });
    const item: Item = switch (key) {
        inline .void_type, .bool_type => .{
            .tag = .type_simple,
            .result_id = result_id,
            .data = @enumToInt(key.toSimpleType()),
        },
        .int_ty => |int| {
        .int_type => |int| blk: {
            const t: Tag = switch (int.signedness) {
                .signed => .type_int_signed,
                .unsigned => .type_int_unsigned,
            };
            self.items.appendAssumeCapacity(.{
            break :blk .{
                .tag = t,
                .result_id = result_id,
                .data = int.bits,
            });
            };
        },
        .float_ty => |float| {
            self.items.appendAssumeCapacity(.{
                .tag = .type_float,
                .result_id = result_id,
                .data = float.bits,
            });
        .float_type => |float| .{
            .tag = .type_float,
            .result_id = result_id,
            .data = float.bits,
        },
        .vector_ty => |vec| {
            const payload = try self.addExtra(vec);
            self.items.appendAssumeCapacity(.{
                .tag = .type_vector,
                .result_id = result_id,
                .data = payload,
            });
        .vector_type => |vector| .{
            .tag = .type_vector,
            .result_id = result_id,
            .data = try self.addExtra(spv, vector),
        },
        }
        .array_type => |array| .{
            .tag = .type_array,
            .result_id = result_id,
            .data = try self.addExtra(spv, array),
        },
    };
    try self.items.append(spv.gpa, item);

    return @intToEnum(Ref, entry.index);
}
@@ -238,36 +269,35 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
    const data = item.data;
    return switch (item.tag) {
        .type_simple => switch (@intToEnum(Tag.SimpleType, data)) {
            .void => .void_ty,
            .bool => .bool_ty,
            .void => .void_type,
            .bool => .bool_type,
        },
        .type_int_signed => .{ .int_ty = .{
        .type_int_signed => .{ .int_type = .{
            .signedness = .signed,
            .bits = @intCast(u16, data),
        } },
        .type_int_unsigned => .{ .int_ty = .{
        .type_int_unsigned => .{ .int_type = .{
            .signedness = .unsigned,
            .bits = @intCast(u16, data),
        } },
        .type_float => .{ .float_ty = .{
        .type_float => .{ .float_type = .{
            .bits = @intCast(u16, data),
        } },
        .type_vector => .{
            .vector_ty = self.extraData(Key.VectorType, data),
        },
        .type_vector => .{ .vector_type = self.extraData(Tag.VectorType, data) },
        .type_array => .{ .array_type = self.extraData(Tag.ArrayType, data) },
    };
}
fn addExtra(self: *Self, gpa: Allocator, extra: anytype) !u32 {
fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 {
    const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
    try self.extra.ensureUnusedCapacity(gpa, fields.len);
    try self.addExtraAssumeCapacity(extra);
    try self.extra.ensureUnusedCapacity(spv.gpa, fields.len);
    return try self.addExtraAssumeCapacity(extra);
}

fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 {
    const payload_offset = @intCast(u32, self.extra.items.len);
    inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
        const field_val = @field(field, field.name);
        const field_val = @field(extra, field.name);
        const word = switch (field.type) {
            u32 => field_val,
            Ref => @enumToInt(field_val),
@@ -279,8 +309,13 @@ fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 {
}

fn extraData(self: Self, comptime T: type, offset: u32) T {
    return self.extraDataTrail(T, offset).data;
}

fn extraDataTrail(self: Self, comptime T: type, offset: u32) struct { data: T, trail: u32 } {
    var result: T = undefined;
    inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
    const fields = @typeInfo(T).Struct.fields;
    inline for (fields, 0..) |field, i| {
        const word = self.extra.items[offset + i];
        @field(result, field.name) = switch (field.type) {
            u32 => word,
@@ -288,5 +323,8 @@ fn extraData(self: Self, comptime T: type, offset: u32) T {
            else => @compileError("Invalid type: " ++ @typeName(field.type)),
        };
    }
    return result;
    return .{
        .data = result,
        .trail = offset + @intCast(u32, fields.len),
    };
}
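
For reference, the cache stores each Key as an Item (a tag, a result-id, and one 32-bit data word); multi-word payloads such as Key.VectorType and Key.ArrayType are flattened into the extra array one u32 per field by addExtraAssumeCapacity, with Ref fields stored through @enumToInt, and lookup reads them back through extraData/extraDataTrail. The following self-contained sketch of that flattening scheme is illustrative only; the Ref/ArrayType stand-ins and the flatten/unflatten helpers are assumptions for this example, not code from the commit.

const std = @import("std");

// Stand-ins that mirror the cache's Ref and Key.ArrayType shapes.
const Ref = enum(u32) { _ };

const ArrayType = struct {
    element_type: Ref,
    length: Ref,
    stride: u32 = 0,
};

// Flatten a payload struct into u32 words, one per field, like addExtraAssumeCapacity does.
fn flatten(comptime T: type, value: T, out: []u32) void {
    inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
        out[i] = switch (field.type) {
            u32 => @field(value, field.name),
            Ref => @enumToInt(@field(value, field.name)),
            else => @compileError("unsupported field type"),
        };
    }
}

// Rebuild the payload from the words, like extraData/extraDataTrail do.
fn unflatten(comptime T: type, words: []const u32) T {
    var result: T = undefined;
    inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
        @field(result, field.name) = switch (field.type) {
            u32 => words[i],
            Ref => @intToEnum(Ref, words[i]),
            else => @compileError("unsupported field type"),
        };
    }
    return result;
}

test "extra payload round trip (illustrative)" {
    // Pretend refs 0 and 1 were interned earlier for the element type and the length constant.
    const arr = ArrayType{ .element_type = @intToEnum(Ref, 0), .length = @intToEnum(Ref, 1) };
    var words: [3]u32 = undefined;
    flatten(ArrayType, arr, &words);
    // words == .{ 0, 1, 0 }: element_type ref, length ref, stride.
    try std.testing.expectEqual(arr, unflatten(ArrayType, &words));
}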