Merge pull request #10665 from Snektron/spirv-improvements

spir-v improvements

Commit 9f16d9ed07
File diff suppressed because it is too large
428
src/codegen/spirv/Module.zig
Normal file
@@ -0,0 +1,428 @@
|
||||
//! This structure represents a SPIR-V module being compiled (as a set of sections), and keeps track of all relevant
//! information. That includes the actual instructions, the current result-id bound, and data structures for querying
//! result-ids of data which needs to be persistent over different calls to Decl code generation.
//!
//! A SPIR-V binary module supports both little- and big-endian layout. The layout is detected by the magic word in the
//! header. Therefore, we can ignore any byte order throughout the implementation, and just use the host byte order,
//! and make this a problem for the consumer.
|
||||
const Module = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const ZigDecl = @import("../../Module.zig").Decl;
|
||||
|
||||
const spec = @import("spec.zig");
|
||||
const Word = spec.Word;
|
||||
const IdRef = spec.IdRef;
|
||||
const IdResult = spec.IdResult;
|
||||
const IdResultType = spec.IdResultType;
|
||||
|
||||
const Section = @import("Section.zig");
|
||||
const Type = @import("type.zig").Type;
|
||||
|
||||
const TypeCache = std.ArrayHashMapUnmanaged(Type, IdResultType, Type.ShallowHashContext32, true);
|
||||
|
||||
/// A general-purpose allocator which may be used to allocate resources for this module
|
||||
gpa: Allocator,
|
||||
|
||||
/// An arena allocator used to store things that have the same lifetime as this module.
|
||||
arena: Allocator,
|
||||
|
||||
/// Module layout, according to SPIR-V Spec section 2.4, "Logical Layout of a Module".
|
||||
sections: struct {
|
||||
/// Capability instructions
|
||||
capabilities: Section = .{},
|
||||
/// OpExtension instructions
|
||||
extensions: Section = .{},
|
||||
// OpExtInstImport instructions - skip for now.
|
||||
// memory model defined by target, not required here.
|
||||
/// OpEntryPoint instructions.
|
||||
entry_points: Section = .{},
|
||||
// OpExecutionMode and OpExecutionModeId instructions - skip for now.
|
||||
/// OpString, OpSourceExtension, OpSource, OpSourceContinued.
|
||||
debug_strings: Section = .{},
|
||||
// OpName, OpMemberName - skip for now.
|
||||
// OpModuleProcessed - skip for now.
|
||||
/// Annotation instructions (OpDecorate etc).
|
||||
annotations: Section = .{},
|
||||
/// Type declarations, constants, global variables
|
||||
/// Below this section, OpLine and OpNoLine are allowed.
|
||||
types_globals_constants: Section = .{},
|
||||
// Functions without a body - skip for now.
|
||||
/// Regular function definitions.
|
||||
functions: Section = .{},
|
||||
} = .{},
|
||||
|
||||
/// SPIR-V instructions return result-ids. This variable holds the module-wide counter for these.
|
||||
next_result_id: Word,
|
||||
|
||||
/// Cache for results of OpString instructions for module file names fed to OpSource.
|
||||
/// Since OpString is pretty much only used for those, we don't need to keep track of all strings,
|
||||
/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
|
||||
source_file_names: std.StringHashMapUnmanaged(IdRef) = .{},
|
||||
|
||||
/// SPIR-V type cache. Note that according to SPIR-V spec section 2.8, Types and Variables, non-pointer
/// non-aggregate types (which includes matrices and vectors) must have a _unique_ representation in
/// the final binary.
/// Note: Uses ArrayHashMap, which is insertion ordered, so that we may refer to other types by index (Type.Ref).
type_cache: TypeCache = .{},
|
||||
|
||||
pub fn init(gpa: Allocator, arena: Allocator) Module {
|
||||
return .{
|
||||
.gpa = gpa,
|
||||
.arena = arena,
|
||||
.next_result_id = 1, // 0 is an invalid SPIR-V result id, so start counting at 1.
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Module) void {
|
||||
self.sections.capabilities.deinit(self.gpa);
|
||||
self.sections.extensions.deinit(self.gpa);
|
||||
self.sections.entry_points.deinit(self.gpa);
|
||||
self.sections.debug_strings.deinit(self.gpa);
|
||||
self.sections.annotations.deinit(self.gpa);
|
||||
self.sections.types_globals_constants.deinit(self.gpa);
|
||||
self.sections.functions.deinit(self.gpa);
|
||||
|
||||
self.source_file_names.deinit(self.gpa);
|
||||
self.type_cache.deinit(self.gpa);
|
||||
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
pub fn allocId(self: *Module) spec.IdResult {
|
||||
defer self.next_result_id += 1;
|
||||
return .{ .id = self.next_result_id };
|
||||
}
|
||||
|
||||
pub fn idBound(self: Module) Word {
|
||||
return self.next_result_id;
|
||||
}
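A minimal sketch of the id sequence the two functions above are expected to produce, assuming the testing allocator can stand in for both the gpa and the arena:
test "Module result-id allocation (illustrative sketch)" {
    var module = Module.init(std.testing.allocator, std.testing.allocator);
    defer module.deinit();

    const a = module.allocId();
    const b = module.allocId();
    try std.testing.expectEqual(@as(Word, 1), a.id);
    try std.testing.expectEqual(@as(Word, 2), b.id);
    // The bound reported in the header is one past the largest id handed out so far.
    try std.testing.expectEqual(@as(Word, 3), module.idBound());
}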
|
||||
|
||||
/// Emit this module as a spir-v binary.
|
||||
pub fn flush(self: Module, file: std.fs.File) !void {
|
||||
// See SPIR-V Spec section 2.3, "Physical Layout of a SPIR-V Module and Instruction"
|
||||
|
||||
const header = [_]Word{
|
||||
spec.magic_number,
|
||||
(spec.version.major << 16) | (spec.version.minor << 8),
|
||||
0, // TODO: Register Zig compiler magic number.
|
||||
self.idBound(),
|
||||
0, // Schema (currently reserved for future use)
|
||||
};
|
||||
|
||||
// Note: needs to be kept in order according to section 2.3!
|
||||
const buffers = &[_][]const Word{
|
||||
&header,
|
||||
self.sections.capabilities.toWords(),
|
||||
self.sections.extensions.toWords(),
|
||||
self.sections.entry_points.toWords(),
|
||||
self.sections.debug_strings.toWords(),
|
||||
self.sections.annotations.toWords(),
|
||||
self.sections.types_globals_constants.toWords(),
|
||||
self.sections.functions.toWords(),
|
||||
};
|
||||
|
||||
var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
|
||||
var file_size: u64 = 0;
|
||||
for (iovc_buffers) |*iovc, i| {
|
||||
// Note, since spir-v supports both little and big endian we can ignore byte order here and
|
||||
// just treat the words as a sequence of bytes.
|
||||
const bytes = std.mem.sliceAsBytes(buffers[i]);
|
||||
iovc.* = .{ .iov_base = bytes.ptr, .iov_len = bytes.len };
|
||||
file_size += bytes.len;
|
||||
}
|
||||
|
||||
try file.seekTo(0);
|
||||
try file.setEndPos(file_size);
|
||||
try file.pwritevAll(&iovc_buffers, 0);
|
||||
}
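As a quick check on the version word built above (major version in bits 16..23, minor in bits 8..15), a small illustrative test:
test "header version word layout (illustrative)" {
    // SPIR-V 1.5, for example, encodes as 0x0001_0500.
    try std.testing.expectEqual(@as(Word, 0x0001_0500), (@as(Word, 1) << 16) | (@as(Word, 5) << 8));
}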
|
||||
|
||||
/// Fetch the result-id of an OpString instruction that encodes the path of the source
|
||||
/// file of the decl. This function may also emit an OpSource with source-level information regarding
|
||||
/// the decl.
|
||||
pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
|
||||
const path = decl.getFileScope().sub_file_path;
|
||||
const result = try self.source_file_names.getOrPut(self.gpa, path);
|
||||
if (!result.found_existing) {
|
||||
const file_result_id = self.allocId();
|
||||
result.value_ptr.* = file_result_id.toRef();
|
||||
try self.sections.debug_strings.emit(self.gpa, .OpString, .{
|
||||
.id_result = file_result_id,
|
||||
.string = path,
|
||||
});
|
||||
|
||||
try self.sections.debug_strings.emit(self.gpa, .OpSource, .{
|
||||
.source_language = .Unknown, // TODO: Register Zig source language.
|
||||
.version = 0, // TODO: Zig version as u32?
|
||||
.file = file_result_id.toRef(),
|
||||
.source = null, // TODO: Store actual source also?
|
||||
});
|
||||
}
|
||||
|
||||
return result.value_ptr.*;
|
||||
}
|
||||
|
||||
/// Fetch a result-id for a spir-v type. This function deduplicates the type as appropriate,
|
||||
/// and returns a cached version if that exists.
|
||||
/// Note: This function does not attempt to perform any validation on the type.
|
||||
/// The type is emitted in a shallow fashion; any child types should already
|
||||
/// be emitted at this point.
|
||||
pub fn resolveType(self: *Module, ty: Type) !Type.Ref {
|
||||
const result = try self.type_cache.getOrPut(self.gpa, ty);
|
||||
if (!result.found_existing) {
|
||||
result.value_ptr.* = try self.emitType(ty);
|
||||
}
|
||||
return result.index;
|
||||
}
|
||||
|
||||
pub fn resolveTypeId(self: *Module, ty: Type) !IdRef {
|
||||
return self.typeResultId(try self.resolveType(ty));
|
||||
}
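A hedged sketch of the caching behaviour that resolveType/resolveTypeId rely on: resolving a shallowly-equal type twice yields the same Type.Ref, so only one OpTypeInt is emitted. Using the testing allocator for both the gpa and the arena is an assumption of the sketch.
test "type cache deduplication (illustrative sketch)" {
    var module = Module.init(std.testing.allocator, std.testing.allocator);
    defer module.deinit();

    var int_info = Type.Payload.Int{ .width = 32, .signedness = .unsigned };
    const ty = Type.initPayload(&int_info.base);

    const ref_a = try module.resolveType(ty);
    const ref_b = try module.resolveType(ty);
    // The second call is served from the cache; no second OpTypeInt is emitted.
    try std.testing.expectEqual(ref_a, ref_b);
}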
|
||||
|
||||
/// Get the result-id of a particular type, by reference. Asserts type_ref is valid.
|
||||
pub fn typeResultId(self: Module, type_ref: Type.Ref) IdResultType {
|
||||
return self.type_cache.values()[type_ref];
|
||||
}
|
||||
|
||||
/// Get the result-id of a particular type as IdRef, by Type.Ref. Asserts type_ref is valid.
|
||||
pub fn typeRefId(self: Module, type_ref: Type.Ref) IdRef {
|
||||
return self.type_cache.values()[type_ref].toRef();
|
||||
}
|
||||
|
||||
/// Unconditionally emit a spir-v type into the appropriate section.
|
||||
/// Note: If this function is called with a type that has already been generated, it may yield an invalid module
/// as non-pointer non-aggregate types must be unique!
/// Note: This function does not attempt to perform any validation on the type.
/// The type is emitted in a shallow fashion; any child types should already
/// be emitted at this point.
|
||||
pub fn emitType(self: *Module, ty: Type) !IdResultType {
|
||||
const result_id = self.allocId();
|
||||
const ref_id = result_id.toRef();
|
||||
const types = &self.sections.types_globals_constants;
|
||||
const annotations = &self.sections.annotations;
|
||||
const result_id_operand = .{ .id_result = result_id };
|
||||
|
||||
switch (ty.tag()) {
|
||||
.void => try types.emit(self.gpa, .OpTypeVoid, result_id_operand),
|
||||
.bool => try types.emit(self.gpa, .OpTypeBool, result_id_operand),
|
||||
.int => try types.emit(self.gpa, .OpTypeInt, .{
|
||||
.id_result = result_id,
|
||||
.width = ty.payload(.int).width,
|
||||
.signedness = switch (ty.payload(.int).signedness) {
|
||||
.unsigned => @as(spec.LiteralInteger, 0),
|
||||
.signed => 1,
|
||||
},
|
||||
}),
|
||||
.float => try types.emit(self.gpa, .OpTypeFloat, .{
|
||||
.id_result = result_id,
|
||||
.width = ty.payload(.float).width,
|
||||
}),
|
||||
.vector => try types.emit(self.gpa, .OpTypeVector, .{
|
||||
.id_result = result_id,
|
||||
.component_type = self.typeResultId(ty.childType()).toRef(),
|
||||
.component_count = ty.payload(.vector).component_count,
|
||||
}),
|
||||
.matrix => try types.emit(self.gpa, .OpTypeMatrix, .{
|
||||
.id_result = result_id,
|
||||
.column_type = self.typeResultId(ty.childType()).toRef(),
|
||||
.column_count = ty.payload(.matrix).column_count,
|
||||
}),
|
||||
.image => {
|
||||
const info = ty.payload(.image);
|
||||
try types.emit(self.gpa, .OpTypeImage, .{
|
||||
.id_result = result_id,
|
||||
.sampled_type = self.typeResultId(ty.childType()).toRef(),
|
||||
.dim = info.dim,
|
||||
.depth = @enumToInt(info.depth),
|
||||
.arrayed = @boolToInt(info.arrayed),
|
||||
.ms = @boolToInt(info.multisampled),
|
||||
.sampled = @enumToInt(info.sampled),
|
||||
.image_format = info.format,
|
||||
.access_qualifier = info.access_qualifier,
|
||||
});
|
||||
},
|
||||
.sampler => try types.emit(self.gpa, .OpTypeSampler, result_id_operand),
|
||||
.sampled_image => try types.emit(self.gpa, .OpTypeSampledImage, .{
|
||||
.id_result = result_id,
|
||||
.image_type = self.typeResultId(ty.childType()).toRef(),
|
||||
}),
|
||||
.array => {
|
||||
const info = ty.payload(.array);
|
||||
assert(info.length != 0);
|
||||
try types.emit(self.gpa, .OpTypeArray, .{
|
||||
.id_result = result_id,
|
||||
.element_type = self.typeResultId(ty.childType()).toRef(),
|
||||
.length = .{ .id = 0 }, // TODO: info.length must be emitted as constant!
|
||||
});
|
||||
if (info.array_stride != 0) {
|
||||
try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
|
||||
}
|
||||
},
|
||||
.runtime_array => {
|
||||
const info = ty.payload(.runtime_array);
|
||||
try types.emit(self.gpa, .OpTypeRuntimeArray, .{
|
||||
.id_result = result_id,
|
||||
.element_type = self.typeResultId(ty.childType()).toRef(),
|
||||
});
|
||||
if (info.array_stride != 0) {
|
||||
try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
|
||||
}
|
||||
},
|
||||
.@"struct" => {
|
||||
const info = ty.payload(.@"struct");
|
||||
try types.emitRaw(self.gpa, .OpTypeStruct, 1 + info.members.len);
|
||||
types.writeOperand(IdResult, result_id);
|
||||
for (info.members) |member| {
|
||||
types.writeOperand(IdRef, self.typeResultId(member.ty).toRef());
|
||||
}
|
||||
try self.decorateStruct(ref_id, info);
|
||||
},
|
||||
.@"opaque" => try types.emit(self.gpa, .OpTypeOpaque, .{
|
||||
.id_result = result_id,
|
||||
.literal_string = ty.payload(.@"opaque").name,
|
||||
}),
|
||||
.pointer => {
|
||||
const info = ty.payload(.pointer);
|
||||
try types.emit(self.gpa, .OpTypePointer, .{
|
||||
.id_result = result_id,
|
||||
.storage_class = info.storage_class,
|
||||
.type = self.typeResultId(ty.childType()).toRef(),
|
||||
});
|
||||
if (info.array_stride != 0) {
|
||||
try annotations.decorate(self.gpa, ref_id, .{ .ArrayStride = .{ .array_stride = info.array_stride } });
|
||||
}
|
||||
if (info.alignment) |alignment| {
|
||||
try annotations.decorate(self.gpa, ref_id, .{ .Alignment = .{ .alignment = alignment } });
|
||||
}
|
||||
if (info.max_byte_offset) |max_byte_offset| {
|
||||
try annotations.decorate(self.gpa, ref_id, .{ .MaxByteOffset = .{ .max_byte_offset = max_byte_offset } });
|
||||
}
|
||||
},
|
||||
.function => {
|
||||
const info = ty.payload(.function);
|
||||
try types.emitRaw(self.gpa, .OpTypeFunction, 2 + info.parameters.len);
|
||||
types.writeOperand(IdResult, result_id);
|
||||
types.writeOperand(IdRef, self.typeResultId(info.return_type).toRef());
|
||||
for (info.parameters) |parameter_type| {
|
||||
types.writeOperand(IdRef, self.typeResultId(parameter_type).toRef());
|
||||
}
|
||||
},
|
||||
.event => try types.emit(self.gpa, .OpTypeEvent, result_id_operand),
|
||||
.device_event => try types.emit(self.gpa, .OpTypeDeviceEvent, result_id_operand),
|
||||
.reserve_id => try types.emit(self.gpa, .OpTypeReserveId, result_id_operand),
|
||||
.queue => try types.emit(self.gpa, .OpTypeQueue, result_id_operand),
|
||||
.pipe => try types.emit(self.gpa, .OpTypePipe, .{
|
||||
.id_result = result_id,
|
||||
.qualifier = ty.payload(.pipe).qualifier,
|
||||
}),
|
||||
.pipe_storage => try types.emit(self.gpa, .OpTypePipeStorage, result_id_operand),
|
||||
.named_barrier => try types.emit(self.gpa, .OpTypeNamedBarrier, result_id_operand),
|
||||
}
|
||||
|
||||
return result_id.toResultType();
|
||||
}
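For reference, a worked example of what this switch produces for a simple case: an unsigned 32-bit integer lowers to a single four-word OpTypeInt instruction.
// Illustrative word layout for an unsigned 32-bit integer type:
//   word 0: (4 << 16) | @enumToInt(spec.Opcode.OpTypeInt)  (length | opcode)
//   word 1: <freshly allocated result-id>
//   word 2: 32  (width)
//   word 3: 0   (signedness: unsigned)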
|
||||
|
||||
fn decorateStruct(self: *Module, target: IdRef, info: *const Type.Payload.Struct) !void {
|
||||
const annotations = &self.sections.annotations;
|
||||
|
||||
// Decorations for the struct type itself.
|
||||
if (info.decorations.block)
|
||||
try annotations.decorate(self.gpa, target, .Block);
|
||||
if (info.decorations.buffer_block)
|
||||
try annotations.decorate(self.gpa, target, .BufferBlock);
|
||||
if (info.decorations.glsl_shared)
|
||||
try annotations.decorate(self.gpa, target, .GLSLShared);
|
||||
if (info.decorations.glsl_packed)
|
||||
try annotations.decorate(self.gpa, target, .GLSLPacked);
|
||||
if (info.decorations.c_packed)
|
||||
try annotations.decorate(self.gpa, target, .CPacked);
|
||||
|
||||
// Decorations for the struct members.
|
||||
const extra = info.member_decoration_extra;
|
||||
var extra_i: u32 = 0;
|
||||
for (info.members) |member, i| {
|
||||
const d = member.decorations;
|
||||
const index = @intCast(Word, i);
|
||||
switch (d.matrix_layout) {
|
||||
.row_major => try annotations.decorateMember(self.gpa, target, index, .RowMajor),
|
||||
.col_major => try annotations.decorateMember(self.gpa, target, index, .ColMajor),
|
||||
.none => {},
|
||||
}
|
||||
if (d.matrix_layout != .none) {
|
||||
try annotations.decorateMember(self.gpa, target, index, .{
|
||||
.MatrixStride = .{ .matrix_stride = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
|
||||
if (d.no_perspective)
|
||||
try annotations.decorateMember(self.gpa, target, index, .NoPerspective);
|
||||
if (d.flat)
|
||||
try annotations.decorateMember(self.gpa, target, index, .Flat);
|
||||
if (d.patch)
|
||||
try annotations.decorateMember(self.gpa, target, index, .Patch);
|
||||
if (d.centroid)
|
||||
try annotations.decorateMember(self.gpa, target, index, .Centroid);
|
||||
if (d.sample)
|
||||
try annotations.decorateMember(self.gpa, target, index, .Sample);
|
||||
if (d.invariant)
|
||||
try annotations.decorateMember(self.gpa, target, index, .Invariant);
|
||||
if (d.@"volatile")
|
||||
try annotations.decorateMember(self.gpa, target, index, .Volatile);
|
||||
if (d.coherent)
|
||||
try annotations.decorateMember(self.gpa, target, index, .Coherent);
|
||||
if (d.non_writable)
|
||||
try annotations.decorateMember(self.gpa, target, index, .NonWritable);
|
||||
if (d.non_readable)
|
||||
try annotations.decorateMember(self.gpa, target, index, .NonReadable);
|
||||
|
||||
if (d.builtin) {
|
||||
try annotations.decorateMember(self.gpa, target, index, .{
|
||||
.BuiltIn = .{ .built_in = @intToEnum(spec.BuiltIn, extra[extra_i]) },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.stream) {
|
||||
try annotations.decorateMember(self.gpa, target, index, .{
|
||||
.Stream = .{ .stream_number = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.location) {
|
||||
try annotations.decorateMember(self.gpa, target, index, .{
|
||||
.Location = .{ .location = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.component) {
|
||||
try annotations.decorateMember(self.gpa, target, index, .{
|
||||
.Component = .{ .component = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.xfb_buffer) {
|
||||
try annotations.decorateMember(self.gpa, target, index, .{
|
||||
.XfbBuffer = .{ .xfb_buffer_number = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.xfb_stride) {
|
||||
try annotations.decorateMember(self.gpa, target, index, .{
|
||||
.XfbStride = .{ .xfb_stride = extra[extra_i] },
|
||||
});
|
||||
extra_i += 1;
|
||||
}
|
||||
if (d.user_semantic) {
|
||||
const len = extra[extra_i];
|
||||
extra_i += 1;
|
||||
const semantic = @ptrCast([*]const u8, &extra[extra_i])[0..len];
|
||||
try annotations.decorateMember(self.gpa, target, index, .{
|
||||
.UserSemantic = .{ .semantic = semantic },
|
||||
});
|
||||
extra_i += std.math.divCeil(u32, len, @sizeOf(u32)) catch unreachable;
|
||||
}
|
||||
}
|
||||
}
|
||||
423
src/codegen/spirv/Section.zig
Normal file
@@ -0,0 +1,423 @@
|
||||
//! Represents a section or subsection of instructions in a SPIR-V binary. Instructions can be appended
//! to separate sections, which can then later be merged into the final binary.
|
||||
const Section = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const testing = std.testing;
|
||||
|
||||
const spec = @import("spec.zig");
|
||||
const Word = spec.Word;
|
||||
const DoubleWord = std.meta.Int(.unsigned, @bitSizeOf(Word) * 2);
|
||||
const Log2Word = std.math.Log2Int(Word);
|
||||
|
||||
const Opcode = spec.Opcode;
|
||||
|
||||
/// The instructions in this section. Memory is owned by the Module
|
||||
/// externally associated to this Section.
|
||||
instructions: std.ArrayListUnmanaged(Word) = .{},
|
||||
|
||||
pub fn deinit(section: *Section, allocator: Allocator) void {
|
||||
section.instructions.deinit(allocator);
|
||||
section.* = undefined;
|
||||
}
|
||||
|
||||
/// Clear the instructions in this section
|
||||
pub fn reset(section: *Section) void {
|
||||
section.instructions.items.len = 0;
|
||||
}
|
||||
|
||||
pub fn toWords(section: Section) []Word {
|
||||
return section.instructions.items;
|
||||
}
|
||||
|
||||
/// Append the instructions from another section into this section.
|
||||
pub fn append(section: *Section, allocator: Allocator, other_section: Section) !void {
|
||||
try section.instructions.appendSlice(allocator, other_section.instructions.items);
|
||||
}
|
||||
|
||||
/// Write an instruction and size, operands are to be inserted manually.
|
||||
pub fn emitRaw(
|
||||
section: *Section,
|
||||
allocator: Allocator,
|
||||
opcode: Opcode,
|
||||
operands: usize, // opcode itself not included
|
||||
) !void {
|
||||
const word_count = 1 + operands;
|
||||
try section.instructions.ensureUnusedCapacity(allocator, word_count);
|
||||
section.writeWord((@intCast(Word, word_count << 16)) | @enumToInt(opcode));
|
||||
}
|
||||
|
||||
pub fn emit(
|
||||
section: *Section,
|
||||
allocator: Allocator,
|
||||
comptime opcode: spec.Opcode,
|
||||
operands: opcode.Operands(),
|
||||
) !void {
|
||||
const word_count = instructionSize(opcode, operands);
|
||||
try section.instructions.ensureUnusedCapacity(allocator, word_count);
|
||||
section.writeWord(@intCast(Word, word_count << 16) | @enumToInt(opcode));
|
||||
section.writeOperands(opcode.Operands(), operands);
|
||||
}
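A hedged sketch of the manual-operand path that emitRaw enables (this mirrors how Module.emitType writes OpTypeStruct); the ids are placeholders:
test "SPIR-V Section emitRaw() with manual operands (illustrative)" {
    var section = Section{};
    defer section.deinit(std.testing.allocator);

    // Emit `OpTypeStruct %3 %1 %2`: one id_result plus two member type ids.
    try section.emitRaw(std.testing.allocator, .OpTypeStruct, 3);
    section.writeOperand(spec.IdResult, .{ .id = 3 });
    section.writeOperand(spec.IdRef, .{ .id = 1 });
    section.writeOperand(spec.IdRef, .{ .id = 2 });

    try testing.expectEqualSlices(Word, &.{
        (@as(Word, 4) << 16) | @enumToInt(Opcode.OpTypeStruct),
        3,
        1,
        2,
    }, section.instructions.items);
}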
|
||||
|
||||
/// Decorate a result-id.
|
||||
pub fn decorate(
|
||||
section: *Section,
|
||||
allocator: Allocator,
|
||||
target: spec.IdRef,
|
||||
decoration: spec.Decoration.Extended,
|
||||
) !void {
|
||||
try section.emit(allocator, .OpDecorate, .{
|
||||
.target = target,
|
||||
.decoration = decoration,
|
||||
});
|
||||
}
|
||||
|
||||
/// Decorate a result-id which is a member of some struct.
|
||||
pub fn decorateMember(
|
||||
section: *Section,
|
||||
allocator: Allocator,
|
||||
structure_type: spec.IdRef,
|
||||
member: u32,
|
||||
decoration: spec.Decoration.Extended,
|
||||
) !void {
|
||||
try section.emit(allocator, .OpMemberDecorate, .{
|
||||
.structure_type = structure_type,
|
||||
.member = member,
|
||||
.decoration = decoration,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn writeWord(section: *Section, word: Word) void {
|
||||
section.instructions.appendAssumeCapacity(word);
|
||||
}
|
||||
|
||||
pub fn writeWords(section: *Section, words: []const Word) void {
|
||||
section.instructions.appendSliceAssumeCapacity(words);
|
||||
}
|
||||
|
||||
fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
|
||||
section.writeWords(&.{
|
||||
@truncate(Word, dword),
|
||||
@truncate(Word, dword >> @bitSizeOf(Word)),
|
||||
});
|
||||
}
|
||||
|
||||
fn writeOperands(section: *Section, comptime Operands: type, operands: Operands) void {
|
||||
const fields = switch (@typeInfo(Operands)) {
|
||||
.Struct => |info| info.fields,
|
||||
.Void => return,
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
inline for (fields) |field| {
|
||||
section.writeOperand(field.field_type, @field(operands, field.name));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand) void {
|
||||
switch (Operand) {
|
||||
spec.IdResultType, spec.IdResult, spec.IdRef => section.writeWord(operand.id),
|
||||
|
||||
spec.LiteralInteger => section.writeWord(operand),
|
||||
|
||||
spec.LiteralString => section.writeString(operand),
|
||||
|
||||
spec.LiteralContextDependentNumber => section.writeContextDependentNumber(operand),
|
||||
|
||||
spec.LiteralExtInstInteger => section.writeWord(operand.inst),
|
||||
|
||||
// TODO: Where this type is used (OpSpecConstantOp) is currently not correct in the spec json,
|
||||
// so it most likely needs to be altered into something that can actually describe the entire
|
||||
// instruction in which it is used.
|
||||
spec.LiteralSpecConstantOpInteger => section.writeWord(@enumToInt(operand.opcode)),
|
||||
|
||||
spec.PairLiteralIntegerIdRef => section.writeWords(&.{ operand.value, operand.label.id }),
|
||||
spec.PairIdRefLiteralInteger => section.writeWords(&.{ operand.target.id, operand.member }),
|
||||
spec.PairIdRefIdRef => section.writeWords(&.{ operand[0].id, operand[1].id }),
|
||||
|
||||
else => switch (@typeInfo(Operand)) {
|
||||
.Enum => section.writeWord(@enumToInt(operand)),
|
||||
.Optional => |info| if (operand) |child| {
|
||||
section.writeOperand(info.child, child);
|
||||
},
|
||||
.Pointer => |info| {
|
||||
std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
|
||||
for (operand) |item| {
|
||||
section.writeOperand(info.child, item);
|
||||
}
|
||||
},
|
||||
.Struct => |info| {
|
||||
if (info.layout == .Packed) {
|
||||
section.writeWord(@bitCast(Word, operand));
|
||||
} else {
|
||||
section.writeExtendedMask(Operand, operand);
|
||||
}
|
||||
},
|
||||
.Union => section.writeExtendedUnion(Operand, operand),
|
||||
else => unreachable,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn writeString(section: *Section, str: []const u8) void {
|
||||
// TODO: Not actually sure whether this is correct for big-endian.
|
||||
// See https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html#Literal
|
||||
const zero_terminated_len = str.len + 1;
|
||||
var i: usize = 0;
|
||||
while (i < zero_terminated_len) : (i += @sizeOf(Word)) {
|
||||
var word: Word = 0;
|
||||
|
||||
var j: usize = 0;
|
||||
while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) {
|
||||
word |= @as(Word, str[i + j]) << @intCast(Log2Word, j * std.meta.bitCount(u8));
|
||||
}
|
||||
|
||||
section.instructions.appendAssumeCapacity(word);
|
||||
}
|
||||
}
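A worked example of the packing loop above: a three-character string plus its zero terminator fits exactly in one word, least significant byte first.
// "abc" encodes as a single word: 'a' | ('b' << 8) | ('c' << 16) | (0 << 24) == 0x0063_6261.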
|
||||
|
||||
fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void {
|
||||
switch (operand) {
|
||||
.int32 => |int| section.writeWord(@bitCast(Word, int)),
|
||||
.uint32 => |int| section.writeWord(@bitCast(Word, int)),
|
||||
.int64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)),
|
||||
.uint64 => |int| section.writeDoubleWord(@bitCast(DoubleWord, int)),
|
||||
.float32 => |float| section.writeWord(@bitCast(Word, float)),
|
||||
.float64 => |float| section.writeDoubleWord(@bitCast(DoubleWord, float)),
|
||||
}
|
||||
}
|
||||
|
||||
fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
|
||||
var mask: Word = 0;
|
||||
inline for (@typeInfo(Operand).Struct.fields) |field, bit| {
|
||||
switch (@typeInfo(field.field_type)) {
|
||||
.Optional => if (@field(operand, field.name) != null) {
|
||||
mask |= 1 << @intCast(u5, bit);
|
||||
},
|
||||
.Bool => if (@field(operand, field.name)) {
|
||||
mask |= 1 << @intCast(u5, bit);
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
if (mask == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
section.writeWord(mask);
|
||||
|
||||
inline for (@typeInfo(Operand).Struct.fields) |field| {
|
||||
switch (@typeInfo(field.field_type)) {
|
||||
.Optional => |info| if (@field(operand, field.name)) |child| {
|
||||
section.writeOperands(info.child, child);
|
||||
},
|
||||
.Bool => {},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn writeExtendedUnion(section: *Section, comptime Operand: type, operand: Operand) void {
|
||||
const tag = std.meta.activeTag(operand);
|
||||
section.writeWord(@enumToInt(tag));
|
||||
|
||||
inline for (@typeInfo(Operand).Union.fields) |field| {
|
||||
if (@field(Operand, field.name) == tag) {
|
||||
section.writeOperands(field.field_type, @field(operand, field.name));
|
||||
return;
|
||||
}
|
||||
}
|
||||
unreachable;
|
||||
}
|
||||
|
||||
fn instructionSize(comptime opcode: spec.Opcode, operands: opcode.Operands()) usize {
|
||||
return 1 + operandsSize(opcode.Operands(), operands);
|
||||
}
|
||||
|
||||
fn operandsSize(comptime Operands: type, operands: Operands) usize {
|
||||
const fields = switch (@typeInfo(Operands)) {
|
||||
.Struct => |info| info.fields,
|
||||
.Void => return 0,
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
var total: usize = 0;
|
||||
inline for (fields) |field| {
|
||||
total += operandSize(field.field_type, @field(operands, field.name));
|
||||
}
|
||||
|
||||
return total;
|
||||
}
|
||||
|
||||
fn operandSize(comptime Operand: type, operand: Operand) usize {
|
||||
return switch (Operand) {
|
||||
spec.IdResultType,
|
||||
spec.IdResult,
|
||||
spec.IdRef,
|
||||
spec.LiteralInteger,
|
||||
spec.LiteralExtInstInteger,
|
||||
=> 1,
|
||||
|
||||
spec.LiteralString => std.math.divCeil(usize, operand.len + 1, @sizeOf(Word)) catch unreachable, // Add one for zero-terminator
|
||||
|
||||
spec.LiteralContextDependentNumber => switch (operand) {
|
||||
.int32, .uint32, .float32 => @as(usize, 1),
|
||||
.int64, .uint64, .float64 => @as(usize, 2),
|
||||
},
|
||||
|
||||
// TODO: Where this type is used (OpSpecConstantOp) is currently not correct in the spec
|
||||
// json, so it most likely needs to be altered into something that can actually
|
||||
// describe the entire instruction in which it is used.
|
||||
spec.LiteralSpecConstantOpInteger => 1,
|
||||
|
||||
spec.PairLiteralIntegerIdRef,
|
||||
spec.PairIdRefLiteralInteger,
|
||||
spec.PairIdRefIdRef,
|
||||
=> 2,
|
||||
|
||||
else => switch (@typeInfo(Operand)) {
|
||||
.Enum => 1,
|
||||
.Optional => |info| if (operand) |child| operandSize(info.child, child) else 0,
|
||||
.Pointer => |info| blk: {
|
||||
std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
|
||||
var total: usize = 0;
|
||||
for (operand) |item| {
|
||||
total += operandSize(info.child, item);
|
||||
}
|
||||
break :blk total;
|
||||
},
|
||||
.Struct => |info| if (info.layout == .Packed) 1 else extendedMaskSize(Operand, operand),
|
||||
.Union => extendedUnionSize(Operand, operand),
|
||||
else => unreachable,
|
||||
},
|
||||
};
|
||||
}
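Two illustrative sizes that follow from the rules above, consistent with the packing done in writeString:
// A LiteralString of length 3 takes divCeil(3 + 1, 4) == 1 word;
// length 4 already needs divCeil(4 + 1, 4) == 2 words because of the zero terminator.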
|
||||
|
||||
fn extendedMaskSize(comptime Operand: type, operand: Operand) usize {
|
||||
var total: usize = 0;
|
||||
var any_set = false;
|
||||
inline for (@typeInfo(Operand).Struct.fields) |field| {
|
||||
switch (@typeInfo(field.field_type)) {
|
||||
.Optional => |info| if (@field(operand, field.name)) |child| {
|
||||
total += operandsSize(info.child, child);
|
||||
any_set = true;
|
||||
},
|
||||
.Bool => if (@field(operand, field.name)) {
|
||||
any_set = true;
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
if (!any_set) {
|
||||
return 0;
|
||||
}
|
||||
return total + 1; // Add one for the mask itself.
|
||||
}
|
||||
|
||||
fn extendedUnionSize(comptime Operand: type, operand: Operand) usize {
|
||||
const tag = std.meta.activeTag(operand);
|
||||
inline for (@typeInfo(Operand).Union.fields) |field| {
|
||||
if (@field(Operand, field.name) == tag) {
|
||||
// Add one for the tag itself.
|
||||
return 1 + operandsSize(field.field_type, @field(operand, field.name));
|
||||
}
|
||||
}
|
||||
unreachable;
|
||||
}
|
||||
|
||||
test "SPIR-V Section emit() - no operands" {
|
||||
var section = Section{};
|
||||
defer section.deinit(std.testing.allocator);
|
||||
|
||||
try section.emit(std.testing.allocator, .OpNop, {});
|
||||
|
||||
try testing.expect(section.instructions.items[0] == (@as(Word, 1) << 16) | @enumToInt(Opcode.OpNop));
|
||||
}
|
||||
|
||||
test "SPIR-V Section emit() - simple" {
|
||||
var section = Section{};
|
||||
defer section.deinit(std.testing.allocator);
|
||||
|
||||
try section.emit(std.testing.allocator, .OpUndef, .{
|
||||
.id_result_type = .{ .id = 0 },
|
||||
.id_result = .{ .id = 1 },
|
||||
});
|
||||
|
||||
try testing.expectEqualSlices(Word, &.{
|
||||
(@as(Word, 3) << 16) | @enumToInt(Opcode.OpUndef),
|
||||
0,
|
||||
1,
|
||||
}, section.instructions.items);
|
||||
}
|
||||
|
||||
test "SPIR-V Section emit() - string" {
|
||||
var section = Section{};
|
||||
defer section.deinit(std.testing.allocator);
|
||||
|
||||
try section.emit(std.testing.allocator, .OpSource, .{
|
||||
.source_language = .Unknown,
|
||||
.version = 123,
|
||||
.file = .{ .id = 456 },
|
||||
.source = "pub fn main() void {}",
|
||||
});
|
||||
|
||||
try testing.expectEqualSlices(Word, &.{
|
||||
(@as(Word, 10) << 16) | @enumToInt(Opcode.OpSource),
|
||||
@enumToInt(spec.SourceLanguage.Unknown),
|
||||
123,
|
||||
456,
|
||||
std.mem.bytesToValue(Word, "pub "),
|
||||
std.mem.bytesToValue(Word, "fn m"),
|
||||
std.mem.bytesToValue(Word, "ain("),
|
||||
std.mem.bytesToValue(Word, ") vo"),
|
||||
std.mem.bytesToValue(Word, "id {"),
|
||||
std.mem.bytesToValue(Word, "}\x00\x00\x00"),
|
||||
}, section.instructions.items);
|
||||
}
|
||||
|
||||
test "SPIR-V Section emit()- extended mask" {
|
||||
var section = Section{};
|
||||
defer section.deinit(std.testing.allocator);
|
||||
|
||||
try section.emit(std.testing.allocator, .OpLoopMerge, .{
|
||||
.merge_block = .{ .id = 10 },
|
||||
.continue_target = .{ .id = 20 },
|
||||
.loop_control = .{
|
||||
.Unroll = true,
|
||||
.DependencyLength = .{
|
||||
.literal_integer = 2,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
try testing.expectEqualSlices(Word, &.{
|
||||
(@as(Word, 5) << 16) | @enumToInt(Opcode.OpLoopMerge),
|
||||
10,
|
||||
20,
|
||||
@bitCast(Word, spec.LoopControl{ .Unroll = true, .DependencyLength = true }),
|
||||
2,
|
||||
}, section.instructions.items);
|
||||
}
|
||||
|
||||
test "SPIR-V Section emit() - extended union" {
|
||||
var section = Section{};
|
||||
defer section.deinit(std.testing.allocator);
|
||||
|
||||
try section.emit(std.testing.allocator, .OpExecutionMode, .{
|
||||
.entry_point = .{ .id = 888 },
|
||||
.mode = .{
|
||||
.LocalSize = .{ .x_size = 4, .y_size = 8, .z_size = 16 },
|
||||
},
|
||||
});
|
||||
|
||||
try testing.expectEqualSlices(Word, &.{
|
||||
(@as(Word, 6) << 16) | @enumToInt(Opcode.OpExecutionMode),
|
||||
888,
|
||||
@enumToInt(spec.ExecutionMode.LocalSize),
|
||||
4,
|
||||
8,
|
||||
16,
|
||||
}, section.instructions.items);
|
||||
}
|
||||
File diff suppressed because it is too large
433
src/codegen/spirv/type.zig
Normal file
@@ -0,0 +1,433 @@
|
||||
//! This module models a SPIR-V Type. These are distinct from Zig types, with some types
|
||||
//! which are not representable by Zig directly.
|
||||
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const spec = @import("spec.zig");
|
||||
|
||||
pub const Type = extern union {
|
||||
tag_if_small_enough: Tag,
|
||||
ptr_otherwise: *Payload,
|
||||
|
||||
/// A reference to another SPIR-V type.
|
||||
pub const Ref = usize;
|
||||
|
||||
pub fn initTag(comptime small_tag: Tag) Type {
|
||||
comptime assert(@enumToInt(small_tag) < Tag.no_payload_count);
|
||||
return .{ .tag_if_small_enough = small_tag };
|
||||
}
|
||||
|
||||
pub fn initPayload(pl: *Payload) Type {
|
||||
assert(@enumToInt(pl.tag) >= Tag.no_payload_count);
|
||||
return .{ .ptr_otherwise = pl };
|
||||
}
|
||||
|
||||
pub fn tag(self: Type) Tag {
|
||||
if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
|
||||
return self.tag_if_small_enough;
|
||||
} else {
|
||||
return self.ptr_otherwise.tag;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() {
|
||||
if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
|
||||
return null;
|
||||
|
||||
if (self.ptr_otherwise.tag == t)
|
||||
return self.payload(t);
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/// Access the payload of a type directly.
|
||||
pub fn payload(self: Type, comptime t: Tag) *t.Type() {
|
||||
assert(self.tag() == t);
|
||||
return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
|
||||
}
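A hedged usage sketch: `payload` asserts the tag, so when the tag is not known statically, `castTag` is the checked variant. The `ty` value below is a placeholder for some previously constructed Type.
// Sketch only:
//     if (ty.castTag(.int)) |int_info| {
//         // int_info is a *Payload.Int here.
//         _ = int_info.width;
//     }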
|
||||
|
||||
/// Perform a shallow equality test, comparing two types while assuming that any child types
|
||||
/// are equal only if their references are equal.
|
||||
pub fn eqlShallow(a: Type, b: Type) bool {
|
||||
if (a.tag_if_small_enough == b.tag_if_small_enough)
|
||||
return true;
|
||||
|
||||
const tag_a = a.tag();
|
||||
const tag_b = b.tag();
|
||||
if (tag_a != tag_b)
|
||||
return false;
|
||||
|
||||
inline for (@typeInfo(Tag).Enum.fields) |field| {
|
||||
const t = @field(Tag, field.name);
|
||||
if (t == tag_a) {
|
||||
return eqlPayloads(t, a, b);
|
||||
}
|
||||
}
|
||||
|
||||
unreachable;
|
||||
}
|
||||
|
||||
/// Compare the payload of two compatible tags, given that we already know the tag of both types.
|
||||
fn eqlPayloads(comptime t: Tag, a: Type, b: Type) bool {
|
||||
switch (t) {
|
||||
.void,
|
||||
.bool,
|
||||
.sampler,
|
||||
.event,
|
||||
.device_event,
|
||||
.reserve_id,
|
||||
.queue,
|
||||
.pipe_storage,
|
||||
.named_barrier,
|
||||
=> return true,
|
||||
.int,
|
||||
.float,
|
||||
.vector,
|
||||
.matrix,
|
||||
.sampled_image,
|
||||
.array,
|
||||
.runtime_array,
|
||||
.@"opaque",
|
||||
.pointer,
|
||||
.pipe,
|
||||
.image,
|
||||
=> return std.meta.eql(a.payload(t).*, b.payload(t).*),
|
||||
.@"struct" => {
|
||||
const struct_a = a.payload(.@"struct");
|
||||
const struct_b = b.payload(.@"struct");
|
||||
if (struct_a.members.len != struct_b.members.len)
|
||||
return false;
|
||||
for (struct_a.members) |mem_a, i| {
|
||||
if (!std.meta.eql(mem_a, struct_b.members[i]))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
},
|
||||
.@"function" => {
|
||||
const fn_a = a.payload(.function);
|
||||
const fn_b = b.payload(.function);
|
||||
if (fn_a.return_type != fn_b.return_type)
|
||||
return false;
|
||||
return std.mem.eql(Ref, fn_a.parameters, fn_b.parameters);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform a shallow hash, which hashes the reference value of child types instead of recursing.
|
||||
pub fn hashShallow(self: Type) u64 {
|
||||
var hasher = std.hash.Wyhash.init(0);
|
||||
const t = self.tag();
|
||||
std.hash.autoHash(&hasher, t);
|
||||
|
||||
inline for (@typeInfo(Tag).Enum.fields) |field| {
|
||||
if (@field(Tag, field.name) == t) {
|
||||
switch (@field(Tag, field.name)) {
|
||||
.void,
|
||||
.bool,
|
||||
.sampler,
|
||||
.event,
|
||||
.device_event,
|
||||
.reserve_id,
|
||||
.queue,
|
||||
.pipe_storage,
|
||||
.named_barrier,
|
||||
=> {},
|
||||
else => self.hashPayload(@field(Tag, field.name), &hasher),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return hasher.final();
|
||||
}
|
||||
|
||||
/// Perform a shallow hash, given that we know the tag of the field ahead of time.
|
||||
fn hashPayload(self: Type, comptime t: Tag, hasher: *std.hash.Wyhash) void {
|
||||
const fields = @typeInfo(t.Type()).Struct.fields;
|
||||
const pl = self.payload(t);
|
||||
comptime assert(std.mem.eql(u8, fields[0].name, "base"));
|
||||
inline for (fields[1..]) |field| { // Skip the 'base' field.
|
||||
std.hash.autoHashStrat(hasher, @field(pl, field.name), .DeepRecursive);
|
||||
}
|
||||
}
|
||||
|
||||
/// Hash context that hashes and compares types in a shallow fashion, useful for type caches.
|
||||
pub const ShallowHashContext32 = struct {
|
||||
pub fn hash(self: @This(), t: Type) u32 {
|
||||
_ = self;
|
||||
return @truncate(u32, t.hashShallow());
|
||||
}
|
||||
pub fn eql(self: @This(), a: Type, b: Type) bool {
|
||||
_ = self;
|
||||
return a.eqlShallow(b);
|
||||
}
|
||||
};
|
||||
|
||||
/// Return the reference to any child type. Asserts the type is one of:
|
||||
/// - Vectors
|
||||
/// - Matrices
|
||||
/// - Images
|
||||
/// - SampledImages,
|
||||
/// - Arrays
|
||||
/// - RuntimeArrays
|
||||
/// - Pointers
|
||||
pub fn childType(self: Type) Ref {
|
||||
return switch (self.tag()) {
|
||||
.vector => self.payload(.vector).component_type,
|
||||
.matrix => self.payload(.matrix).column_type,
|
||||
.image => self.payload(.image).sampled_type,
|
||||
.sampled_image => self.payload(.sampled_image).image_type,
|
||||
.array => self.payload(.array).element_type,
|
||||
.runtime_array => self.payload(.runtime_array).element_type,
|
||||
.pointer => self.payload(.pointer).child_type,
|
||||
else => unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
pub const Tag = enum(usize) {
|
||||
void,
|
||||
bool,
|
||||
sampler,
|
||||
event,
|
||||
device_event,
|
||||
reserve_id,
|
||||
queue,
|
||||
pipe_storage,
|
||||
named_barrier,
|
||||
|
||||
// After this, the tag requires a payload.
|
||||
int,
|
||||
float,
|
||||
vector,
|
||||
matrix,
|
||||
image,
|
||||
sampled_image,
|
||||
array,
|
||||
runtime_array,
|
||||
@"struct",
|
||||
@"opaque",
|
||||
pointer,
|
||||
function,
|
||||
pipe,
|
||||
|
||||
pub const last_no_payload_tag = Tag.named_barrier;
|
||||
pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1;
|
||||
|
||||
pub fn Type(comptime t: Tag) type {
|
||||
return switch (t) {
|
||||
.void, .bool, .sampler, .event, .device_event, .reserve_id, .queue, .pipe_storage, .named_barrier => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"),
|
||||
.int => Payload.Int,
|
||||
.float => Payload.Float,
|
||||
.vector => Payload.Vector,
|
||||
.matrix => Payload.Matrix,
|
||||
.image => Payload.Image,
|
||||
.sampled_image => Payload.SampledImage,
|
||||
.array => Payload.Array,
|
||||
.runtime_array => Payload.RuntimeArray,
|
||||
.@"struct" => Payload.Struct,
|
||||
.@"opaque" => Payload.Opaque,
|
||||
.pointer => Payload.Pointer,
|
||||
.function => Payload.Function,
|
||||
.pipe => Payload.Pipe,
|
||||
};
|
||||
}
|
||||
};
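Worked out from the declaration above: the payload-less tags .void through .named_barrier occupy enum values 0 through 8, so no_payload_count is 9 and .int is the first tag that carries a Payload struct.
// @enumToInt(Tag.named_barrier) == 8  =>  no_payload_count == 9.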
|
||||
|
||||
pub const Payload = struct {
|
||||
tag: Tag,
|
||||
|
||||
pub const Int = struct {
|
||||
base: Payload = .{ .tag = .int },
|
||||
width: u32,
|
||||
signedness: std.builtin.Signedness,
|
||||
};
|
||||
|
||||
pub const Float = struct {
|
||||
base: Payload = .{ .tag = .float },
|
||||
width: u32,
|
||||
};
|
||||
|
||||
pub const Vector = struct {
|
||||
base: Payload = .{ .tag = .vector },
|
||||
component_type: Ref,
|
||||
component_count: u32,
|
||||
};
|
||||
|
||||
pub const Matrix = struct {
|
||||
base: Payload = .{ .tag = .matrix },
|
||||
column_type: Ref,
|
||||
column_count: u32,
|
||||
};
|
||||
|
||||
pub const Image = struct {
|
||||
base: Payload = .{ .tag = .image },
|
||||
sampled_type: Ref,
|
||||
dim: spec.Dim,
|
||||
depth: enum(u2) {
|
||||
no = 0,
|
||||
yes = 1,
|
||||
maybe = 2,
|
||||
},
|
||||
arrayed: bool,
|
||||
multisampled: bool,
|
||||
sampled: enum(u2) {
|
||||
known_at_runtime = 0,
|
||||
with_sampler = 1,
|
||||
without_sampler = 2,
|
||||
},
|
||||
format: spec.ImageFormat,
|
||||
access_qualifier: ?spec.AccessQualifier,
|
||||
};
|
||||
|
||||
pub const SampledImage = struct {
|
||||
base: Payload = .{ .tag = .sampled_image },
|
||||
image_type: Ref,
|
||||
};
|
||||
|
||||
pub const Array = struct {
|
||||
base: Payload = .{ .tag = .array },
|
||||
element_type: Ref,
|
||||
/// Note: Must be emitted as constant, not as literal!
|
||||
length: u32,
|
||||
/// Type has the 'ArrayStride' decoration.
|
||||
/// If zero, no stride is present.
|
||||
array_stride: u32,
|
||||
};
|
||||
|
||||
pub const RuntimeArray = struct {
|
||||
base: Payload = .{ .tag = .runtime_array },
|
||||
element_type: Ref,
|
||||
/// Type has the 'ArrayStride' decoration.
|
||||
/// If zero, no stride is present.
|
||||
array_stride: u32,
|
||||
};
|
||||
|
||||
pub const Struct = struct {
|
||||
base: Payload = .{ .tag = .@"struct" },
|
||||
members: []Member,
|
||||
decorations: StructDecorations,
|
||||
|
||||
/// Extra information for decorations, packed for efficiency. Fields are stored sequentially by
|
||||
/// order of the `members` slice and `MemberDecorations` struct.
|
||||
member_decoration_extra: []u32,
|
||||
|
||||
pub const Member = struct {
|
||||
ty: Ref,
|
||||
offset: u32,
|
||||
decorations: MemberDecorations,
|
||||
};
|
||||
|
||||
pub const StructDecorations = packed struct {
|
||||
/// Type has the 'Block' decoration.
|
||||
block: bool,
|
||||
/// Type has the 'BufferBlock' decoration.
|
||||
buffer_block: bool,
|
||||
/// Type has the 'GLSLShared' decoration.
|
||||
glsl_shared: bool,
|
||||
/// Type has the 'GLSLPacked' decoration.
|
||||
glsl_packed: bool,
|
||||
/// Type has the 'CPacked' decoration.
|
||||
c_packed: bool,
|
||||
};
|
||||
|
||||
pub const MemberDecorations = packed struct {
|
||||
/// Matrix layout for (arrays of) matrices. If this field is not .none,
|
||||
/// then there is also an extra field containing the matrix stride corresponding
|
||||
/// to the 'MatrixStride' decoration.
|
||||
matrix_layout: enum(u2) {
|
||||
/// Member has the 'RowMajor' decoration. The member type
|
||||
/// must be a matrix or an array of matrices.
|
||||
row_major,
|
||||
/// Member has the 'ColMajor' decoration. The member type
|
||||
/// must be a matrix or an array of matrices.
|
||||
col_major,
|
||||
/// Member is not a matrix or array of matrices.
|
||||
none,
|
||||
},
|
||||
|
||||
// Regular decorations, these do not imply extra fields.
|
||||
|
||||
/// Member has the 'NoPerspective' decoration.
|
||||
no_perspective: bool,
|
||||
/// Member has the 'Flat' decoration.
|
||||
flat: bool,
|
||||
/// Member has the 'Patch' decoration.
|
||||
patch: bool,
|
||||
/// Member has the 'Centroid' decoration.
|
||||
centroid: bool,
|
||||
/// Member has the 'Sample' decoration.
|
||||
sample: bool,
|
||||
/// Member has the 'Invariant' decoration.
|
||||
/// Note: requires parent struct to have 'Block'.
|
||||
invariant: bool,
|
||||
/// Member has the 'Volatile' decoration.
|
||||
@"volatile": bool,
|
||||
/// Member has the 'Coherent' decoration.
|
||||
coherent: bool,
|
||||
/// Member has the 'NonWritable' decoration.
|
||||
non_writable: bool,
|
||||
/// Member has the 'NonReadable' decoration.
|
||||
non_readable: bool,
|
||||
|
||||
// The following decorations all imply extra field(s).
|
||||
|
||||
/// Member has the 'BuiltIn' decoration.
|
||||
/// This decoration has an extra field of type `spec.BuiltIn`.
|
||||
/// Note: If any member of a struct has the BuiltIn decoration, all members must have one.
|
||||
/// Note: Each builtin may only be reachable once for a particular entry point.
|
||||
/// Note: The member type may be constrained by a particular built-in, defined in the client API specification.
|
||||
builtin: bool,
|
||||
/// Member has the 'Stream' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
stream: bool,
|
||||
/// Member has the 'Location' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
location: bool,
|
||||
/// Member has the 'Component' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
component: bool,
|
||||
/// Member has the 'XfbBuffer' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
xfb_buffer: bool,
|
||||
/// Member has the 'XfbStride' decoration.
|
||||
/// This member has an extra field of type `u32`.
|
||||
xfb_stride: bool,
|
||||
/// Member has the 'UserSemantic' decoration.
|
||||
/// This member has an extra field of type `[]u8`, which is encoded
|
||||
/// by a `u32` containing the number of chars exactly, and then the string padded to
|
||||
/// a multiple of 4 bytes with zeroes.
|
||||
user_semantic: bool,
|
||||
};
|
||||
};
|
||||
|
||||
pub const Opaque = struct {
|
||||
base: Payload = .{ .tag = .@"opaque" },
|
||||
name: []u8,
|
||||
};
|
||||
|
||||
pub const Pointer = struct {
|
||||
base: Payload = .{ .tag = .pointer },
|
||||
storage_class: spec.StorageClass,
|
||||
child_type: Ref,
|
||||
/// Type has the 'ArrayStride' decoration.
|
||||
/// This is valid for pointers to elements of an array.
|
||||
/// If zero, no stride is present.
|
||||
array_stride: u32,
|
||||
/// Type has the 'Alignment' decoration.
|
||||
alignment: ?u32,
|
||||
/// Type has the 'MaxByteOffset' decoration.
|
||||
max_byte_offset: ?u32,
|
||||
};
|
||||
|
||||
pub const Function = struct {
|
||||
base: Payload = .{ .tag = .function },
|
||||
return_type: Ref,
|
||||
parameters: []Ref,
|
||||
};
|
||||
|
||||
pub const Pipe = struct {
|
||||
base: Payload = .{ .tag = .pipe },
|
||||
qualifier: spec.AccessQualifier,
|
||||
};
|
||||
};
|
||||
};
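A minimal illustrative sketch of how the two representations are constructed and queried: payload-less tags live inline in the tagged-pointer union, payload tags go through a pointer to their `base` field.
test "Type tag/payload representation (illustrative sketch)" {
    // Payload-less tags are stored directly in the union.
    const void_ty = Type.initTag(.void);
    try std.testing.expect(void_ty.tag() == .void);

    // Tags at or above `no_payload_count` go through a payload pointer.
    var int_info = Type.Payload.Int{ .width = 16, .signedness = .signed };
    const int_ty = Type.initPayload(&int_info.base);
    try std.testing.expect(int_ty.tag() == .int);
    try std.testing.expect(int_ty.payload(.int).width == 16);
}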
|
||||
@@ -24,6 +24,7 @@ const SpirV = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const ArenaAllocator = std.heap.ArenaAllocator;
|
||||
const assert = std.debug.assert;
|
||||
const log = std.log.scoped(.link);
|
||||
|
||||
@@ -31,19 +32,21 @@ const Module = @import("../Module.zig");
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const link = @import("../link.zig");
|
||||
const codegen = @import("../codegen/spirv.zig");
|
||||
const Word = codegen.Word;
|
||||
const ResultId = codegen.ResultId;
|
||||
const trace = @import("../tracy.zig").trace;
|
||||
const build_options = @import("build_options");
|
||||
const spec = @import("../codegen/spirv/spec.zig");
|
||||
const Air = @import("../Air.zig");
|
||||
const Liveness = @import("../Liveness.zig");
|
||||
const Value = @import("../value.zig").Value;
|
||||
|
||||
const SpvModule = @import("../codegen/spirv/Module.zig");
|
||||
const spec = @import("../codegen/spirv/spec.zig");
|
||||
const IdResult = spec.IdResult;
|
||||
|
||||
// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
|
||||
pub const FnData = struct {
|
||||
// We're going to fill these in flushModule, and we're going to fill them unconditionally,
|
||||
// so just set it to undefined.
|
||||
id: ResultId = undefined,
|
||||
id: IdResult = undefined,
|
||||
};
|
||||
|
||||
base: link.File,
|
||||
@@ -55,7 +58,15 @@ decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, DeclGenContext) = .{},
|
||||
|
||||
const DeclGenContext = struct {
|
||||
air: Air,
|
||||
air_value_arena: ArenaAllocator.State,
|
||||
liveness: Liveness,
|
||||
|
||||
fn deinit(self: *DeclGenContext, gpa: Allocator) void {
|
||||
self.air.deinit(gpa);
|
||||
self.liveness.deinit(gpa);
|
||||
self.air_value_arena.promote(gpa).deinit();
|
||||
self.* = undefined;
|
||||
}
|
||||
};
|
||||
|
||||
pub fn createEmpty(gpa: Allocator, options: link.Options) !*SpirV {
|
||||
@@ -113,12 +124,27 @@ pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liv
|
||||
@panic("Attempted to compile for architecture that was disabled by build configuration");
|
||||
}
|
||||
_ = module;
|
||||
// Keep track of all decls so we can iterate over them on flush().
|
||||
_ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl);
|
||||
|
||||
_ = air;
|
||||
_ = liveness;
|
||||
@panic("TODO SPIR-V needs to keep track of Air and Liveness so it can use them later");
|
||||
// Keep track of all decls so we can iterate over them on flush().
|
||||
const result = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl);
|
||||
if (result.found_existing) {
|
||||
result.value_ptr.deinit(self.base.allocator);
|
||||
}
|
||||
|
||||
var arena = ArenaAllocator.init(self.base.allocator);
|
||||
errdefer arena.deinit();
|
||||
|
||||
var new_air = try cloneAir(air, self.base.allocator, arena.allocator());
|
||||
errdefer new_air.deinit(self.base.allocator);
|
||||
|
||||
var new_liveness = try cloneLiveness(liveness, self.base.allocator);
|
||||
errdefer new_liveness.deinit(self.base.allocator);
|
||||
|
||||
result.value_ptr.* = .{
|
||||
.air = new_air,
|
||||
.air_value_arena = arena.state,
|
||||
.liveness = new_liveness,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void {
|
||||
@@ -143,7 +169,11 @@ pub fn updateDeclExports(
|
||||
}
|
||||
|
||||
pub fn freeDecl(self: *SpirV, decl: *Module.Decl) void {
|
||||
assert(self.decl_table.swapRemove(decl));
|
||||
const index = self.decl_table.getIndex(decl).?;
|
||||
if (decl.val.tag() == .function) {
|
||||
self.decl_table.values()[index].deinit(self.base.allocator);
|
||||
}
|
||||
self.decl_table.swapRemoveAt(index);
|
||||
}
|
||||
|
||||
pub fn flush(self: *SpirV, comp: *Compilation) !void {
|
||||
@@ -165,7 +195,10 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
|
||||
const module = self.base.options.module.?;
|
||||
const target = comp.getTarget();
|
||||
|
||||
var spv = codegen.SPIRVModule.init(self.base.allocator, module);
|
||||
var arena = std.heap.ArenaAllocator.init(self.base.allocator);
|
||||
defer arena.deinit();
|
||||
|
||||
var spv = SpvModule.init(self.base.allocator, arena.allocator());
|
||||
defer spv.deinit();
|
||||
|
||||
// Allocate an ID for every declaration before generating code,
|
||||
@@ -173,73 +206,38 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
|
||||
// TODO: We're allocating an ID unconditionally now, are there
|
||||
// declarations which don't generate a result?
|
||||
// TODO: fn_link is used here, but thats probably not the right field. It will work anyway though.
|
||||
{
|
||||
for (self.decl_table.keys()) |decl| {
|
||||
if (!decl.has_tv) continue;
|
||||
|
||||
decl.fn_link.spirv.id = spv.allocResultId();
|
||||
for (self.decl_table.keys()) |decl| {
|
||||
if (decl.has_tv) {
|
||||
decl.fn_link.spirv.id = spv.allocId();
|
||||
}
|
||||
}
|
||||
|
||||
// Now, actually generate the code for all declarations.
|
||||
{
|
||||
var decl_gen = codegen.DeclGen.init(&spv);
|
||||
defer decl_gen.deinit();
|
||||
var decl_gen = codegen.DeclGen.init(module, &spv);
|
||||
defer decl_gen.deinit();
|
||||
|
||||
var it = self.decl_table.iterator();
|
||||
while (it.next()) |entry| {
|
||||
const decl = entry.key_ptr.*;
|
||||
if (!decl.has_tv) continue;
|
||||
var it = self.decl_table.iterator();
|
||||
while (it.next()) |entry| {
|
||||
const decl = entry.key_ptr.*;
|
||||
if (!decl.has_tv) continue;
|
||||
|
||||
const air = entry.value_ptr.air;
|
||||
const liveness = entry.value_ptr.liveness;
|
||||
const air = entry.value_ptr.air;
|
||||
const liveness = entry.value_ptr.liveness;
|
||||
|
||||
if (try decl_gen.gen(decl, air, liveness)) |msg| {
|
||||
try module.failed_decls.put(module.gpa, decl, msg);
|
||||
return; // TODO: Attempt to generate more decls?
|
||||
}
|
||||
// Note, if `decl` is not a function, air/liveness may be undefined.
|
||||
if (try decl_gen.gen(decl, air, liveness)) |msg| {
|
||||
try module.failed_decls.put(module.gpa, decl, msg);
|
||||
return; // TODO: Attempt to generate more decls?
|
||||
}
|
||||
}
|
||||
|
||||
try writeCapabilities(&spv.binary.capabilities_and_extensions, target);
|
||||
try writeMemoryModel(&spv.binary.capabilities_and_extensions, target);
|
||||
try writeCapabilities(&spv, target);
|
||||
try writeMemoryModel(&spv, target);
|
||||
|
||||
const header = [_]Word{
|
||||
spec.magic_number,
|
||||
(spec.version.major << 16) | (spec.version.minor << 8),
|
||||
0, // TODO: Register Zig compiler magic number.
|
||||
spv.resultIdBound(),
|
||||
0, // Schema (currently reserved for future use in the SPIR-V spec).
|
||||
};
|
||||
|
||||
// Note: The order of adding sections to the final binary
|
||||
// follows the SPIR-V logical module format!
|
||||
const buffers = &[_][]const Word{
|
||||
&header,
|
||||
spv.binary.capabilities_and_extensions.items,
|
||||
spv.binary.debug_strings.items,
|
||||
spv.binary.types_globals_constants.items,
|
||||
spv.binary.fn_decls.items,
|
||||
};
|
||||
|
||||
var iovc_buffers: [buffers.len]std.os.iovec_const = undefined;
|
||||
for (iovc_buffers) |*iovc, i| {
|
||||
const bytes = std.mem.sliceAsBytes(buffers[i]);
|
||||
iovc.* = .{ .iov_base = bytes.ptr, .iov_len = bytes.len };
|
||||
}
|
||||
|
||||
var file_size: u64 = 0;
|
||||
for (iovc_buffers) |iov| {
|
||||
file_size += iov.iov_len;
|
||||
}
|
||||
|
||||
const file = self.base.file.?;
|
||||
try file.seekTo(0);
|
||||
try file.setEndPos(file_size);
|
||||
try file.pwritevAll(&iovc_buffers, 0);
|
||||
try spv.flush(self.base.file.?);
|
||||
}
|
||||
|
||||
fn writeCapabilities(binary: *std.ArrayList(Word), target: std.Target) !void {
|
||||
fn writeCapabilities(spv: *SpvModule, target: std.Target) !void {
|
||||
// TODO: Integrate with a hypothetical feature system
|
||||
const cap: spec.Capability = switch (target.os.tag) {
|
||||
.opencl => .Kernel,
|
||||
@@ -248,10 +246,12 @@ fn writeCapabilities(binary: *std.ArrayList(Word), target: std.Target) !void {
|
||||
else => unreachable, // TODO
|
||||
};
|
||||
|
||||
try codegen.writeInstruction(binary, .OpCapability, &[_]Word{@enumToInt(cap)});
|
||||
try spv.sections.capabilities.emit(spv.gpa, .OpCapability, .{
|
||||
.capability = cap,
|
||||
});
|
||||
}
|
||||
|
||||
fn writeMemoryModel(binary: *std.ArrayList(Word), target: std.Target) !void {
|
||||
fn writeMemoryModel(spv: *SpvModule, target: std.Target) !void {
|
||||
const addressing_model = switch (target.os.tag) {
|
||||
.opencl => switch (target.cpu.arch) {
|
||||
.spirv32 => spec.AddressingModel.Physical32,
|
||||
@@ -269,7 +269,41 @@ fn writeMemoryModel(binary: *std.ArrayList(Word), target: std.Target) !void {
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
try codegen.writeInstruction(binary, .OpMemoryModel, &[_]Word{
|
||||
@enumToInt(addressing_model), @enumToInt(memory_model),
|
||||
// TODO: Put this in a proper section.
|
||||
try spv.sections.capabilities.emit(spv.gpa, .OpMemoryModel, .{
|
||||
.addressing_model = addressing_model,
|
||||
.memory_model = memory_model,
|
||||
});
|
||||
}
|
||||
|
||||
fn cloneLiveness(l: Liveness, gpa: Allocator) !Liveness {
const tomb_bits = try gpa.dupe(usize, l.tomb_bits);
errdefer gpa.free(tomb_bits);

const extra = try gpa.dupe(u32, l.extra);
errdefer gpa.free(extra);

return Liveness{
.tomb_bits = tomb_bits,
.extra = extra,
.special = try l.special.clone(gpa),
};
}

fn cloneAir(air: Air, gpa: Allocator, value_arena: Allocator) !Air {
const values = try gpa.alloc(Value, air.values.len);
errdefer gpa.free(values);

for (values) |*value, i| {
value.* = try air.values[i].copy(value_arena);
}

var instructions = try air.instructions.toMultiArrayList().clone(gpa);
errdefer instructions.deinit(gpa);

return Air{
.instructions = instructions.slice(),
.extra = try gpa.dupe(u32, air.extra),
.values = values,
};
}

@@ -1,5 +1,8 @@
const std = @import("std");
const g = @import("spirv/grammar.zig");
const Allocator = std.mem.Allocator;

const ExtendedStructSet = std.StringHashMap(void);

pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
@@ -20,101 +23,308 @@ pub fn main() !void {
var tokens = std.json.TokenStream.init(spec);
var registry = try std.json.parse(g.Registry, &tokens, .{ .allocator = allocator });

const core_reg = switch (registry) {
.core => |core_reg| core_reg,
.extension => return error.TODOSpirVExtensionSpec,
};

var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
try render(bw.writer(), registry);
try render(bw.writer(), allocator, core_reg);
try bw.flush();
}

fn render(writer: anytype, registry: g.Registry) !void {
/// Returns a set with types that require an extra struct for the `Instruction` interface
/// to the spir-v spec, or whether the original type can be used.
fn extendedStructs(
arena: Allocator,
kinds: []const g.OperandKind,
) !ExtendedStructSet {
var map = ExtendedStructSet.init(arena);
try map.ensureTotalCapacity(@intCast(u32, kinds.len));

for (kinds) |kind| {
const enumerants = kind.enumerants orelse continue;

for (enumerants) |enumerant| {
if (enumerant.parameters.len > 0) {
break;
}
} else continue;

map.putAssumeCapacity(kind.kind, {});
}

return map;
}

// Return a score for a particular priority. Duplicate instruction/operand enum values are
// removed by picking the tag with the lowest score to keep, and by making an alias for the
// other. Note that the tag does not need to be just a tag at this point, in which case it
// gets the lowest score automatically anyway.
fn tagPriorityScore(tag: []const u8) usize {
if (tag.len == 0) {
return 1;
} else if (std.mem.eql(u8, tag, "EXT")) {
return 2;
} else if (std.mem.eql(u8, tag, "KHR")) {
return 3;
} else {
return 4;
}
}

fn render(writer: anytype, allocator: Allocator, registry: g.CoreRegistry) !void {
try writer.writeAll(
\\//! This file is auto-generated by tools/gen_spirv_spec.zig.
\\
\\const Version = @import("std").builtin.Version;
\\
\\pub const Word = u32;
\\pub const IdResultType = struct{
\\ id: Word,
\\ pub fn toRef(self: IdResultType) IdRef {
\\ return .{.id = self.id};
\\ }
\\};
\\pub const IdResult = struct{
\\ id: Word,
\\ pub fn toRef(self: IdResult) IdRef {
\\ return .{.id = self.id};
\\ }
\\ pub fn toResultType(self: IdResult) IdResultType {
\\ return .{.id = self.id};
\\ }
\\};
\\pub const IdRef = struct{ id: Word };
\\
\\pub const IdMemorySemantics = IdRef;
\\pub const IdScope = IdRef;
\\
\\pub const LiteralInteger = Word;
\\pub const LiteralString = []const u8;
\\pub const LiteralContextDependentNumber = union(enum) {
\\ int32: i32,
\\ uint32: u32,
\\ int64: i64,
\\ uint64: u64,
\\ float32: f32,
\\ float64: f64,
\\};
\\pub const LiteralExtInstInteger = struct{ inst: Word };
\\pub const LiteralSpecConstantOpInteger = struct { opcode: Opcode };
\\pub const PairLiteralIntegerIdRef = struct { value: LiteralInteger, label: IdRef };
\\pub const PairIdRefLiteralInteger = struct { target: IdRef, member: LiteralInteger };
\\pub const PairIdRefIdRef = [2]IdRef;
\\
\\
);

switch (registry) {
.core => |core_reg| {
try writer.print(
\\pub const version = Version{{ .major = {}, .minor = {}, .patch = {} }};
\\pub const magic_number: u32 = {s};
\\
,
.{ core_reg.major_version, core_reg.minor_version, core_reg.revision, core_reg.magic_number },
);
try renderOpcodes(writer, core_reg.instructions);
try renderOperandKinds(writer, core_reg.operand_kinds);
},
.extension => |ext_reg| {
try writer.print(
\\pub const version = Version{{ .major = {}, .minor = 0, .patch = {} }};
\\
,
.{ ext_reg.version, ext_reg.revision },
);
try renderOpcodes(writer, ext_reg.instructions);
try renderOperandKinds(writer, ext_reg.operand_kinds);
},
}
try writer.print(
\\pub const version = Version{{ .major = {}, .minor = {}, .patch = {} }};
\\pub const magic_number: Word = {s};
\\
,
.{ registry.major_version, registry.minor_version, registry.revision, registry.magic_number },
);
const extended_structs = try extendedStructs(allocator, registry.operand_kinds);
try renderOpcodes(writer, allocator, registry.instructions, extended_structs);
try renderOperandKinds(writer, allocator, registry.operand_kinds, extended_structs);
}

fn renderOpcodes(writer: anytype, instructions: []const g.Instruction) !void {
try writer.writeAll("pub const Opcode = extern enum(u16) {\n");
for (instructions) |instr| {
try writer.print(" {} = {},\n", .{ std.zig.fmtId(instr.opname), instr.opcode });
fn renderOpcodes(
writer: anytype,
allocator: Allocator,
instructions: []const g.Instruction,
extended_structs: ExtendedStructSet,
) !void {
var inst_map = std.AutoArrayHashMap(u32, usize).init(allocator);
try inst_map.ensureTotalCapacity(instructions.len);

var aliases = std.ArrayList(struct { inst: usize, alias: usize }).init(allocator);
try aliases.ensureTotalCapacity(instructions.len);

for (instructions) |inst, i| {
const result = inst_map.getOrPutAssumeCapacity(inst.opcode);
if (!result.found_existing) {
result.value_ptr.* = i;
continue;
}

const existing = instructions[result.value_ptr.*];

const tag_index = std.mem.indexOfDiff(u8, inst.opname, existing.opname).?;
const inst_priority = tagPriorityScore(inst.opname[tag_index..]);
const existing_priority = tagPriorityScore(existing.opname[tag_index..]);

if (inst_priority < existing_priority) {
aliases.appendAssumeCapacity(.{ .inst = result.value_ptr.*, .alias = i });
result.value_ptr.* = i;
} else {
aliases.appendAssumeCapacity(.{ .inst = i, .alias = result.value_ptr.* });
}
}
try writer.writeAll(" _,\n};\n");

const instructions_indices = inst_map.values();

try writer.writeAll("pub const Opcode = enum(u16) {\n");
for (instructions_indices) |i| {
const inst = instructions[i];
try writer.print("{} = {},\n", .{ std.zig.fmtId(inst.opname), inst.opcode });
}

try writer.writeByte('\n');

for (aliases.items) |alias| {
try writer.print("pub const {} = Opcode.{};\n", .{
std.zig.fmtId(instructions[alias.inst].opname),
std.zig.fmtId(instructions[alias.alias].opname),
});
}

try writer.writeAll(
\\
\\pub fn Operands(comptime self: Opcode) type {
\\return switch (self) {
\\
);

for (instructions_indices) |i| {
const inst = instructions[i];
try renderOperand(writer, .instruction, inst.opname, inst.operands, extended_structs);
}
try writer.writeAll("};\n}\n};\n");
_ = extended_structs;
}

fn renderOperandKinds(writer: anytype, kinds: []const g.OperandKind) !void {
fn renderOperandKinds(
writer: anytype,
allocator: Allocator,
kinds: []const g.OperandKind,
extended_structs: ExtendedStructSet,
) !void {
for (kinds) |kind| {
switch (kind.category) {
.ValueEnum => try renderValueEnum(writer, kind),
.BitEnum => try renderBitEnum(writer, kind),
.ValueEnum => try renderValueEnum(writer, allocator, kind, extended_structs),
.BitEnum => try renderBitEnum(writer, allocator, kind, extended_structs),
else => {},
}
}
}

fn renderValueEnum(writer: anytype, enumeration: g.OperandKind) !void {
try writer.print("pub const {s} = extern enum(u32) {{\n", .{enumeration.kind});

fn renderValueEnum(
writer: anytype,
allocator: Allocator,
enumeration: g.OperandKind,
extended_structs: ExtendedStructSet,
) !void {
const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
for (enumerants) |enumerant| {
if (enumerant.value != .int) return error.InvalidRegistry;

try writer.print(" {} = {},\n", .{ std.zig.fmtId(enumerant.enumerant), enumerant.value.int });
var enum_map = std.AutoArrayHashMap(u32, usize).init(allocator);
try enum_map.ensureTotalCapacity(enumerants.len);

var aliases = std.ArrayList(struct { enumerant: usize, alias: usize }).init(allocator);
try aliases.ensureTotalCapacity(enumerants.len);

for (enumerants) |enumerant, i| {
const result = enum_map.getOrPutAssumeCapacity(enumerant.value.int);
if (!result.found_existing) {
result.value_ptr.* = i;
continue;
}

const existing = enumerants[result.value_ptr.*];

const tag_index = std.mem.indexOfDiff(u8, enumerant.enumerant, existing.enumerant).?;
const enum_priority = tagPriorityScore(enumerant.enumerant[tag_index..]);
const existing_priority = tagPriorityScore(existing.enumerant[tag_index..]);

if (enum_priority < existing_priority) {
aliases.appendAssumeCapacity(.{ .enumerant = result.value_ptr.*, .alias = i });
result.value_ptr.* = i;
} else {
aliases.appendAssumeCapacity(.{ .enumerant = i, .alias = result.value_ptr.* });
}
}

try writer.writeAll(" _,\n};\n");
const enum_indices = enum_map.values();

try writer.print("pub const {s} = enum(u32) {{\n", .{std.zig.fmtId(enumeration.kind)});

for (enum_indices) |i| {
const enumerant = enumerants[i];
if (enumerant.value != .int) return error.InvalidRegistry;

try writer.print("{} = {},\n", .{ std.zig.fmtId(enumerant.enumerant), enumerant.value.int });
}

try writer.writeByte('\n');

for (aliases.items) |alias| {
try writer.print("pub const {} = {}.{};\n", .{
std.zig.fmtId(enumerants[alias.enumerant].enumerant),
std.zig.fmtId(enumeration.kind),
std.zig.fmtId(enumerants[alias.alias].enumerant),
});
}

if (!extended_structs.contains(enumeration.kind)) {
try writer.writeAll("};\n");
return;
}

try writer.print("\npub const Extended = union({}) {{\n", .{std.zig.fmtId(enumeration.kind)});

for (enum_indices) |i| {
const enumerant = enumerants[i];
try renderOperand(writer, .@"union", enumerant.enumerant, enumerant.parameters, extended_structs);
}

try writer.writeAll("};\n};\n");
}

fn renderBitEnum(writer: anytype, enumeration: g.OperandKind) !void {
try writer.print("pub const {s} = packed struct {{\n", .{enumeration.kind});
fn renderBitEnum(
writer: anytype,
allocator: Allocator,
enumeration: g.OperandKind,
extended_structs: ExtendedStructSet,
) !void {
try writer.print("pub const {s} = packed struct {{\n", .{std.zig.fmtId(enumeration.kind)});

var flags_by_bitpos = [_]?[]const u8{null} ** 32;
var flags_by_bitpos = [_]?usize{null} ** 32;
const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
for (enumerants) |enumerant| {

var aliases = std.ArrayList(struct { flag: usize, alias: u5 }).init(allocator);
try aliases.ensureTotalCapacity(enumerants.len);

for (enumerants) |enumerant, i| {
if (enumerant.value != .bitflag) return error.InvalidRegistry;
const value = try parseHexInt(enumerant.value.bitflag);
if (@popCount(u32, value) != 1) {
continue; // Skip combinations and 'none' items
if (@popCount(u32, value) == 0) {
continue; // Skip 'none' items
}

std.debug.assert(@popCount(u32, value) == 1);

var bitpos = std.math.log2_int(u32, value);
if (flags_by_bitpos[bitpos]) |*existing| {
// Keep the shortest
if (enumerant.enumerant.len < existing.len)
existing.* = enumerant.enumerant;
const tag_index = std.mem.indexOfDiff(u8, enumerant.enumerant, enumerants[existing.*].enumerant).?;
const enum_priority = tagPriorityScore(enumerant.enumerant[tag_index..]);
const existing_priority = tagPriorityScore(enumerants[existing.*].enumerant[tag_index..]);

if (enum_priority < existing_priority) {
aliases.appendAssumeCapacity(.{ .flag = existing.*, .alias = bitpos });
existing.* = i;
} else {
aliases.appendAssumeCapacity(.{ .flag = i, .alias = bitpos });
}
} else {
flags_by_bitpos[bitpos] = enumerant.enumerant;
flags_by_bitpos[bitpos] = i;
}
}

for (flags_by_bitpos) |maybe_flag_name, bitpos| {
try writer.writeAll(" ");
if (maybe_flag_name) |flag_name| {
try writer.writeAll(flag_name);
for (flags_by_bitpos) |maybe_flag_index, bitpos| {
if (maybe_flag_index) |flag_index| {
try writer.print("{}", .{std.zig.fmtId(enumerants[flag_index].enumerant)});
} else {
try writer.print("_reserved_bit_{}", .{bitpos});
}
@@ -126,7 +336,169 @@ fn renderBitEnum(writer: anytype, enumeration: g.OperandKind) !void {
try writer.writeAll("= false,\n");
}

try writer.writeAll("};\n");
try writer.writeByte('\n');

for (aliases.items) |alias| {
try writer.print("pub const {}: {} = .{{.{} = true}};\n", .{
std.zig.fmtId(enumerants[alias.flag].enumerant),
std.zig.fmtId(enumeration.kind),
std.zig.fmtId(enumerants[flags_by_bitpos[alias.alias].?].enumerant),
});
}

if (!extended_structs.contains(enumeration.kind)) {
try writer.writeAll("};\n");
return;
}

try writer.print("\npub const Extended = struct {{\n", .{});

for (flags_by_bitpos) |maybe_flag_index, bitpos| {
const flag_index = maybe_flag_index orelse {
try writer.print("_reserved_bit_{}: bool = false,\n", .{bitpos});
continue;
};
const enumerant = enumerants[flag_index];

try renderOperand(writer, .mask, enumerant.enumerant, enumerant.parameters, extended_structs);
}

try writer.writeAll("};\n};\n");
}

fn renderOperand(
writer: anytype,
kind: enum {
@"union",
instruction,
mask,
},
field_name: []const u8,
parameters: []const g.Operand,
extended_structs: ExtendedStructSet,
) !void {
if (kind == .instruction) {
try writer.writeByte('.');
}
try writer.print("{}", .{std.zig.fmtId(field_name)});
if (parameters.len == 0) {
switch (kind) {
.@"union" => try writer.writeAll(",\n"),
.instruction => try writer.writeAll(" => void,\n"),
.mask => try writer.writeAll(": bool = false,\n"),
}
return;
}

if (kind == .instruction) {
try writer.writeAll(" => ");
} else {
try writer.writeAll(": ");
}

if (kind == .mask) {
try writer.writeByte('?');
}

try writer.writeAll("struct{");

for (parameters) |param, j| {
if (j != 0) {
try writer.writeAll(", ");
}

try renderFieldName(writer, parameters, j);
try writer.writeAll(": ");

if (param.quantifier) |q| {
switch (q) {
.@"?" => try writer.writeByte('?'),
.@"*" => try writer.writeAll("[]const "),
}
}

try writer.print("{}", .{std.zig.fmtId(param.kind)});

if (extended_structs.contains(param.kind)) {
try writer.writeAll(".Extended");
}

if (param.quantifier) |q| {
switch (q) {
.@"?" => try writer.writeAll(" = null"),
.@"*" => try writer.writeAll(" = &.{}"),
}
}
}

try writer.writeAll("}");

if (kind == .mask) {
try writer.writeAll(" = null");
}

try writer.writeAll(",\n");
}

fn renderFieldName(writer: anytype, operands: []const g.Operand, field_index: usize) !void {
const operand = operands[field_index];

// Should be enough for all names - adjust as needed.
var name_buffer = std.BoundedArray(u8, 64){
.buffer = undefined,
};

derive_from_kind: {
// Operand names are often in the json encoded as "'Name'" (with two sets of quotes).
// Additionally, some operands have ~ in them at the end (D~ref~).
const name = std.mem.trim(u8, operand.name, "'~");
if (name.len == 0) {
break :derive_from_kind;
}

// Some names have weird characters in them (like newlines) - skip any such ones.
// Use the same loop to transform to snake-case.
for (name) |c| {
switch (c) {
'a'...'z', '0'...'9' => try name_buffer.append(c),
'A'...'Z' => try name_buffer.append(std.ascii.toLower(c)),
' ', '~' => try name_buffer.append('_'),
else => break :derive_from_kind,
}
}

// Assume there are no duplicate 'name' fields.
try writer.print("{}", .{std.zig.fmtId(name_buffer.slice())});
return;
}

// Translate to snake case.
name_buffer.len = 0;
for (operand.kind) |c, i| {
switch (c) {
'a'...'z', '0'...'9' => try name_buffer.append(c),
'A'...'Z' => if (i > 0 and std.ascii.isLower(operand.kind[i - 1])) {
try name_buffer.appendSlice(&[_]u8{ '_', std.ascii.toLower(c) });
} else {
try name_buffer.append(std.ascii.toLower(c));
},
else => unreachable, // Assume that the name is valid C-syntax (and contains no underscores).
}
}

try writer.print("{}", .{std.zig.fmtId(name_buffer.slice())});

// For fields derived from type name, there could be any amount.
// Simply check against all other fields, and if another similar one exists, add a number.
const need_extra_index = for (operands) |other_operand, i| {
if (i != field_index and std.mem.eql(u8, operand.kind, other_operand.kind)) {
break true;
}
} else false;

if (need_extra_index) {
try writer.print("_{}", .{field_index});
}
}

fn parseHexInt(text: []const u8) !u31 {
@@ -142,7 +514,7 @@ fn usageAndExit(file: std.fs.File, arg0: []const u8, code: u8) noreturn {
\\
\\Generates Zig bindings for a SPIR-V specification .json (either core or
\\extinst versions). The result, printed to stdout, should be used to update
\\files in src/codegen/spirv.
\\files in src/codegen/spirv. Don't forget to format the output.
\\
\\The relevant specifications can be obtained from the SPIR-V registry:
\\https://github.com/KhronosGroup/SPIRV-Headers/blob/master/include/spirv/unified1/