Merge pull request #22889 from alichraghi/ali_spv

spirv: miscellaneous stuff
Robin Voetter 2025-02-18 21:14:02 +01:00 committed by GitHub
commit 4720a79477
11 changed files with 523 additions and 2843 deletions

File diff suppressed because it is too large.


@@ -1,5 +1,4 @@
const std = @import("std.zig");
const comptimePrint = std.fmt.comptimePrint;
/// Will make `ptr` contain the location of the current invocation within the
/// global workgroup. Each component is equal to the index of the local workgroup
@@ -81,23 +80,23 @@ pub fn fragmentDepth(comptime ptr: *addrspace(.output) f32) void {
/// Forms the main linkage for `input` and `output` address spaces.
/// `ptr` must be a reference to a variable or struct field.
pub fn location(comptime ptr: anytype, comptime loc: u32) void {
const code = comptimePrint("OpDecorate %ptr Location {}", .{loc});
asm volatile (code
asm volatile ("OpDecorate %ptr Location $loc"
:
: [ptr] "" (ptr),
[loc] "c" (loc),
);
}
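As a usage sketch (not part of this change), a fragment entry point might assign locations to its interface variables as below; the variable declarations, names, and entry point are assumptions rather than code from this repository.

    const gpu = @import("std").gpu;

    // Assumed interface variables in the `input`/`output` address spaces.
    var frag_uv: @Vector(2, f32) addrspace(.input) = undefined;
    var frag_color: @Vector(4, f32) addrspace(.output) = undefined;

    export fn fragmentExample() callconv(.spirv_fragment) void {
        // Give each interface variable an explicit Location decoration.
        gpu.location(&frag_uv, 0);
        gpu.location(&frag_color, 0);
        frag_color = .{ frag_uv[0], frag_uv[1], 0.0, 1.0 };
    }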
/// Forms the main linkage for `input` and `output` address spaces.
/// `ptr` must be a reference to a variable or struct field.
pub fn binding(comptime ptr: anytype, comptime group: u32, comptime bind: u32) void {
const code = comptimePrint(
\\OpDecorate %ptr DescriptorSet {}
\\OpDecorate %ptr Binding {}
, .{ group, bind });
asm volatile (code
pub fn binding(comptime ptr: anytype, comptime set: u32, comptime bind: u32) void {
asm volatile (
\\OpDecorate %ptr DescriptorSet $set
\\OpDecorate %ptr Binding $bind
:
: [ptr] "" (ptr),
[set] "c" (set),
[bind] "c" (bind),
);
}
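In the same spirit, a hedged sketch of `binding` on a uniform variable; the `.uniform` address space, the struct layout, and the set/binding numbers (0, 1) are illustrative assumptions.

    const gpu = @import("std").gpu;

    // Assumed uniform data; DescriptorSet 0 and Binding 1 are arbitrary choices.
    var scene: extern struct {
        time: f32,
        resolution: @Vector(2, f32),
    } addrspace(.uniform) = undefined;

    export fn vertexExample() callconv(.spirv_vertex) void {
        gpu.binding(&scene, 0, 1);
        _ = scene.time;
    }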
@@ -111,13 +110,10 @@ pub const Origin = enum(u32) {
/// The coordinates appear to originate in the specified `origin`.
/// Only valid with the `Fragment` calling convention.
pub fn fragmentOrigin(comptime entry_point: anytype, comptime origin: Origin) void {
const origin_enum = switch (origin) {
.upper_left => .OriginUpperLeft,
.lower_left => .OriginLowerLeft,
};
asm volatile ("OpExecutionMode %entry_point " ++ @tagName(origin_enum)
asm volatile ("OpExecutionMode %entry_point $origin"
:
: [entry_point] "" (entry_point),
[origin] "c" (@intFromEnum(origin)),
);
}
@@ -141,37 +137,33 @@ pub const DepthMode = enum(u32) {
/// Only valid with the `Fragment` calling convention.
pub fn depthMode(comptime entry_point: anytype, comptime mode: DepthMode) void {
const code = comptimePrint("OpExecutionMode %entry_point {}", .{@intFromEnum(mode)});
asm volatile (code
asm volatile ("OpExecutionMode %entry_point $mode"
:
: [entry_point] "" (entry_point),
[mode] "c" (mode),
);
}
/// Indicates the workgroup size in the `x`, `y`, and `z` dimensions.
/// Only valid with the `GLCompute` or `Kernel` calling conventions.
pub fn workgroupSize(comptime entry_point: anytype, comptime size: @Vector(3, u32)) void {
const code = comptimePrint("OpExecutionMode %entry_point LocalSize {} {} {}", .{
size[0],
size[1],
size[2],
});
asm volatile (code
asm volatile ("OpExecutionMode %entry_point LocalSize %x %y %z"
:
: [entry_point] "" (entry_point),
[x] "c" (size[0]),
[y] "c" (size[1]),
[z] "c" (size[2]),
);
}
/// A hint to the client, which indicates the workgroup size in the `x`, `y`, and `z` dimensions.
/// Only valid with the `GLCompute` or `Kernel` calling conventions.
pub fn workgroupSizeHint(comptime entry_point: anytype, comptime size: @Vector(3, u32)) void {
const code = comptimePrint("OpExecutionMode %entry_point LocalSizeHint {} {} {}", .{
size[0],
size[1],
size[2],
});
asm volatile (code
asm volatile ("OpExecutionMode %entry_point LocalSizeHint %x %y %z"
:
: [entry_point] "" (entry_point),
[x] "c" (size[0]),
[y] "c" (size[1]),
[z] "c" (size[2]),
);
}
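A hedged sketch of the execution-mode helpers; passing the entry point function directly, the names, and the 8x8x1 workgroup size are assumptions.

    const gpu = @import("std").gpu;

    export fn computeExample() callconv(.spirv_kernel) void {
        // Declare an 8x8x1 workgroup size for this kernel.
        gpu.workgroupSize(computeExample, .{ 8, 8, 1 });
    }

    export fn fragmentOriginExample() callconv(.spirv_fragment) void {
        // Fragment coordinates originate in the upper-left corner.
        gpu.fragmentOrigin(fragmentOriginExample, .upper_left);
    }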

File diff suppressed because it is too large.


@@ -135,6 +135,9 @@ const AsmValue = union(enum) {
/// This is a pre-supplied constant integer value.
constant: u32,
/// This is a pre-supplied constant string value.
string: []const u8,
/// Retrieve the result-id of this AsmValue. Asserts that this AsmValue
/// is of a variant that allows the result to be obtained (not an unresolved
/// forward declaration, not in the process of being declared, etc).
@@ -144,6 +147,7 @@ const AsmValue = union(enum) {
.unresolved_forward_reference,
// TODO: Lower this value as constant?
.constant,
.string,
=> unreachable,
.value => |result| result,
.ty => |result| result,
@@ -274,6 +278,16 @@ fn processInstruction(self: *Assembler) !void {
.OpEntryPoint => {
return self.fail(0, "cannot export entry points via OpEntryPoint, export the kernel using callconv(.Kernel)", .{});
},
.OpCapability => {
try self.spv.addCapability(@enumFromInt(self.inst.operands.items[0].value));
return;
},
.OpExtension => {
const ext_name_offset = self.inst.operands.items[0].string;
const ext_name = std.mem.sliceTo(self.inst.string_bytes.items[ext_name_offset..], 0);
try self.spv.addExtension(ext_name);
return;
},
.OpExtInstImport => blk: {
const set_name_offset = self.inst.operands.items[1].string;
const set_name = std.mem.sliceTo(self.inst.string_bytes.items[set_name_offset..], 0);
@@ -635,6 +649,28 @@ fn parseBitEnum(self: *Assembler, kind: spec.OperandKind) !void {
/// Also handles parsing any required extra operands.
fn parseValueEnum(self: *Assembler, kind: spec.OperandKind) !void {
const tok = self.currentToken();
if (self.eatToken(.placeholder)) {
const name = self.tokenText(tok)[1..];
const value = self.value_map.get(name) orelse {
return self.fail(tok.start, "invalid placeholder '${s}'", .{name});
};
switch (value) {
.constant => |literal32| {
try self.inst.operands.append(self.gpa, .{ .value = literal32 });
},
.string => |str| {
const enumerant = for (kind.enumerants()) |enumerant| {
if (std.mem.eql(u8, enumerant.name, str)) break enumerant;
} else {
return self.fail(tok.start, "'{s}' is not a valid value for enumeration {s}", .{ str, @tagName(kind) });
};
try self.inst.operands.append(self.gpa, .{ .value = enumerant.value });
},
else => return self.fail(tok.start, "value '{s}' cannot be used as placeholder", .{name}),
}
return;
}
try self.expectToken(.value);
const text = self.tokenText(tok);
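A hedged sketch of what the new assembler paths accept: OpCapability and OpExtension lines are now forwarded to the module's capability and extension sections, and a "c"-constrained constant can be spliced into a value-enum operand through a `$` placeholder. The function names and the chosen capability/extension are illustrative.

    fn requireFloat16() void {
        // Previously rejected; now routed to Module.addCapability/addExtension.
        asm volatile (
            \\OpCapability Float16
            \\OpExtension "SPV_KHR_16bit_storage"
        );
    }

    fn setExecutionMode(comptime entry_point: anytype, comptime mode: u32) void {
        // `$mode` is substituted from the "c"-constrained constant operand.
        asm volatile ("OpExecutionMode %entry_point $mode"
            :
            : [entry_point] "" (entry_point),
              [mode] "c" (mode),
        );
    }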


@@ -10,6 +10,8 @@ const Module = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const autoHashStrat = std.hash.autoHashStrat;
const Wyhash = std.hash.Wyhash;
const spec = @import("spec.zig");
const Word = spec.Word;
@@ -19,6 +21,19 @@ const IdResultType = spec.IdResultType;
const Section = @import("Section.zig");
/// Helper HashMap type to hash deeply
fn DeepHashMap(K: type, V: type) type {
return std.HashMapUnmanaged(K, V, struct {
pub fn hash(ctx: @This(), key: K) u64 {
_ = ctx;
var hasher = Wyhash.init(0);
autoHashStrat(&hasher, key, .Deep);
return hasher.final();
}
pub const eql = std.hash_map.getAutoEqlFn(K, @This());
}, std.hash_map.default_max_load_percentage);
}
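A small self-contained sketch of the hashing strategy this helper relies on: the `.Deep` strategy follows slice contents, so two keys holding equal parameter-id slices hash to the same value. The key type below is a stand-in for the real cache keys.

    const std = @import("std");

    test "Deep auto-hash follows slice contents" {
        const Key = struct { ret: u32, params: []const u32 };
        const a: Key = .{ .ret = 1, .params = &.{ 2, 3 } };
        const b: Key = .{ .ret = 1, .params = &.{ 2, 3 } };

        var ha = std.hash.Wyhash.init(0);
        std.hash.autoHashStrat(&ha, a, .Deep);
        var hb = std.hash.Wyhash.init(0);
        std.hash.autoHashStrat(&hb, b, .Deep);

        try std.testing.expectEqual(ha.final(), hb.final());
    }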
/// This structure represents a function that is in the process of being emitted.
/// Commonly, the contents of this structure will be merged with the appropriate
/// sections of the module and re-used. Note that the SPIR-V module system makes
@@ -103,6 +118,12 @@ gpa: Allocator,
/// Arena for things that need to live for the length of this program.
arena: std.heap.ArenaAllocator,
/// Target info
target: std.Target,
/// The target SPIR-V version
version: spec.Version,
/// Module layout, according to SPIR-V Spec section 2.4, "Logical Layout of a Module".
sections: struct {
/// Capability instructions
@@ -159,8 +180,16 @@ cache: struct {
// This cache is required so that @Vector(X, u1) in direct representation has the
// same ID as @Vector(X, bool) in indirect representation.
vector_types: std.AutoHashMapUnmanaged(struct { IdRef, u32 }, IdRef) = .empty,
array_types: std.AutoHashMapUnmanaged(struct { IdRef, IdRef }, IdRef) = .empty,
function_types: DeepHashMap(struct { IdRef, []const IdRef }, IdRef) = .empty,
capabilities: std.AutoHashMapUnmanaged(spec.Capability, void) = .empty,
extensions: std.StringHashMapUnmanaged(void) = .empty,
extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty,
decorations: std.AutoHashMapUnmanaged(struct { IdRef, spec.Decoration }, void) = .empty,
builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .empty,
bool_const: [2]?IdRef = .{ null, null },
} = .{},
/// Set of Decls, referred to by Decl.Index.
@@ -173,13 +202,23 @@ decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty,
/// The list of entry points that should be exported from this module.
entry_points: std.ArrayListUnmanaged(EntryPoint) = .empty,
/// The list of extended instruction sets that should be imported.
extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty,
pub fn init(gpa: Allocator, target: std.Target) Module {
const version_minor: u8 = blk: {
// Prefer higher versions
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_6)) break :blk 6;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_5)) break :blk 5;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_4)) break :blk 4;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_3)) break :blk 3;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_2)) break :blk 2;
if (std.Target.spirv.featureSetHas(target.cpu.features, .v1_1)) break :blk 1;
break :blk 0;
};
pub fn init(gpa: Allocator) Module {
return .{
.gpa = gpa,
.arena = std.heap.ArenaAllocator.init(gpa),
.target = target,
.version = .{ .major = 1, .minor = version_minor },
.next_result_id = 1, // 0 is an invalid SPIR-V result id, so start counting at 1.
};
}
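A self-contained sketch of the version selection performed above, using a toy feature set; real feature sets come from the resolved target.

    const std = @import("std");

    test "pick the highest enabled SPIR-V version feature" {
        const spirv = std.Target.spirv;
        const features = comptime spirv.featureSet(&.{ .v1_3, .v1_4 });

        const minor: u8 = if (spirv.featureSetHas(features, .v1_6))
            6
        else if (spirv.featureSetHas(features, .v1_5))
            5
        else if (spirv.featureSetHas(features, .v1_4))
            4
        else
            3;

        try std.testing.expectEqual(@as(u8, 4), minor);
    }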
@@ -201,14 +240,18 @@ pub fn deinit(self: *Module) void {
self.cache.int_types.deinit(self.gpa);
self.cache.float_types.deinit(self.gpa);
self.cache.vector_types.deinit(self.gpa);
self.cache.array_types.deinit(self.gpa);
self.cache.function_types.deinit(self.gpa);
self.cache.capabilities.deinit(self.gpa);
self.cache.extensions.deinit(self.gpa);
self.cache.extended_instruction_set.deinit(self.gpa);
self.cache.decorations.deinit(self.gpa);
self.cache.builtins.deinit(self.gpa);
self.decls.deinit(self.gpa);
self.decl_deps.deinit(self.gpa);
self.entry_points.deinit(self.gpa);
self.extended_instruction_set.deinit(self.gpa);
self.arena.deinit();
self.* = undefined;
@@ -240,6 +283,10 @@ pub fn idBound(self: Module) Word {
return self.next_result_id;
}
pub fn hasFeature(self: *Module, feature: std.Target.spirv.Feature) bool {
return std.Target.spirv.featureSetHas(self.target.cpu.features, feature);
}
fn addEntryPointDeps(
self: *Module,
decl_index: Decl.Index,
@@ -292,25 +339,68 @@ fn entryPoints(self: *Module) !Section {
return entry_points;
}
pub fn finalize(self: *Module, a: Allocator, target: std.Target) ![]Word {
pub fn finalize(self: *Module, a: Allocator) ![]Word {
// Emit capabilities and extensions
for (std.Target.spirv.all_features) |feature| {
if (self.target.cpu.features.isEnabled(feature.index)) {
const feature_tag: std.Target.spirv.Feature = @enumFromInt(feature.index);
switch (feature_tag) {
.v1_0, .v1_1, .v1_2, .v1_3, .v1_4, .v1_5, .v1_6 => {},
.int8 => try self.addCapability(.Int8),
.int16 => try self.addCapability(.Int16),
.int64 => try self.addCapability(.Int64),
.float16 => try self.addCapability(.Float16),
.float64 => try self.addCapability(.Float64),
.addresses => if (self.hasFeature(.shader)) {
try self.addCapability(.PhysicalStorageBufferAddresses);
try self.addExtension("SPV_KHR_physical_storage_buffer");
} else {
try self.addCapability(.Addresses);
},
.matrix => try self.addCapability(.Matrix),
.kernel => try self.addCapability(.Kernel),
.generic_pointer => try self.addCapability(.GenericPointer),
.vector16 => try self.addCapability(.Vector16),
.shader => try self.addCapability(.Shader),
}
}
}
// Emit memory model
const addressing_model: spec.AddressingModel = blk: {
if (self.hasFeature(.shader)) {
break :blk switch (self.target.cpu.arch) {
.spirv32 => .Logical, // TODO: I don't think this will ever be implemented.
.spirv64 => .PhysicalStorageBuffer64,
else => unreachable,
};
} else if (self.hasFeature(.kernel)) {
break :blk switch (self.target.cpu.arch) {
.spirv32 => .Physical32,
.spirv64 => .Physical64,
else => unreachable,
};
}
unreachable;
};
try self.sections.memory_model.emit(self.gpa, .OpMemoryModel, .{
.addressing_model = addressing_model,
.memory_model = switch (self.target.os.tag) {
.opencl => .OpenCL,
.vulkan, .opengl => .GLSL450,
else => unreachable,
},
});
// See SPIR-V Spec section 2.3, "Physical Layout of a SPIR-V Module and Instruction"
// TODO: Audit calls to allocId() in this function to make it idempotent.
var entry_points = try self.entryPoints();
defer entry_points.deinit(self.gpa);
const header = [_]Word{
spec.magic_number,
// TODO: From cpu features
spec.Version.toWord(.{
.major = 1,
.minor = switch (target.os.tag) {
// Emit SPIR-V 1.3 for now. This is the highest version that Vulkan 1.1 supports.
.vulkan => 3,
// Emit SPIR-V 1.4 for now. This is the highest version that Intel's CPU OpenCL supports.
else => 4,
},
}),
self.version.toWord(),
spec.zig_generator_id,
self.idBound(),
0, // Schema (currently reserved for future use)
@@ -319,7 +409,7 @@ pub fn finalize(self: *Module, a: Allocator, target: std.Target) ![]Word {
var source = Section{};
defer source.deinit(self.gpa);
try self.sections.debug_strings.emit(self.gpa, .OpSource, .{
.source_language = .Unknown,
.source_language = .Zig,
.version = 0,
// We cannot emit these because the Khronos translator does not parse this instruction
// correctly.
@@ -368,11 +458,23 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void {
try self.declareDeclDeps(decl_index, func.decl_deps.keys());
}
pub fn addCapability(self: *Module, cap: spec.Capability) !void {
const entry = try self.cache.capabilities.getOrPut(self.gpa, cap);
if (entry.found_existing) return;
try self.sections.capabilities.emit(self.gpa, .OpCapability, .{ .capability = cap });
}
pub fn addExtension(self: *Module, ext: []const u8) !void {
const entry = try self.cache.extensions.getOrPut(self.gpa, ext);
if (entry.found_existing) return;
try self.sections.extensions.emit(self.gpa, .OpExtension, .{ .name = ext });
}
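A hedged sketch of a call site, assuming it sits somewhere `Module` is in scope; the capability and extension names are illustrative.

    fn requireInt64(spv: *Module) !void {
        try spv.addCapability(.Int64);
        try spv.addCapability(.Int64); // cached; no second OpCapability is emitted
        try spv.addExtension("SPV_KHR_expect_assume"); // illustrative extension name
    }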
/// Imports or returns the existing id of an extended instruction set
pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef {
assert(set != .core);
const gop = try self.extended_instruction_set.getOrPut(self.gpa, set);
const gop = try self.cache.extended_instruction_set.getOrPut(self.gpa, set);
if (gop.found_existing) return gop.value_ptr.*;
const result_id = self.allocId();
@@ -477,20 +579,69 @@ pub fn floatType(self: *Module, bits: u16) !IdRef {
return entry.value_ptr.*;
}
pub fn vectorType(self: *Module, len: u32, child_id: IdRef) !IdRef {
const entry = try self.cache.vector_types.getOrPut(self.gpa, .{ child_id, len });
pub fn vectorType(self: *Module, len: u32, child_ty_id: IdRef) !IdRef {
const entry = try self.cache.vector_types.getOrPut(self.gpa, .{ child_ty_id, len });
if (!entry.found_existing) {
const result_id = self.allocId();
entry.value_ptr.* = result_id;
try self.sections.types_globals_constants.emit(self.gpa, .OpTypeVector, .{
.id_result = result_id,
.component_type = child_id,
.component_type = child_ty_id,
.component_count = len,
});
}
return entry.value_ptr.*;
}
pub fn arrayType(self: *Module, len_id: IdRef, child_ty_id: IdRef) !IdRef {
const entry = try self.cache.array_types.getOrPut(self.gpa, .{ child_ty_id, len_id });
if (!entry.found_existing) {
const result_id = self.allocId();
entry.value_ptr.* = result_id;
try self.sections.types_globals_constants.emit(self.gpa, .OpTypeArray, .{
.id_result = result_id,
.element_type = child_ty_id,
.length = len_id,
});
}
return entry.value_ptr.*;
}
pub fn functionType(self: *Module, return_ty_id: IdRef, param_type_ids: []const IdRef) !IdRef {
const entry = try self.cache.function_types.getOrPut(self.gpa, .{ return_ty_id, param_type_ids });
if (!entry.found_existing) {
const result_id = self.allocId();
entry.value_ptr.* = result_id;
try self.sections.types_globals_constants.emit(self.gpa, .OpTypeFunction, .{
.id_result = result_id,
.return_type = return_ty_id,
.id_ref_2 = param_type_ids,
});
}
return entry.value_ptr.*;
}
pub fn constBool(self: *Module, value: bool) !IdRef {
if (self.cache.bool_const[@intFromBool(value)]) |b| return b;
const result_ty_id = try self.boolType();
const result_id = self.allocId();
self.cache.bool_const[@intFromBool(value)] = result_id;
switch (value) {
inline else => |value_ct| try self.sections.types_globals_constants.emit(
self.gpa,
if (value_ct) .OpConstantTrue else .OpConstantFalse,
.{
.id_result_type = result_ty_id,
.id_result = result_id,
},
),
}
return result_id;
}
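A hedged sketch of the new cached constructors, assuming it sits where `Module` and `IdRef` are in scope; the ids passed in stand for a `u32` type and a length constant created elsewhere.

    fn buildExampleTypes(spv: *Module, u32_ty_id: IdRef, len_const_id: IdRef) !void {
        const vec4_id = try spv.vectorType(4, u32_ty_id);
        const arr_id = try spv.arrayType(len_const_id, u32_ty_id);
        _ = try spv.functionType(vec4_id, &.{ arr_id, u32_ty_id });
        _ = try spv.constBool(true); // later calls reuse the cached OpConstantTrue
    }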
/// Return a pointer to a builtin variable. `result_ty_id` must be a **pointer**
/// with storage class `.Input`.
pub fn builtin(self: *Module, result_ty_id: IdRef, spirv_builtin: spec.BuiltIn) !Decl.Index {
@@ -534,13 +685,17 @@ pub fn decorate(
target: IdRef,
decoration: spec.Decoration.Extended,
) !void {
try self.sections.annotations.emit(self.gpa, .OpDecorate, .{
.target = target,
.decoration = decoration,
});
const entry = try self.cache.decorations.getOrPut(self.gpa, .{ target, decoration });
if (!entry.found_existing) {
try self.sections.annotations.emit(self.gpa, .OpDecorate, .{
.target = target,
.decoration = decoration,
});
}
}
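A hedged sketch of the deduplication, assuming `Block` is a payload-free member of `spec.Decoration.Extended`.

    fn markAsBlock(spv: *Module, struct_ty_id: IdRef) !void {
        try spv.decorate(struct_ty_id, .Block);
        try spv.decorate(struct_ty_id, .Block); // cached; no duplicate OpDecorate
    }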
/// Decorate a result-id which is a member of some struct.
/// Unlike `decorate`, this does not need to be cached.
pub fn decorateMember(
self: *Module,
structure_type: IdRef,


@@ -75,7 +75,7 @@ pub fn createEmpty(
.disable_lld_caching = options.disable_lld_caching,
.build_id = options.build_id,
},
.object = codegen.Object.init(gpa),
.object = codegen.Object.init(gpa, comp.getTarget()),
};
errdefer self.deinit();
@@ -172,7 +172,7 @@ pub fn updateExports(
const spv_decl_index = try self.object.resolveNav(zcu, nav_index);
const cc = Type.fromInterned(nav_ty).fnCallingConvention(zcu);
const execution_model: spec.ExecutionModel = switch (target.os.tag) {
.vulkan => switch (cc) {
.vulkan, .opengl => switch (cc) {
.spirv_vertex => .Vertex,
.spirv_fragment => .Fragment,
.spirv_kernel => .GLCompute,
@@ -231,15 +231,10 @@ pub fn flushModule(
const spv = &self.object.spv;
const diags = &comp.link_diags;
const gpa = comp.gpa;
const target = comp.getTarget();
try writeCapabilities(spv, target);
try writeMemoryModel(spv, target);
// We need to export the list of error names somewhere so that we can pretty-print them in the
// executor. This is not really an important thing though, so we can just dump it in any old
// nonsemantic instruction. For now, just put it in OpSourceExtension with a special name.
var error_info = std.ArrayList(u8).init(self.object.gpa);
defer error_info.deinit();
@@ -269,7 +264,7 @@ pub fn flushModule(
.extension = error_info.items,
});
const module = try spv.finalize(arena, target);
const module = try spv.finalize(arena);
errdefer arena.free(module);
const linked_module = self.linkModule(arena, module, sub_prog_node) catch |err| switch (err) {
@@ -298,58 +293,3 @@ fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: std.Progress
return binary.finalize(a);
}
fn writeCapabilities(spv: *SpvModule, target: std.Target) !void {
const gpa = spv.gpa;
// TODO: Integrate with a hypothetical feature system
const caps: []const spec.Capability = switch (target.os.tag) {
.opencl => &.{ .Kernel, .Addresses, .Int8, .Int16, .Int64, .Float64, .Float16, .Vector16, .GenericPointer },
.vulkan => &.{ .Shader, .PhysicalStorageBufferAddresses, .Int8, .Int16, .Int64, .Float64, .Float16, .VariablePointers, .VariablePointersStorageBuffer },
else => unreachable,
};
for (caps) |cap| {
try spv.sections.capabilities.emit(gpa, .OpCapability, .{
.capability = cap,
});
}
switch (target.os.tag) {
.vulkan => {
try spv.sections.extensions.emit(gpa, .OpExtension, .{
.name = "SPV_KHR_physical_storage_buffer",
});
},
else => {},
}
}
fn writeMemoryModel(spv: *SpvModule, target: std.Target) !void {
const gpa = spv.gpa;
const addressing_model: spec.AddressingModel = switch (target.os.tag) {
.opencl => switch (target.cpu.arch) {
.spirv32 => .Physical32,
.spirv64 => .Physical64,
else => unreachable,
},
.opengl, .vulkan => switch (target.cpu.arch) {
.spirv32 => .Logical, // TODO: I don't think this will ever be implemented.
.spirv64 => .PhysicalStorageBuffer64,
else => unreachable,
},
else => unreachable,
};
const memory_model: spec.MemoryModel = switch (target.os.tag) {
.opencl => .OpenCL,
.opengl => .GLSL450,
.vulkan => .GLSL450,
else => unreachable,
};
try spv.sections.memory_model.emit(gpa, .OpMemoryModel, .{
.addressing_model = addressing_model,
.memory_model = memory_model,
});
}


@@ -155,7 +155,7 @@ const ModuleInfo = struct {
}
}
return ModuleInfo{
return .{
.entities = entities.unmanaged,
.operand_is_id = operand_is_id,
// There may be unrelated decorations at the end, so make sure to


@@ -166,7 +166,7 @@ const ModuleInfo = struct {
return error.InvalidPhysicalFormat;
}
return ModuleInfo{
return .{
.functions = functions.unmanaged,
.callee_store = callee_store.items,
.result_id_to_code_offset = result_id_to_code_offset.unmanaged,


@@ -43,7 +43,6 @@ pub fn build(b: *std.Build) void {
"../../tools/update_clang_options.zig",
"../../tools/update_cpu_features.zig",
"../../tools/update_glibc.zig",
"../../tools/update_spirv_features.zig",
}) |tool_src_path| {
const tool = b.addTest(.{
.name = std.fs.path.stem(tool_src_path),


@@ -1072,13 +1072,6 @@ const targets = [_]ArchTarget{
.td_name = "Sparc",
},
},
// TODO: merge tools/update_spirv_features.zig into this script
//.{
// .zig_name = "spirv",
// .llvm = .{
// .name = "SPIRV",
// },
//},
.{
.zig_name = "s390x",
.llvm = .{


@@ -1,328 +0,0 @@
//! This tool generates SPIR-V features from the grammar files in the SPIRV-Headers
//! (https://github.com/KhronosGroup/SPIRV-Headers/) and SPIRV-Registry (https://github.com/KhronosGroup/SPIRV-Registry/)
//! repositories. Currently it only generates a basic feature set definition consisting of versions, extensions and capabilities.
//! There is a lot left to be desired, as currently dependencies of extensions and dependencies on extensions aren't generated.
//! This is because there are some peculiarities in the SPIR-V registries:
//! - Capabilities may depend on multiple extensions, which cannot be modelled yet by std.Target.
//! - Extension dependencies are not documented in a machine-readable manner.
//! - Note that the grammar spec also contains definitions from extensions which aren't actually official. Most of these seem to be
//! from an intel project (https://github.com/intel/llvm/, https://github.com/intel/llvm/tree/sycl/sycl/doc/extensions/SPIRV),
//! and so ONLY extensions in the SPIRV-Registry should be included.
const std = @import("std");
const fs = std.fs;
const Allocator = std.mem.Allocator;
const g = @import("spirv/grammar.zig");
const Version = struct {
major: u32,
minor: u32,
fn parse(str: []const u8) !Version {
var it = std.mem.splitScalar(u8, str, '.');
const major = it.first();
const minor = it.next() orelse return error.InvalidVersion;
if (it.next() != null) return error.InvalidVersion;
return Version{
.major = std.fmt.parseInt(u32, major, 10) catch return error.InvalidVersion,
.minor = std.fmt.parseInt(u32, minor, 10) catch return error.InvalidVersion,
};
}
fn eql(a: Version, b: Version) bool {
return a.major == b.major and a.minor == b.minor;
}
fn lessThan(ctx: void, a: Version, b: Version) bool {
_ = ctx;
return if (a.major == b.major)
a.minor < b.minor
else
a.major < b.major;
}
};
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
if (args.len <= 1) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
if (std.mem.eql(u8, args[1], "--help")) {
usageAndExit(std.io.getStdErr(), args[0], 0);
}
if (args.len != 3) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
const spirv_headers_root = args[1];
const spirv_registry_root = args[2];
if (std.mem.startsWith(u8, spirv_headers_root, "-") or std.mem.startsWith(u8, spirv_registry_root, "-")) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
// Required for json parsing.
@setEvalBranchQuota(10000);
const registry_path = try fs.path.join(allocator, &.{ spirv_headers_root, "include", "spirv", "unified1", "spirv.core.grammar.json" });
const registry_json = try std.fs.cwd().readFileAlloc(allocator, registry_path, std.math.maxInt(usize));
var scanner = std.json.Scanner.initCompleteInput(allocator, registry_json);
var diagnostics = std.json.Diagnostics{};
scanner.enableDiagnostics(&diagnostics);
const registry = std.json.parseFromTokenSourceLeaky(g.CoreRegistry, allocator, &scanner, .{}) catch |err| {
std.debug.print("line,col: {},{}\n", .{ diagnostics.getLine(), diagnostics.getColumn() });
return err;
};
const capabilities = for (registry.operand_kinds) |opkind| {
if (std.mem.eql(u8, opkind.kind, "Capability"))
break opkind.enumerants orelse return error.InvalidRegistry;
} else return error.InvalidRegistry;
const extensions = try gather_extensions(allocator, spirv_registry_root);
const versions = try gatherVersions(allocator, registry);
var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
const w = bw.writer();
try w.writeAll(
\\//! This file is auto-generated by tools/update_spirv_features.zig.
\\//! TODO: Dependencies of capabilities on extensions.
\\//! TODO: Dependencies of extensions on extensions.
\\//! TODO: Dependencies of extensions on versions.
\\
\\const std = @import("../std.zig");
\\const CpuFeature = std.Target.Cpu.Feature;
\\const CpuModel = std.Target.Cpu.Model;
\\
\\pub const Feature = enum {
\\
);
for (versions) |ver| {
try w.print(" v{}_{},\n", .{ ver.major, ver.minor });
}
for (extensions) |ext| {
try w.print(" {p},\n", .{std.zig.fmtId(ext)});
}
for (capabilities) |cap| {
try w.print(" {p},\n", .{std.zig.fmtId(cap.enumerant)});
}
try w.writeAll(
\\};
\\
\\pub const featureSet = CpuFeature.FeatureSetFns(Feature).featureSet;
\\pub const featureSetHas = CpuFeature.FeatureSetFns(Feature).featureSetHas;
\\pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
\\pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
\\
\\pub const all_features = blk: {
\\ @setEvalBranchQuota(2000);
\\ const len = @typeInfo(Feature).@"enum".fields.len;
\\ std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
\\ var result: [len]CpuFeature = undefined;
\\
);
for (versions, 0..) |ver, i| {
try w.print(
\\ result[@intFromEnum(Feature.v{0}_{1})] = .{{
\\ .llvm_name = null,
\\ .description = "SPIR-V version {0}.{1}",
\\
, .{ ver.major, ver.minor });
if (i == 0) {
try w.writeAll(
\\ .dependencies = featureSet(&[_]Feature{}),
\\ };
\\
);
} else {
try w.print(
\\ .dependencies = featureSet(&[_]Feature{{
\\ .v{}_{},
\\ }}),
\\ }};
\\
, .{ versions[i - 1].major, versions[i - 1].minor });
}
}
// TODO: Extension dependencies.
for (extensions) |ext| {
try w.print(
\\ result[@intFromEnum(Feature.{p_})] = .{{
\\ .llvm_name = null,
\\ .description = "SPIR-V extension {s}",
\\ .dependencies = featureSet(&[_]Feature{{}}),
\\ }};
\\
, .{
std.zig.fmtId(ext),
ext,
});
}
// TODO: Capability extension dependencies.
for (capabilities) |cap| {
try w.print(
\\ result[@intFromEnum(Feature.{p_})] = .{{
\\ .llvm_name = null,
\\ .description = "Enable SPIR-V capability {s}",
\\ .dependencies = featureSet(&[_]Feature{{
\\
, .{
std.zig.fmtId(cap.enumerant),
cap.enumerant,
});
if (cap.version) |ver_str| {
if (!std.mem.eql(u8, ver_str, "None")) {
const ver = try Version.parse(ver_str);
try w.print(" .v{}_{},\n", .{ ver.major, ver.minor });
}
}
for (cap.capabilities) |cap_dep| {
try w.print(" .{p_},\n", .{std.zig.fmtId(cap_dep)});
}
try w.writeAll(
\\ }),
\\ };
\\
);
}
try w.writeAll(
\\ const ti = @typeInfo(Feature);
\\ for (&result, 0..) |*elem, i| {
\\ elem.index = i;
\\ elem.name = ti.@"enum".fields[i].name;
\\ }
\\ break :blk result;
\\};
\\
);
try bw.flush();
}
/// SPIRV-Registry should hold all extensions currently registered for SPIR-V.
/// The *.grammar.json in SPIRV-Headers should have most of these as well, but with this we're sure to get only the actually
/// registered ones.
/// TODO: Unfortunately, neither repository contains a machine-readable list of extension dependencies.
fn gather_extensions(allocator: Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
const extensions_path = try fs.path.join(allocator, &.{ spirv_registry_root, "extensions" });
var extensions_dir = try fs.cwd().openDir(extensions_path, .{ .iterate = true });
defer extensions_dir.close();
var extensions = std.ArrayList([]const u8).init(allocator);
var vendor_it = extensions_dir.iterate();
while (try vendor_it.next()) |vendor_entry| {
std.debug.assert(vendor_entry.kind == .directory); // If this fails, the structure of SPIRV-Registry has changed.
const vendor_dir = try extensions_dir.openDir(vendor_entry.name, .{ .iterate = true });
var ext_it = vendor_dir.iterate();
while (try ext_it.next()) |ext_entry| {
// There is both an HTML and an asciidoc version of every spec (as well as some other directories).
// We need just the name, but to avoid duplicates we will just skip anything that's not asciidoc.
if (!std.mem.endsWith(u8, ext_entry.name, ".asciidoc"))
continue;
// Unfortunately, some extension filenames are incorrect, so we need to look for the string in the 'Name Strings' section.
// This has the following format:
// ```
// Name Strings
// ------------
//
// SPV_EXT_name
// ```
// OR
// ```
// == Name Strings
//
// SPV_EXT_name
// ```
const ext_spec = try vendor_dir.readFileAlloc(allocator, ext_entry.name, std.math.maxInt(usize));
const name_strings = "Name Strings";
const name_strings_offset = std.mem.indexOf(u8, ext_spec, name_strings) orelse return error.InvalidRegistry;
// As the specs are inconsistent on this next part, just skip any newlines/minuses
var ext_start = name_strings_offset + name_strings.len + 1;
while (ext_spec[ext_start] == '\n' or ext_spec[ext_start] == '-') {
ext_start += 1;
}
const ext_end = std.mem.indexOfScalarPos(u8, ext_spec, ext_start, '\n') orelse return error.InvalidRegistry;
const ext = ext_spec[ext_start..ext_end];
std.debug.assert(std.mem.startsWith(u8, ext, "SPV_")); // Sanity check, all extensions should have a name like SPV_VENDOR_extension.
try extensions.append(try allocator.dupe(u8, ext));
}
}
return extensions.items;
}
fn insertVersion(versions: *std.ArrayList(Version), version: ?[]const u8) !void {
const ver_str = version orelse return;
if (std.mem.eql(u8, ver_str, "None"))
return;
const ver = try Version.parse(ver_str);
for (versions.items) |existing_ver| {
if (ver.eql(existing_ver)) return;
}
try versions.append(ver);
}
fn gatherVersions(allocator: Allocator, registry: g.CoreRegistry) ![]const Version {
// Expected number of versions is small
var versions = std.ArrayList(Version).init(allocator);
for (registry.instructions) |inst| {
try insertVersion(&versions, inst.version);
}
for (registry.operand_kinds) |opkind| {
const enumerants = opkind.enumerants orelse continue;
for (enumerants) |enumerant| {
try insertVersion(&versions, enumerant.version);
}
}
std.mem.sort(Version, versions.items, {}, Version.lessThan);
return versions.items;
}
fn usageAndExit(file: fs.File, arg0: []const u8, code: u8) noreturn {
file.writer().print(
\\Usage: {s} /path/git/SPIRV-Headers /path/git/SPIRV-Registry
\\
\\Prints to stdout Zig code which can be used to replace the file lib/std/target/spirv.zig.
\\
\\SPIRV-Headers can be cloned from https://github.com/KhronosGroup/SPIRV-Headers,
\\SPIRV-Registry can be cloned from https://github.com/KhronosGroup/SPIRV-Registry.
\\
, .{arg0}) catch std.process.exit(1);
std.process.exit(code);
}