remove spirv cpu arch

parent c71bb0f2b6
commit 1df79ab895

lib/compiler/aro/aro/target.zig (vendored) | 3
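
For context, a minimal sketch (not part of this commit) of how a SPIR-V target is requested after this change: with the bare `spirv` architecture gone, a query spells out the pointer width explicitly (`spirv32`/`spirv64`), the same way the test harness hunk near the end of this diff does with `std.Target.Query.parse`. The `vulkan_v1_2` CPU model and the feature names are taken from that hunk; the test name itself is illustrative.

const std = @import("std");

// Sketch only: parse a 64-bit SPIR-V target query the way the test harness
// in this diff does, then check which arch and OS tag it resolved to.
test "parse a spirv64 target query" {
    const query = try std.Target.Query.parse(.{
        .arch_os_abi = "spirv64-vulkan",
        .cpu_features = "vulkan_v1_2+float16+float64",
    });
    try std.testing.expectEqual(std.Target.Cpu.Arch.spirv64, query.cpu_arch.?);
    try std.testing.expectEqual(std.Target.Os.Tag.vulkan, query.os_tag.?);
}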
@@ -486,7 +486,6 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
         .kalimba,
         .lanai,
         .wasm32,
-        .spirv,
         .spirv32,
         .loongarch32,
         .xtensa,
@@ -554,7 +553,6 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
         .powerpcle => copy.cpu.arch = .powerpc64le,
         .riscv32 => copy.cpu.arch = .riscv64,
         .sparc => copy.cpu.arch = .sparc64,
-        .spirv => copy.cpu.arch = .spirv64,
         .spirv32 => copy.cpu.arch = .spirv64,
         .thumb => copy.cpu.arch = .aarch64,
         .thumbeb => copy.cpu.arch = .aarch64_be,
@@ -609,7 +607,6 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
         .xtensa => "xtensa",
         .nvptx => "nvptx",
         .nvptx64 => "nvptx64",
-        .spirv => "spirv",
         .spirv32 => "spirv32",
         .spirv64 => "spirv64",
         .kalimba => "kalimba",
@@ -120,7 +120,6 @@ pub fn F16T(comptime OtherType: type) type {
         .nvptx64,
         .riscv32,
         .riscv64,
-        .spirv,
         .spirv32,
         .spirv64,
         => f16,
@@ -1066,7 +1066,7 @@ pub const ObjectFormat = enum {
         .uefi, .windows => .coff,
         .zos => .goff,
         else => switch (arch) {
-            .spirv, .spirv32, .spirv64 => .spirv,
+            .spirv32, .spirv64 => .spirv,
             .wasm32, .wasm64 => .wasm,
             else => .elf,
         },
@@ -1106,7 +1106,6 @@ pub fn toElfMachine(target: *const Target) std.elf.EM {

         .nvptx,
         .nvptx64,
-        .spirv,
         .spirv32,
         .spirv64,
         .wasm32,
@@ -1155,7 +1154,6 @@ pub fn toCoffMachine(target: *const Target) std.coff.MachineType {
         .s390x,
         .sparc,
         .sparc64,
-        .spirv,
         .spirv32,
         .spirv64,
         .ve,
@@ -1368,7 +1366,6 @@ pub const Cpu = struct {
         s390x,
         sparc,
         sparc64,
-        spirv,
         spirv32,
         spirv64,
         ve,
@@ -1454,7 +1451,7 @@ pub const Cpu = struct {
             .riscv32, .riscv64 => .riscv,
             .s390x => .s390x,
             .sparc, .sparc64 => .sparc,
-            .spirv, .spirv32, .spirv64 => .spirv,
+            .spirv32, .spirv64 => .spirv,
             .ve => .ve,
             .wasm32, .wasm64 => .wasm,
             .x86, .x86_64 => .x86,
@@ -1558,7 +1555,7 @@ pub const Cpu = struct {

         pub inline fn isSpirV(arch: Arch) bool {
             return switch (arch) {
-                .spirv, .spirv32, .spirv64 => true,
+                .spirv32, .spirv64 => true,
                 else => false,
             };
         }
@@ -1614,7 +1611,6 @@ pub const Cpu = struct {
             .thumb,
             .ve,
             // GPU bitness is opaque. For now, assume little endian.
-            .spirv,
             .spirv32,
             .spirv64,
             .loongarch32,
@@ -1843,7 +1839,7 @@ pub const Cpu = struct {
                 .spirv_kernel,
                 .spirv_fragment,
                 .spirv_vertex,
-                => &.{ .spirv, .spirv32, .spirv64 },
+                => &.{ .spirv32, .spirv64 },
             };
         }
     };
@@ -2638,7 +2634,6 @@ pub fn ptrBitWidth_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) u16 {
         .sparc64,
         .s390x,
         .ve,
-        .spirv,
         .spirv64,
         .loongarch64,
         => 64,
@@ -3157,7 +3152,6 @@ pub fn cTypeAlignment(target: *const Target, c_type: CType) u16 {
             .riscv32,
             .riscv64,
             .sparc64,
-            .spirv,
             .spirv32,
             .spirv64,
             .x86_64,
@@ -3250,7 +3244,6 @@ pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 {
             .riscv32,
             .riscv64,
             .sparc64,
-            .spirv,
             .spirv32,
             .spirv64,
             .x86_64,
@@ -3319,7 +3312,6 @@ pub fn cMaxIntAlignment(target: *const Target) u16 {
         .loongarch32,
         .loongarch64,
         .m68k,
-        .spirv,
         .spirv32,
         .spirv64,
         .ve,
@@ -3389,7 +3381,7 @@ pub fn cCallingConvention(target: *const Target) ?std.builtin.CallingConvention
         .xtensa => .{ .xtensa_call0 = .{} },
         .amdgcn => .{ .amdgcn_device = .{} },
         .nvptx, .nvptx64 => .nvptx_device,
-        .spirv, .spirv32, .spirv64 => .spirv_device,
+        .spirv32, .spirv64 => .spirv_device,
     };
 }

@@ -5,16 +5,11 @@ const CpuFeature = std.Target.Cpu.Feature;
 const CpuModel = std.Target.Cpu.Model;

 pub const Feature = enum {
-    addresses,
     arbitrary_precision_integers,
     float16,
     float64,
     generic_pointer,
     int64,
-    kernel,
-    matrix,
-    physical_storage_buffer,
-    shader,
     storage_push_constant16,
     v1_0,
     v1_1,
@@ -37,13 +32,6 @@ pub const all_features = blk: {
     const len = @typeInfo(Feature).@"enum".fields.len;
     std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
     var result: [len]CpuFeature = undefined;
-    result[@intFromEnum(Feature.addresses)] = .{
-        .llvm_name = null,
-        .description = "Enable Addresses capability",
-        .dependencies = featureSet(&[_]Feature{
-            .v1_0,
-        }),
-    };
     result[@intFromEnum(Feature.arbitrary_precision_integers)] = .{
         .llvm_name = null,
         .description = "Enable SPV_INTEL_arbitrary_precision_integers extension and the ArbitraryPrecisionIntegersINTEL capability",
@@ -69,7 +57,7 @@ pub const all_features = blk: {
         .llvm_name = null,
         .description = "Enable GenericPointer capability",
         .dependencies = featureSet(&[_]Feature{
-            .addresses,
+            .v1_0,
         }),
     };
     result[@intFromEnum(Feature.int64)] = .{
@@ -79,34 +67,6 @@ pub const all_features = blk: {
             .v1_0,
         }),
     };
-    result[@intFromEnum(Feature.kernel)] = .{
-        .llvm_name = null,
-        .description = "Enable Kernel capability",
-        .dependencies = featureSet(&[_]Feature{
-            .v1_0,
-        }),
-    };
-    result[@intFromEnum(Feature.matrix)] = .{
-        .llvm_name = null,
-        .description = "Enable Matrix capability",
-        .dependencies = featureSet(&[_]Feature{
-            .v1_0,
-        }),
-    };
-    result[@intFromEnum(Feature.physical_storage_buffer)] = .{
-        .llvm_name = null,
-        .description = "Enable SPV_KHR_variable_pointers extension and the (VariablePointers, VariablePointersStorageBuffer) capabilities",
-        .dependencies = featureSet(&[_]Feature{
-            .v1_0,
-        }),
-    };
-    result[@intFromEnum(Feature.shader)] = .{
-        .llvm_name = null,
-        .description = "Enable Shader capability",
-        .dependencies = featureSet(&[_]Feature{
-            .matrix,
-        }),
-    };
     result[@intFromEnum(Feature.storage_push_constant16)] = .{
         .llvm_name = null,
         .description = "Enable SPV_KHR_16bit_storage extension and the StoragePushConstant16 capability",
@@ -172,7 +132,7 @@ pub const all_features = blk: {
         .llvm_name = null,
         .description = "Enable Vector16 capability",
         .dependencies = featureSet(&[_]Feature{
-            .kernel,
+            .v1_0,
         }),
     };
     const ti = @typeInfo(Feature);
@@ -193,8 +153,6 @@ pub const cpu = struct {
         .name = "opencl_v2",
         .llvm_name = null,
         .features = featureSet(&[_]Feature{
-            .generic_pointer,
-            .kernel,
             .v1_2,
         }),
     };
@@ -202,7 +160,6 @@ pub const cpu = struct {
         .name = "vulkan_v1_2",
         .llvm_name = null,
         .features = featureSet(&[_]Feature{
-            .shader,
             .v1_5,
         }),
     };
@@ -189,7 +189,7 @@ pub const CallingConvention = union(enum(u8)) {
     pub const kernel: CallingConvention = switch (builtin.target.cpu.arch) {
         .amdgcn => .amdgcn_kernel,
         .nvptx, .nvptx64 => .nvptx_kernel,
-        .spirv, .spirv32, .spirv64 => .spirv_kernel,
+        .spirv32, .spirv64 => .spirv_kernel,
         else => unreachable,
     };

@@ -14,7 +14,6 @@ pub fn supportsUnwinding(target: *const std.Target) bool {
         .amdgcn,
         .nvptx,
         .nvptx64,
-        .spirv,
         .spirv32,
         .spirv64,
         => false,
@@ -26289,7 +26289,7 @@ fn zirWorkItem(

     switch (target.cpu.arch) {
         // TODO: Allow for other GPU targets.
-        .amdgcn, .spirv, .spirv64, .spirv32, .nvptx, .nvptx64 => {},
+        .amdgcn, .spirv64, .spirv32, .nvptx, .nvptx64 => {},
         else => {
             return sema.fail(block, builtin_src, "builtin only available on GPU targets; targeted architecture is {s}", .{@tagName(target.cpu.arch)});
         },
@@ -3935,7 +3935,6 @@ pub fn atomicPtrAlignment(
         .s390x,
         .wasm64,
         .ve,
-        .spirv,
         .spirv64,
         .loongarch64,
         => 64,
@@ -37,7 +37,7 @@ fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
         .stage2_powerpc => .powerpc_backend,
         .stage2_riscv64 => .riscv64_backend,
         .stage2_sparc64 => .sparc64_backend,
-        .stage2_spirv => .spirv64_backend,
+        .stage2_spirv => .spirv_backend,
         .stage2_wasm => .wasm_backend,
         .stage2_x86 => .x86_backend,
         .stage2_x86_64 => .x86_64_backend,
@@ -93,8 +93,10 @@ pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8
         .xtensa => "xtensa",
         .nvptx => "nvptx",
         .nvptx64 => "nvptx64",
-        .spirv => "spirv",
-        .spirv32 => "spirv32",
+        .spirv32 => switch (target.os.tag) {
+            .vulkan, .opengl => "spirv",
+            else => "spirv32",
+        },
         .spirv64 => "spirv64",
         .lanai => "lanai",
         .wasm32 => "wasm32",
@@ -150,9 +152,6 @@ pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8
         .powerpc => subArchName(target, .powerpc, .{
             .{ .spe, "spe" },
         }),
-        .spirv => subArchName(target, .spirv, .{
-            .{ .v1_5, "1.5" },
-        }),
         .spirv32, .spirv64 => subArchName(target, .spirv, .{
             .{ .v1_5, "1.5" },
             .{ .v1_4, "1.4" },
@@ -441,8 +440,10 @@ pub fn dataLayout(target: *const std.Target) []const u8 {
         else
             "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
         },
-        .spirv => "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
-        .spirv32 => "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
+        .spirv32 => switch (target.os.tag) {
+            .vulkan, .opengl => "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
+            else => "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
+        },
         .spirv64 => "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
         .wasm32 => if (target.os.tag == .emscripten)
             "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-i128:128-f128:64-n32:64-S128-ni:1:10:20"
@@ -13129,7 +13130,6 @@ pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
             llvm.LLVMInitializeLoongArchAsmPrinter();
             llvm.LLVMInitializeLoongArchAsmParser();
         },
-        .spirv,
         .spirv32,
         .spirv64,
         => {
@@ -439,7 +439,7 @@ const NavGen = struct {
     fn importExtendedSet(self: *NavGen) !IdResult {
         const target = self.spv.target;
         return switch (target.os.tag) {
-            .opencl => try self.spv.importInstructionSet(.@"OpenCL.std"),
+            .opencl, .amdhsa => try self.spv.importInstructionSet(.@"OpenCL.std"),
             .vulkan, .opengl => try self.spv.importInstructionSet(.@"GLSL.std.450"),
             else => unreachable,
         };
@@ -561,7 +561,7 @@ const NavGen = struct {
     }

     fn castToGeneric(self: *NavGen, type_id: IdRef, ptr_id: IdRef) !IdRef {
-        if (self.spv.hasFeature(.kernel)) {
+        if (self.spv.hasFeature(.generic_pointer)) {
             const result_id = self.spv.allocId();
             try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
                 .id_result_type = type_id,
@@ -601,16 +601,18 @@ const NavGen = struct {

         // We require Int8 and Int16 capabilities and benefit Int64 when available.
         // 32-bit integers are always supported (see spec, 2.16.1, Data rules).
-        const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{
-            .{ .bits = 8, .feature = null },
-            .{ .bits = 16, .feature = null },
-            .{ .bits = 32, .feature = null },
-            .{ .bits = 64, .feature = .int64 },
+        const ints = [_]struct { bits: u16, enabled: bool }{
+            .{ .bits = 8, .enabled = true },
+            .{ .bits = 16, .enabled = true },
+            .{ .bits = 32, .enabled = true },
+            .{
+                .bits = 64,
+                .enabled = self.spv.hasFeature(.int64) or self.spv.target.cpu.arch == .spirv64,
+            },
         };

         for (ints) |int| {
-            const has_feature = if (int.feature) |feature| self.spv.hasFeature(feature) else true;
-            if (bits <= int.bits and has_feature) return .{ int.bits, false };
+            if (bits <= int.bits and int.enabled) return .{ int.bits, false };
         }

         // Big int
@@ -624,7 +626,10 @@ const NavGen = struct {
     /// is no way of knowing whether those are actually supported.
     /// TODO: Maybe this should be cached?
     fn largestSupportedIntBits(self: *NavGen) u16 {
-        return if (self.spv.hasFeature(.int64)) 64 else 32;
+        if (self.spv.hasFeature(.int64) or self.spv.target.cpu.arch == .spirv64) {
+            return 64;
+        }
+        return 32;
     }

     fn arithmeticTypeInfo(self: *NavGen, ty: Type) ArithmeticTypeInfo {
||||||
@ -736,8 +741,8 @@ const NavGen = struct {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
const final_value: spec.LiteralContextDependentNumber = blk: {
|
const final_value: spec.LiteralContextDependentNumber = switch (self.spv.target.os.tag) {
|
||||||
if (self.spv.hasFeature(.kernel)) {
|
.opencl, .amdhsa => blk: {
|
||||||
const value64: u64 = switch (signedness) {
|
const value64: u64 = switch (signedness) {
|
||||||
.signed => @bitCast(@as(i64, @intCast(value))),
|
.signed => @bitCast(@as(i64, @intCast(value))),
|
||||||
.unsigned => @as(u64, @intCast(value)),
|
.unsigned => @as(u64, @intCast(value)),
|
||||||
@@ -754,13 +759,12 @@ const NavGen = struct {
                     33...64 => .{ .uint64 = truncated_value },
                     else => unreachable,
                 };
-            }
-
-            break :blk switch (backing_bits) {
+            },
+            else => switch (backing_bits) {
                 1...32 => if (signedness == .signed) .{ .int32 = @intCast(value) } else .{ .uint32 = @intCast(value) },
                 33...64 => if (signedness == .signed) .{ .int64 = value } else .{ .uint64 = value },
                 else => unreachable,
-            };
+            },
         };

         const result_id = try self.spv.constant(result_ty_id, final_value);
@@ -1276,12 +1280,11 @@ const NavGen = struct {
             return self.arrayType(backing_bits / big_int_bits, int_ty);
         }

-        // Kernel only supports unsigned ints.
-        if (self.spv.hasFeature(.kernel)) {
-            return self.spv.intType(.unsigned, backing_bits);
-        }
-
-        return self.spv.intType(signedness, backing_bits);
+        return switch (self.spv.target.os.tag) {
+            // Kernel only supports unsigned ints.
+            .opencl, .amdhsa => return self.spv.intType(.unsigned, backing_bits),
+            else => self.spv.intType(signedness, backing_bits),
+        };
     }

     fn arrayType(self: *NavGen, len: u32, child_ty: IdRef) !IdRef {
|
||||||
@ -1314,20 +1317,23 @@ const NavGen = struct {
|
|||||||
|
|
||||||
const child_ty_id = try self.resolveType(child_ty, child_repr);
|
const child_ty_id = try self.resolveType(child_ty, child_repr);
|
||||||
|
|
||||||
if (self.spv.hasFeature(.shader)) {
|
switch (self.spv.target.os.tag) {
|
||||||
if (child_ty.zigTypeTag(zcu) == .@"struct") {
|
.vulkan, .opengl => {
|
||||||
switch (storage_class) {
|
if (child_ty.zigTypeTag(zcu) == .@"struct") {
|
||||||
.Uniform, .PushConstant => try self.spv.decorate(child_ty_id, .Block),
|
switch (storage_class) {
|
||||||
else => {},
|
.Uniform, .PushConstant => try self.spv.decorate(child_ty_id, .Block),
|
||||||
|
else => {},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
switch (ip.indexToKey(child_ty.toIntern())) {
|
switch (ip.indexToKey(child_ty.toIntern())) {
|
||||||
.func_type, .opaque_type => {},
|
.func_type, .opaque_type => {},
|
||||||
else => {
|
else => {
|
||||||
try self.spv.decorate(result_id, .{ .ArrayStride = .{ .array_stride = @intCast(child_ty.abiSize(zcu)) } });
|
try self.spv.decorate(result_id, .{ .ArrayStride = .{ .array_stride = @intCast(child_ty.abiSize(zcu)) } });
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
else => {},
|
||||||
}
|
}
|
||||||
|
|
||||||
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{
|
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{
|
||||||
@@ -1554,10 +1560,13 @@ const NavGen = struct {
             return try self.arrayType(1, elem_ty_id);
         } else {
             const result_id = try self.arrayType(total_len, elem_ty_id);
-            if (self.spv.hasFeature(.shader)) {
-                try self.spv.decorate(result_id, .{ .ArrayStride = .{
-                    .array_stride = @intCast(elem_ty.abiSize(zcu)),
-                } });
-            }
+            switch (self.spv.target.os.tag) {
+                .vulkan, .opengl => {
+                    try self.spv.decorate(result_id, .{ .ArrayStride = .{
+                        .array_stride = @intCast(elem_ty.abiSize(zcu)),
+                    } });
+                },
+                else => {},
+            }
             return result_id;
         }
@@ -1688,11 +1697,15 @@ const NavGen = struct {
                 continue;
             }

-            if (self.spv.hasFeature(.shader)) {
-                try self.spv.decorateMember(result_id, index, .{ .Offset = .{
-                    .byte_offset = @intCast(ty.structFieldOffset(field_index, zcu)),
-                } });
-            }
+            switch (self.spv.target.os.tag) {
+                .vulkan, .opengl => {
+                    try self.spv.decorateMember(result_id, index, .{ .Offset = .{
+                        .byte_offset = @intCast(ty.structFieldOffset(field_index, zcu)),
+                    } });
+                },
+                else => {},
+            }

             const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
                 try ip.getOrPutStringFmt(zcu.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
             try member_types.append(try self.resolveType(field_ty, .indirect));
|
|||||||
fn spvStorageClass(self: *NavGen, as: std.builtin.AddressSpace) StorageClass {
|
fn spvStorageClass(self: *NavGen, as: std.builtin.AddressSpace) StorageClass {
|
||||||
return switch (as) {
|
return switch (as) {
|
||||||
.generic => if (self.spv.hasFeature(.generic_pointer)) .Generic else .Function,
|
.generic => if (self.spv.hasFeature(.generic_pointer)) .Generic else .Function,
|
||||||
.global => {
|
.global => switch (self.spv.target.os.tag) {
|
||||||
if (self.spv.hasFeature(.kernel)) return .CrossWorkgroup;
|
.opencl, .amdhsa => .CrossWorkgroup,
|
||||||
return .StorageBuffer;
|
else => .StorageBuffer,
|
||||||
},
|
},
|
||||||
.push_constant => {
|
.push_constant => {
|
||||||
assert(self.spv.hasFeature(.shader));
|
|
||||||
return .PushConstant;
|
return .PushConstant;
|
||||||
},
|
},
|
||||||
.output => {
|
.output => {
|
||||||
assert(self.spv.hasFeature(.shader));
|
|
||||||
return .Output;
|
return .Output;
|
||||||
},
|
},
|
||||||
.uniform => {
|
.uniform => {
|
||||||
assert(self.spv.hasFeature(.shader));
|
|
||||||
return .Uniform;
|
return .Uniform;
|
||||||
},
|
},
|
||||||
.storage_buffer => {
|
.storage_buffer => {
|
||||||
assert(self.spv.hasFeature(.shader));
|
|
||||||
return .StorageBuffer;
|
return .StorageBuffer;
|
||||||
},
|
},
|
||||||
.physical_storage_buffer => {
|
.physical_storage_buffer => {
|
||||||
assert(self.spv.hasFeature(.physical_storage_buffer));
|
|
||||||
return .PhysicalStorageBuffer;
|
return .PhysicalStorageBuffer;
|
||||||
},
|
},
|
||||||
.constant => .UniformConstant,
|
.constant => .UniformConstant,
|
||||||
@@ -2768,7 +2776,7 @@ const NavGen = struct {

         const p_error_id = self.spv.allocId();
         switch (target.os.tag) {
-            .opencl => {
+            .opencl, .amdhsa => {
                 const kernel_proto_ty_id = try self.functionType(Type.void, &.{ptr_anyerror_ty});

                 try section.emit(self.spv.gpa, .OpFunction, .{
@@ -2876,7 +2884,7 @@ const NavGen = struct {

         const execution_mode: spec.ExecutionModel = switch (target.os.tag) {
             .vulkan, .opengl => .GLCompute,
-            .opencl => .Kernel,
+            .opencl, .amdhsa => .Kernel,
             else => unreachable,
         };

@@ -3630,8 +3638,13 @@ const NavGen = struct {
             .integer, .strange_integer => {
                 const abs_value = try self.buildUnary(.i_abs, value);

-                if (value.ty.intInfo(zcu).signedness == .signed and self.spv.hasFeature(.shader)) {
-                    return self.todo("perform bitcast after @abs", .{});
+                switch (self.spv.target.os.tag) {
+                    .vulkan, .opengl => {
+                        if (value.ty.intInfo(zcu).signedness == .signed) {
+                            return self.todo("perform bitcast after @abs", .{});
+                        }
+                    },
+                    else => {},
                 }

                 return try self.normalize(abs_value, self.arithmeticTypeInfo(result_ty));
@@ -4156,22 +4169,25 @@ const NavGen = struct {
         defer self.gpa.free(ids);

         const result_id = self.spv.allocId();
-        if (self.spv.hasFeature(.addresses)) {
-            try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
-                .id_result_type = result_ty_id,
-                .id_result = result_id,
-                .base = base,
-                .element = element,
-                .indexes = ids,
-            });
-        } else {
-            try self.func.body.emit(self.spv.gpa, .OpPtrAccessChain, .{
-                .id_result_type = result_ty_id,
-                .id_result = result_id,
-                .base = base,
-                .element = element,
-                .indexes = ids,
-            });
+        switch (self.spv.target.os.tag) {
+            .opencl, .amdhsa => {
+                try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
+                    .id_result_type = result_ty_id,
+                    .id_result = result_id,
+                    .base = base,
+                    .element = element,
+                    .indexes = ids,
+                });
+            },
+            else => {
+                try self.func.body.emit(self.spv.gpa, .OpPtrAccessChain, .{
+                    .id_result_type = result_ty_id,
+                    .id_result = result_id,
+                    .base = base,
+                    .element = element,
+                    .indexes = ids,
+                });
+            },
         }
         return result_id;
     }
@@ -4681,9 +4697,8 @@ const NavGen = struct {
             const field_int_ty = try self.pt.intType(.unsigned, ty_bit_size);
             const field_int_id = blk: {
                 if (field_ty.isPtrAtRuntime(zcu)) {
-                    assert(self.spv.hasFeature(.addresses) or
-                        (self.spv.hasFeature(.physical_storage_buffer) and
-                        field_ty.ptrAddressSpace(zcu) == .storage_buffer));
+                    assert(self.spv.target.cpu.arch == .spirv64 and
+                        field_ty.ptrAddressSpace(zcu) == .storage_buffer);
                     break :blk try self.intFromPtr(field_id);
                 }
                 break :blk try self.bitCast(field_int_ty, field_ty, field_id);
@@ -5333,7 +5348,10 @@ const NavGen = struct {
             .initializer = options.initializer,
         });

-        if (self.spv.hasFeature(.shader)) return var_id;
+        switch (self.spv.target.os.tag) {
+            .vulkan, .opengl => return var_id,
+            else => {},
+        }

         switch (options.storage_class) {
             .Generic => {
@@ -336,65 +336,55 @@ fn entryPoints(self: *Module) !Section {

 pub fn finalize(self: *Module, a: Allocator) ![]Word {
     // Emit capabilities and extensions
-    for (std.Target.spirv.all_features) |feature| {
-        if (self.target.cpu.features.isEnabled(feature.index)) {
-            const feature_tag: std.Target.spirv.Feature = @enumFromInt(feature.index);
-            switch (feature_tag) {
-                // Versions
-                .v1_0, .v1_1, .v1_2, .v1_3, .v1_4, .v1_5, .v1_6 => {},
-                // Features with no dependencies
-                .int64 => try self.addCapability(.Int64),
-                .float16 => try self.addCapability(.Float16),
-                .float64 => try self.addCapability(.Float64),
-                .matrix => try self.addCapability(.Matrix),
-                .storage_push_constant16 => {
-                    try self.addExtension("SPV_KHR_16bit_storage");
-                    try self.addCapability(.StoragePushConstant16);
-                },
-                .arbitrary_precision_integers => {
-                    try self.addExtension("SPV_INTEL_arbitrary_precision_integers");
-                    try self.addCapability(.ArbitraryPrecisionIntegersINTEL);
-                },
-                .addresses => try self.addCapability(.Addresses),
-                // Kernel
-                .kernel => try self.addCapability(.Kernel),
-                .generic_pointer => try self.addCapability(.GenericPointer),
-                .vector16 => try self.addCapability(.Vector16),
-                // Shader
-                .shader => try self.addCapability(.Shader),
-                .variable_pointers => {
-                    try self.addExtension("SPV_KHR_variable_pointers");
-                    try self.addCapability(.VariablePointersStorageBuffer);
-                    try self.addCapability(.VariablePointers);
-                },
-                .physical_storage_buffer => {
-                    try self.addExtension("SPV_KHR_physical_storage_buffer");
-                    try self.addCapability(.PhysicalStorageBufferAddresses);
-                },
-            }
-        }
-    }
+    switch (self.target.os.tag) {
+        .opengl => {
+            try self.addCapability(.Shader);
+            try self.addCapability(.Matrix);
+        },
+        .vulkan => {
+            try self.addCapability(.Shader);
+            try self.addCapability(.Matrix);
+            if (self.target.cpu.arch == .spirv64) {
+                try self.addExtension("SPV_KHR_physical_storage_buffer");
+                try self.addCapability(.PhysicalStorageBufferAddresses);
+            }
+        },
+        .opencl, .amdhsa => {
+            try self.addCapability(.Kernel);
+            try self.addCapability(.Addresses);
+        },
+        else => unreachable,
+    }
+    if (self.target.cpu.arch == .spirv64) try self.addCapability(.Int64);
+    if (self.target.cpu.has(.spirv, .int64)) try self.addCapability(.Int64);
+    if (self.target.cpu.has(.spirv, .float16)) try self.addCapability(.Float16);
+    if (self.target.cpu.has(.spirv, .float64)) try self.addCapability(.Float64);
+    if (self.target.cpu.has(.spirv, .generic_pointer)) try self.addCapability(.GenericPointer);
+    if (self.target.cpu.has(.spirv, .vector16)) try self.addCapability(.Vector16);
+    if (self.target.cpu.has(.spirv, .storage_push_constant16)) {
+        try self.addExtension("SPV_KHR_16bit_storage");
+        try self.addCapability(.StoragePushConstant16);
+    }
+    if (self.target.cpu.has(.spirv, .arbitrary_precision_integers)) {
+        try self.addExtension("SPV_INTEL_arbitrary_precision_integers");
+        try self.addCapability(.ArbitraryPrecisionIntegersINTEL);
+    }
+    if (self.target.cpu.has(.spirv, .variable_pointers)) {
+        try self.addExtension("SPV_KHR_variable_pointers");
+        try self.addCapability(.VariablePointersStorageBuffer);
+        try self.addCapability(.VariablePointers);
+    }
     // These are well supported
     try self.addCapability(.Int8);
     try self.addCapability(.Int16);

     // Emit memory model
-    const addressing_model: spec.AddressingModel = blk: {
-        if (self.hasFeature(.shader)) {
-            if (self.hasFeature(.physical_storage_buffer)) {
-                assert(self.target.cpu.arch == .spirv64);
-                break :blk .PhysicalStorageBuffer64;
-            }
-            assert(self.target.cpu.arch == .spirv);
-            break :blk .Logical;
-        }
-
-        assert(self.hasFeature(.kernel));
-        break :blk switch (self.target.cpu.arch) {
-            .spirv32 => .Physical32,
-            .spirv64 => .Physical64,
-            else => unreachable,
-        };
-    };
+    const addressing_model: spec.AddressingModel = switch (self.target.os.tag) {
+        .opengl => .Logical,
+        .vulkan => if (self.target.cpu.arch == .spirv32) .Logical else .PhysicalStorageBuffer64,
+        .opencl => if (self.target.cpu.arch == .spirv32) .Physical32 else .Physical64,
+        .amdhsa => .Physical64,
+        else => unreachable,
+    };
     try self.sections.memory_model.emit(self.gpa, .OpMemoryModel, .{
         .addressing_model = addressing_model,
@@ -88,7 +88,7 @@ pub const Env = enum {
             .powerpc_backend,
             .riscv64_backend,
             .sparc64_backend,
-            .spirv64_backend,
+            .spirv_backend,
             .lld_linker,
             .coff_linker,
             .elf_linker,
@@ -183,7 +183,7 @@ pub const Env = enum {
             else => Env.sema.supports(feature),
         },
         .spirv => switch (feature) {
-            .spirv64_backend,
+            .spirv_backend,
             .spirv_linker,
             => true,
             else => Env.sema.supports(feature),
@@ -258,7 +258,7 @@ pub const Feature = enum {
     powerpc_backend,
     riscv64_backend,
     sparc64_backend,
-    spirv64_backend,
+    spirv_backend,

     lld_linker,
     coff_linker,
@@ -64,7 +64,7 @@ pub fn createEmpty(
     assert(!comp.config.use_llvm); // Caught by Compilation.Config.resolve
     assert(target.ofmt == .spirv); // Caught by Compilation.Config.resolve
     switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => {},
+        .spirv32, .spirv64 => {},
         else => unreachable, // Caught by Compilation.Config.resolve.
     }
     switch (target.os.tag) {
@@ -179,7 +179,6 @@ pub fn hasLlvmSupport(target: *const std.Target, ofmt: std.Target.ObjectFormat)
         .riscv64,
         .sparc,
         .sparc64,
-        .spirv,
         .spirv32,
         .spirv64,
         .s390x,
@@ -241,7 +240,7 @@ pub fn supportsStackProtector(target: *const std.Target, backend: std.builtin.Co
         else => {},
     }
     switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => return false,
+        .spirv32, .spirv64 => return false,
         else => {},
     }
     return switch (backend) {
|
|||||||
|
|
||||||
pub fn clangSupportsStackProtector(target: *const std.Target) bool {
|
pub fn clangSupportsStackProtector(target: *const std.Target) bool {
|
||||||
return switch (target.cpu.arch) {
|
return switch (target.cpu.arch) {
|
||||||
.spirv, .spirv32, .spirv64 => return false,
|
.spirv32, .spirv64 => return false,
|
||||||
else => true,
|
else => true,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -270,7 +269,7 @@ pub fn supportsReturnAddress(target: *const std.Target, optimize: std.builtin.Op
         // overhead that we would prefer to avoid in release builds.
         .wasm32, .wasm64 => target.os.tag == .emscripten and optimize == .Debug,
         .bpfel, .bpfeb => false,
-        .spirv, .spirv32, .spirv64 => false,
+        .spirv32, .spirv64 => false,
         else => true,
     };
 }
@@ -335,7 +334,7 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
         else => {},
     }
     switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => return false,
+        .spirv32, .spirv64 => return false,
         // Remove this once https://github.com/ziglang/zig/issues/23714 is fixed
         .amdgcn => return false,
         else => {},
@@ -352,7 +351,7 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv

 pub fn canBuildLibUbsanRt(target: *const std.Target) bool {
     switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => return false,
+        .spirv32, .spirv64 => return false,
         // Remove this once https://github.com/ziglang/zig/issues/23715 is fixed
         .nvptx, .nvptx64 => return false,
         else => return true,
@@ -719,7 +718,6 @@ pub fn supportsFunctionAlignment(target: *const std.Target) bool {
     return switch (target.cpu.arch) {
         .nvptx,
         .nvptx64,
-        .spirv,
         .spirv32,
         .spirv64,
         .wasm32,
@@ -816,8 +814,7 @@ pub fn zigBackend(target: *const std.Target, use_llvm: bool) std.builtin.Compile
         .powerpc, .powerpcle, .powerpc64, .powerpc64le => .stage2_powerpc,
         .riscv64 => .stage2_riscv64,
         .sparc64 => .stage2_sparc64,
-        .spirv32 => if (target.os.tag == .opencl) .stage2_spirv else .other,
-        .spirv, .spirv64 => .stage2_spirv,
+        .spirv32, .spirv64 => .stage2_spirv,
         .wasm32, .wasm64 => .stage2_wasm,
         .x86 => .stage2_x86,
         .x86_64 => .stage2_x86_64,
@@ -389,7 +389,6 @@ fn testFunction() !void {

     // Avoid looking at `typeInfoFooAligned` on targets which don't support function alignment.
     switch (builtin.target.cpu.arch) {
-        .spirv,
         .spirv32,
         .spirv64,
         .wasm32,
@@ -2,6 +2,6 @@ export fn entry() align(64) void {}

 // error
 // backend=stage2
-// target=nvptx-cuda,nvptx64-cuda,spirv-vulkan,spirv32-opencl,spirv64-opencl,wasm32-freestanding,wasm64-freestanding
+// target=nvptx-cuda,nvptx64-cuda,spirv32-opengl,spirv32-vulkan,spirv32-opencl,spirv64-opencl,spirv64-amdhsa,wasm32-freestanding,wasm64-freestanding
 //
 // :1:25: error: target does not support function alignment
@@ -1440,7 +1440,7 @@ const test_targets = blk: {
     .{
         .target = std.Target.Query.parse(.{
             .arch_os_abi = "spirv64-vulkan",
-            .cpu_features = "vulkan_v1_2+physical_storage_buffer+int64+float16+float64",
+            .cpu_features = "vulkan_v1_2+float16+float64",
         }) catch unreachable,
         .use_llvm = false,
         .use_lld = false,
||||||
@ -2527,7 +2527,7 @@ fn wouldUseLlvm(use_llvm: ?bool, query: std.Target.Query, optimize_mode: Optimiz
|
|||||||
const cpu_arch = query.cpu_arch orelse builtin.cpu.arch;
|
const cpu_arch = query.cpu_arch orelse builtin.cpu.arch;
|
||||||
switch (cpu_arch) {
|
switch (cpu_arch) {
|
||||||
.x86_64 => if (std.Target.ptrBitWidth_arch_abi(cpu_arch, query.abi orelse .none) != 64) return true,
|
.x86_64 => if (std.Target.ptrBitWidth_arch_abi(cpu_arch, query.abi orelse .none) != 64) return true,
|
||||||
.spirv, .spirv32, .spirv64 => return false,
|
.spirv32, .spirv64 => return false,
|
||||||
else => return true,
|
else => return true,
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
|
|||||||
@@ -1105,11 +1105,6 @@ const targets = [_]ArchTarget{
                .desc = "Enable Float64 capability",
                .deps = &.{"v1_0"},
            },
-            .{
-                .zig_name = "matrix",
-                .desc = "Enable Matrix capability",
-                .deps = &.{"v1_0"},
-            },
            .{
                .zig_name = "storage_push_constant16",
                .desc = "Enable SPV_KHR_16bit_storage extension and the StoragePushConstant16 capability",
@@ -1120,52 +1115,32 @@ const targets = [_]ArchTarget{
                .desc = "Enable SPV_INTEL_arbitrary_precision_integers extension and the ArbitraryPrecisionIntegersINTEL capability",
                .deps = &.{"v1_5"},
            },
-            .{
-                .zig_name = "kernel",
-                .desc = "Enable Kernel capability",
-                .deps = &.{"v1_0"},
-            },
-            .{
-                .zig_name = "addresses",
-                .desc = "Enable Addresses capability",
-                .deps = &.{"v1_0"},
-            },
            .{
                .zig_name = "generic_pointer",
                .desc = "Enable GenericPointer capability",
-                .deps = &.{ "v1_0", "addresses" },
+                .deps = &.{"v1_0"},
            },
            .{
                .zig_name = "vector16",
                .desc = "Enable Vector16 capability",
-                .deps = &.{ "v1_0", "kernel" },
-            },
-            .{
-                .zig_name = "shader",
-                .desc = "Enable Shader capability",
-                .deps = &.{ "v1_0", "matrix" },
+                .deps = &.{"v1_0"},
            },
            .{
                .zig_name = "variable_pointers",
                .desc = "Enable SPV_KHR_physical_storage_buffer extension and the PhysicalStorageBufferAddresses capability",
                .deps = &.{"v1_0"},
            },
-            .{
-                .zig_name = "physical_storage_buffer",
-                .desc = "Enable SPV_KHR_variable_pointers extension and the (VariablePointers, VariablePointersStorageBuffer) capabilities",
-                .deps = &.{"v1_0"},
-            },
        },
        .extra_cpus = &.{
            .{
                .llvm_name = null,
                .zig_name = "vulkan_v1_2",
-                .features = &.{ "v1_5", "shader" },
+                .features = &.{"v1_5"},
            },
            .{
                .llvm_name = null,
                .zig_name = "opencl_v2",
-                .features = &.{ "v1_2", "kernel", "addresses", "generic_pointer" },
+                .features = &.{"v1_2"},
            },
        },
    },