diff --git a/lib/compiler/aro/aro/target.zig b/lib/compiler/aro/aro/target.zig
index bc7b79a9c3..457b93e0cf 100644
--- a/lib/compiler/aro/aro/target.zig
+++ b/lib/compiler/aro/aro/target.zig
@@ -486,7 +486,6 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
         .kalimba,
         .lanai,
         .wasm32,
-        .spirv,
         .spirv32,
         .loongarch32,
         .xtensa,
@@ -554,7 +553,6 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
         .powerpcle => copy.cpu.arch = .powerpc64le,
         .riscv32 => copy.cpu.arch = .riscv64,
         .sparc => copy.cpu.arch = .sparc64,
-        .spirv => copy.cpu.arch = .spirv64,
         .spirv32 => copy.cpu.arch = .spirv64,
         .thumb => copy.cpu.arch = .aarch64,
         .thumbeb => copy.cpu.arch = .aarch64_be,
@@ -609,7 +607,6 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
         .xtensa => "xtensa",
         .nvptx => "nvptx",
         .nvptx64 => "nvptx64",
-        .spirv => "spirv",
         .spirv32 => "spirv32",
         .spirv64 => "spirv64",
         .kalimba => "kalimba",
diff --git a/lib/compiler_rt/common.zig b/lib/compiler_rt/common.zig
index 978e701130..f5423019f1 100644
--- a/lib/compiler_rt/common.zig
+++ b/lib/compiler_rt/common.zig
@@ -120,7 +120,6 @@ pub fn F16T(comptime OtherType: type) type {
         .nvptx64,
         .riscv32,
         .riscv64,
-        .spirv,
         .spirv32,
         .spirv64,
         => f16,
diff --git a/lib/std/Target.zig b/lib/std/Target.zig
index b31f6c2de9..18d37e6bf6 100644
--- a/lib/std/Target.zig
+++ b/lib/std/Target.zig
@@ -1066,7 +1066,7 @@ pub const ObjectFormat = enum {
             .uefi, .windows => .coff,
             .zos => .goff,
             else => switch (arch) {
-                .spirv, .spirv32, .spirv64 => .spirv,
+                .spirv32, .spirv64 => .spirv,
                 .wasm32, .wasm64 => .wasm,
                 else => .elf,
             },
@@ -1106,7 +1106,6 @@ pub fn toElfMachine(target: *const Target) std.elf.EM {
         .nvptx,
         .nvptx64,
-        .spirv,
         .spirv32,
         .spirv64,
         .wasm32,
@@ -1155,7 +1154,6 @@ pub fn toCoffMachine(target: *const Target) std.coff.MachineType {
         .s390x,
         .sparc,
         .sparc64,
-        .spirv,
         .spirv32,
         .spirv64,
         .ve,
@@ -1368,7 +1366,6 @@ pub const Cpu = struct {
         s390x,
         sparc,
         sparc64,
-        spirv,
         spirv32,
         spirv64,
         ve,
@@ -1454,7 +1451,7 @@
             .riscv32, .riscv64 => .riscv,
             .s390x => .s390x,
             .sparc, .sparc64 => .sparc,
-            .spirv, .spirv32, .spirv64 => .spirv,
+            .spirv32, .spirv64 => .spirv,
             .ve => .ve,
             .wasm32, .wasm64 => .wasm,
             .x86, .x86_64 => .x86,
@@ -1558,7 +1555,7 @@ pub const Cpu = struct {
         pub inline fn isSpirV(arch: Arch) bool {
             return switch (arch) {
-                .spirv, .spirv32, .spirv64 => true,
+                .spirv32, .spirv64 => true,
                 else => false,
             };
         }
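With the generic `spirv` tag removed from `std.Target.Cpu.Arch`, code that used to special-case `.spirv` next to `.spirv32`/`.spirv64` now tells shader-style and kernel-style SPIR-V apart by OS tag, the same way the backend hunks further down do. A minimal illustrative sketch of that pattern; the `SpirvFlavor` enum and `spirvFlavor` helper are examples written for this note, not part of the change:

```zig
const std = @import("std");

const SpirvFlavor = enum { shader, kernel };

/// Hypothetical helper: classify a SPIR-V target by OS tag, mirroring how the
/// compiler now distinguishes Vulkan/OpenGL (shader) from OpenCL/AMDHSA (kernel).
fn spirvFlavor(target: std.Target) ?SpirvFlavor {
    if (!target.cpu.arch.isSpirV()) return null;
    return switch (target.os.tag) {
        .vulkan, .opengl => .shader,
        .opencl, .amdhsa => .kernel,
        else => null,
    };
}

test "parse a SPIR-V target query" {
    // Only the parsed query is checked here; resolving it to a full
    // std.Target is unchanged by this patch.
    const q = try std.Target.Query.parse(.{ .arch_os_abi = "spirv32-vulkan" });
    try std.testing.expectEqual(std.Target.Cpu.Arch.spirv32, q.cpu_arch.?);
    try std.testing.expectEqual(std.Target.Os.Tag.vulkan, q.os_tag.?);
}
```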
@@ -1614,7 +1611,6 @@ pub const Cpu = struct {
             .thumb,
             .ve,
             // GPU bitness is opaque. For now, assume little endian.
-            .spirv,
             .spirv32,
             .spirv64,
             .loongarch32,
@@ -1843,7 +1839,7 @@ pub const Cpu = struct {
             .spirv_kernel,
             .spirv_fragment,
             .spirv_vertex,
-            => &.{ .spirv, .spirv32, .spirv64 },
+            => &.{ .spirv32, .spirv64 },
         };
     }
     };
@@ -2638,7 +2634,6 @@ pub fn ptrBitWidth_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) u16 {
         .sparc64,
         .s390x,
         .ve,
-        .spirv,
         .spirv64,
         .loongarch64,
         => 64,
@@ -3157,7 +3152,6 @@ pub fn cTypeAlignment(target: *const Target, c_type: CType) u16 {
         .riscv32,
         .riscv64,
         .sparc64,
-        .spirv,
         .spirv32,
         .spirv64,
         .x86_64,
@@ -3250,7 +3244,6 @@ pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 {
         .riscv32,
         .riscv64,
         .sparc64,
-        .spirv,
         .spirv32,
         .spirv64,
         .x86_64,
@@ -3319,7 +3312,6 @@ pub fn cMaxIntAlignment(target: *const Target) u16 {
         .loongarch32,
         .loongarch64,
         .m68k,
-        .spirv,
         .spirv32,
         .spirv64,
         .ve,
@@ -3389,7 +3381,7 @@ pub fn cCallingConvention(target: *const Target) ?std.builtin.CallingConvention
         .xtensa => .{ .xtensa_call0 = .{} },
         .amdgcn => .{ .amdgcn_device = .{} },
         .nvptx, .nvptx64 => .nvptx_device,
-        .spirv, .spirv32, .spirv64 => .spirv_device,
+        .spirv32, .spirv64 => .spirv_device,
     };
 }
diff --git a/lib/std/Target/spirv.zig b/lib/std/Target/spirv.zig
index 229d77a6d6..28fda54e9d 100644
--- a/lib/std/Target/spirv.zig
+++ b/lib/std/Target/spirv.zig
@@ -5,16 +5,11 @@ const CpuFeature = std.Target.Cpu.Feature;
 const CpuModel = std.Target.Cpu.Model;

 pub const Feature = enum {
-    addresses,
     arbitrary_precision_integers,
     float16,
     float64,
     generic_pointer,
     int64,
-    kernel,
-    matrix,
-    physical_storage_buffer,
-    shader,
     storage_push_constant16,
     v1_0,
     v1_1,
@@ -37,13 +32,6 @@ pub const all_features = blk: {
     const len = @typeInfo(Feature).@"enum".fields.len;
     std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
     var result: [len]CpuFeature = undefined;
-    result[@intFromEnum(Feature.addresses)] = .{
-        .llvm_name = null,
-        .description = "Enable Addresses capability",
-        .dependencies = featureSet(&[_]Feature{
-            .v1_0,
-        }),
-    };
     result[@intFromEnum(Feature.arbitrary_precision_integers)] = .{
         .llvm_name = null,
         .description = "Enable SPV_INTEL_arbitrary_precision_integers extension and the ArbitraryPrecisionIntegersINTEL capability",
@@ -69,7 +57,7 @@
         .llvm_name = null,
         .description = "Enable GenericPointer capability",
         .dependencies = featureSet(&[_]Feature{
-            .addresses,
+            .v1_0,
         }),
     };
     result[@intFromEnum(Feature.int64)] = .{
@@ -79,34 +67,6 @@
             .v1_0,
         }),
     };
-    result[@intFromEnum(Feature.kernel)] = .{
-        .llvm_name = null,
-        .description = "Enable Kernel capability",
-        .dependencies = featureSet(&[_]Feature{
-            .v1_0,
-        }),
-    };
-    result[@intFromEnum(Feature.matrix)] = .{
-        .llvm_name = null,
-        .description = "Enable Matrix capability",
-        .dependencies = featureSet(&[_]Feature{
-            .v1_0,
-        }),
-    };
-    result[@intFromEnum(Feature.physical_storage_buffer)] = .{
-        .llvm_name = null,
-        .description = "Enable SPV_KHR_variable_pointers extension and the (VariablePointers, VariablePointersStorageBuffer) capabilities",
-        .dependencies = featureSet(&[_]Feature{
-            .v1_0,
-        }),
-    };
-    result[@intFromEnum(Feature.shader)] = .{
-        .llvm_name = null,
-        .description = "Enable Shader capability",
-        .dependencies = featureSet(&[_]Feature{
-            .matrix,
-        }),
-    };
     result[@intFromEnum(Feature.storage_push_constant16)] = .{
         .llvm_name = null,
         .description = "Enable SPV_KHR_16bit_storage extension and the StoragePushConstant16 capability",
@@ -172,7 +132,7 @@
         .llvm_name = null,
         .description = "Enable Vector16 capability",
         .dependencies = featureSet(&[_]Feature{
-            .kernel,
+            .v1_0,
         }),
     };
     const ti = @typeInfo(Feature);
@@ -193,8 +153,6 @@ pub const cpu = struct {
         .name = "opencl_v2",
         .llvm_name = null,
         .features = featureSet(&[_]Feature{
-            .generic_pointer,
-            .kernel,
             .v1_2,
         }),
     };
@@ -202,7 +160,6 @@
         .name = "vulkan_v1_2",
         .llvm_name = null,
         .features = featureSet(&[_]Feature{
-            .shader,
             .v1_5,
         }),
     };
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index d262a70293..9df042f86f 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -189,7 +189,7 @@ pub const CallingConvention = union(enum(u8)) {
     pub const kernel: CallingConvention = switch (builtin.target.cpu.arch) {
         .amdgcn => .amdgcn_kernel,
         .nvptx, .nvptx64 => .nvptx_kernel,
-        .spirv, .spirv32, .spirv64 => .spirv_kernel,
+        .spirv32, .spirv64 => .spirv_kernel,
         else => unreachable,
     };
diff --git a/lib/std/debug/Dwarf/abi.zig b/lib/std/debug/Dwarf/abi.zig
index e880b12863..b8b644e026 100644
--- a/lib/std/debug/Dwarf/abi.zig
+++ b/lib/std/debug/Dwarf/abi.zig
@@ -14,7 +14,6 @@ pub fn supportsUnwinding(target: *const std.Target) bool {
         .amdgcn,
         .nvptx,
         .nvptx64,
-        .spirv,
         .spirv32,
         .spirv64,
         => false,
diff --git a/src/Sema.zig b/src/Sema.zig
index 7348c6fce3..20be9e7052 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -26289,7 +26289,7 @@ fn zirWorkItem(
     switch (target.cpu.arch) {
         // TODO: Allow for other GPU targets.
-        .amdgcn, .spirv, .spirv64, .spirv32, .nvptx, .nvptx64 => {},
+        .amdgcn, .spirv64, .spirv32, .nvptx, .nvptx64 => {},
         else => {
             return sema.fail(block, builtin_src, "builtin only available on GPU targets; targeted architecture is {s}", .{@tagName(target.cpu.arch)});
         },
diff --git a/src/Zcu.zig b/src/Zcu.zig
index cb7e8d2a88..e69a66353f 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -3935,7 +3935,6 @@ pub fn atomicPtrAlignment(
         .s390x,
         .wasm64,
         .ve,
-        .spirv,
         .spirv64,
         .loongarch64,
         => 64,
diff --git a/src/codegen.zig b/src/codegen.zig
index 11b6eedc86..74a5b90d25 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -37,7 +37,7 @@ fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
         .stage2_powerpc => .powerpc_backend,
         .stage2_riscv64 => .riscv64_backend,
         .stage2_sparc64 => .sparc64_backend,
-        .stage2_spirv => .spirv64_backend,
+        .stage2_spirv => .spirv_backend,
         .stage2_wasm => .wasm_backend,
         .stage2_x86 => .x86_backend,
         .stage2_x86_64 => .x86_64_backend,
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index e1671e9140..225bc050d5 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -93,8 +93,10 @@ pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8
         .xtensa => "xtensa",
         .nvptx => "nvptx",
         .nvptx64 => "nvptx64",
-        .spirv => "spirv",
-        .spirv32 => "spirv32",
+        .spirv32 => switch (target.os.tag) {
+            .vulkan, .opengl => "spirv",
+            else => "spirv32",
+        },
         .spirv64 => "spirv64",
         .lanai => "lanai",
         .wasm32 => "wasm32",
@@ -150,9 +152,6 @@ pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8
         .powerpc => subArchName(target, .powerpc, .{
             .{ .spe, "spe" },
         }),
-        .spirv => subArchName(target, .spirv, .{
-            .{ .v1_5, "1.5" },
-        }),
         .spirv32, .spirv64 => subArchName(target, .spirv, .{
             .{ .v1_5, "1.5" },
             .{ .v1_4, "1.4" },
@@ -441,8 +440,10 @@ pub fn dataLayout(target: *const std.Target) []const u8 {
         else
             "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
         },
-        .spirv => "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
-        .spirv32 => "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
+        .spirv32 => switch (target.os.tag) {
+            .vulkan, .opengl => "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
+            else => "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
+        },
         .spirv64 => "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-G1",
         .wasm32 => if (target.os.tag == .emscripten)
             "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-i128:128-f128:64-n32:64-S128-ni:1:10:20"
@@ -13129,7 +13130,6 @@ pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
             llvm.LLVMInitializeLoongArchAsmPrinter();
             llvm.LLVMInitializeLoongArchAsmParser();
         },
-        .spirv,
         .spirv32,
         .spirv64,
         => {
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 1b99f1bad2..858c04d53d 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -439,7 +439,7 @@ const NavGen = struct {
     fn importExtendedSet(self: *NavGen) !IdResult {
         const target = self.spv.target;
         return switch (target.os.tag) {
-            .opencl => try self.spv.importInstructionSet(.@"OpenCL.std"),
+            .opencl, .amdhsa => try self.spv.importInstructionSet(.@"OpenCL.std"),
             .vulkan, .opengl => try self.spv.importInstructionSet(.@"GLSL.std.450"),
             else => unreachable,
         };
@@ -561,7 +561,7 @@
     }

     fn castToGeneric(self: *NavGen, type_id: IdRef, ptr_id: IdRef) !IdRef {
-        if (self.spv.hasFeature(.kernel)) {
+        if (self.spv.hasFeature(.generic_pointer)) {
             const result_id = self.spv.allocId();
             try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
                 .id_result_type = type_id,
@@ -601,16 +601,18 @@
         // We require Int8 and Int16 capabilities and benefit Int64 when available.
         // 32-bit integers are always supported (see spec, 2.16.1, Data rules).
-        const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{
-            .{ .bits = 8, .feature = null },
-            .{ .bits = 16, .feature = null },
-            .{ .bits = 32, .feature = null },
-            .{ .bits = 64, .feature = .int64 },
+        const ints = [_]struct { bits: u16, enabled: bool }{
+            .{ .bits = 8, .enabled = true },
+            .{ .bits = 16, .enabled = true },
+            .{ .bits = 32, .enabled = true },
+            .{
+                .bits = 64,
+                .enabled = self.spv.hasFeature(.int64) or self.spv.target.cpu.arch == .spirv64,
+            },
         };

         for (ints) |int| {
-            const has_feature = if (int.feature) |feature| self.spv.hasFeature(feature) else true;
-            if (bits <= int.bits and has_feature) return .{ int.bits, false };
+            if (bits <= int.bits and int.enabled) return .{ int.bits, false };
         }

         // Big int
@@ -624,7 +626,10 @@
     /// is no way of knowing whether those are actually supported.
     /// TODO: Maybe this should be cached?
     fn largestSupportedIntBits(self: *NavGen) u16 {
-        return if (self.spv.hasFeature(.int64)) 64 else 32;
+        if (self.spv.hasFeature(.int64) or self.spv.target.cpu.arch == .spirv64) {
+            return 64;
+        }
+        return 32;
     }

     fn arithmeticTypeInfo(self: *NavGen, ty: Type) ArithmeticTypeInfo {
@@ -736,8 +741,8 @@
             });
         }

-        const final_value: spec.LiteralContextDependentNumber = blk: {
-            if (self.spv.hasFeature(.kernel)) {
+        const final_value: spec.LiteralContextDependentNumber = switch (self.spv.target.os.tag) {
+            .opencl, .amdhsa => blk: {
                 const value64: u64 = switch (signedness) {
                     .signed => @bitCast(@as(i64, @intCast(value))),
                     .unsigned => @as(u64, @intCast(value)),
@@ -754,13 +759,12 @@
                     33...64 => .{ .uint64 = truncated_value },
                     else => unreachable,
                 };
-            }
-
-            break :blk switch (backing_bits) {
+            },
+            else => switch (backing_bits) {
                 1...32 => if (signedness == .signed) .{ .int32 = @intCast(value) } else .{ .uint32 = @intCast(value) },
                 33...64 => if (signedness == .signed) .{ .int64 = value } else .{ .uint64 = value },
                 else => unreachable,
-            };
+            },
         };

         const result_id = try self.spv.constant(result_ty_id, final_value);
@@ -1276,12 +1280,11 @@
             return self.arrayType(backing_bits / big_int_bits, int_ty);
         }

-        // Kernel only supports unsigned ints.
-        if (self.spv.hasFeature(.kernel)) {
-            return self.spv.intType(.unsigned, backing_bits);
-        }
-
-        return self.spv.intType(signedness, backing_bits);
+        return switch (self.spv.target.os.tag) {
+            // Kernel only supports unsigned ints.
+            .opencl, .amdhsa => return self.spv.intType(.unsigned, backing_bits),
+            else => self.spv.intType(signedness, backing_bits),
+        };
     }

     fn arrayType(self: *NavGen, len: u32, child_ty: IdRef) !IdRef {
@@ -1314,20 +1317,23 @@
         const child_ty_id = try self.resolveType(child_ty, child_repr);

-        if (self.spv.hasFeature(.shader)) {
-            if (child_ty.zigTypeTag(zcu) == .@"struct") {
-                switch (storage_class) {
-                    .Uniform, .PushConstant => try self.spv.decorate(child_ty_id, .Block),
-                    else => {},
+        switch (self.spv.target.os.tag) {
+            .vulkan, .opengl => {
+                if (child_ty.zigTypeTag(zcu) == .@"struct") {
+                    switch (storage_class) {
+                        .Uniform, .PushConstant => try self.spv.decorate(child_ty_id, .Block),
+                        else => {},
+                    }
                 }
-            }
-            switch (ip.indexToKey(child_ty.toIntern())) {
-                .func_type, .opaque_type => {},
-                else => {
-                    try self.spv.decorate(result_id, .{ .ArrayStride = .{ .array_stride = @intCast(child_ty.abiSize(zcu)) } });
-                },
-            }
+                switch (ip.indexToKey(child_ty.toIntern())) {
+                    .func_type, .opaque_type => {},
+                    else => {
+                        try self.spv.decorate(result_id, .{ .ArrayStride = .{ .array_stride = @intCast(child_ty.abiSize(zcu)) } });
+                    },
+                }
+            },
+            else => {},
         }

         try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{
@@ -1554,10 +1560,13 @@
             return try self.arrayType(1, elem_ty_id);
         } else {
             const result_id = try self.arrayType(total_len, elem_ty_id);
-            if (self.spv.hasFeature(.shader)) {
-                try self.spv.decorate(result_id, .{ .ArrayStride = .{
-                    .array_stride = @intCast(elem_ty.abiSize(zcu)),
-                } });
+            switch (self.spv.target.os.tag) {
+                .vulkan, .opengl => {
+                    try self.spv.decorate(result_id, .{ .ArrayStride = .{
+                        .array_stride = @intCast(elem_ty.abiSize(zcu)),
+                    } });
+                },
+                else => {},
             }
             return result_id;
         }
@@ -1688,11 +1697,15 @@
                 continue;
             }

-            if (self.spv.hasFeature(.shader)) {
-                try self.spv.decorateMember(result_id, index, .{ .Offset = .{
-                    .byte_offset = @intCast(ty.structFieldOffset(field_index, zcu)),
-                } });
+            switch (self.spv.target.os.tag) {
+                .vulkan, .opengl => {
+                    try self.spv.decorateMember(result_id, index, .{ .Offset = .{
+                        .byte_offset = @intCast(ty.structFieldOffset(field_index, zcu)),
+                    } });
+                },
+                else => {},
             }
+
             const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse try ip.getOrPutStringFmt(zcu.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
             try member_types.append(try self.resolveType(field_ty, .indirect));
@@ -1795,28 +1808,23 @@
     fn spvStorageClass(self: *NavGen, as: std.builtin.AddressSpace) StorageClass {
         return switch (as) {
             .generic => if (self.spv.hasFeature(.generic_pointer)) .Generic else .Function,
-            .global => {
-                if (self.spv.hasFeature(.kernel)) return .CrossWorkgroup;
-                return .StorageBuffer;
+            .global => switch (self.spv.target.os.tag) {
+                .opencl, .amdhsa => .CrossWorkgroup,
+                else => .StorageBuffer,
             },
             .push_constant => {
-                assert(self.spv.hasFeature(.shader));
                 return .PushConstant;
             },
             .output => {
-                assert(self.spv.hasFeature(.shader));
                 return .Output;
             },
             .uniform => {
-                assert(self.spv.hasFeature(.shader));
                 return .Uniform;
             },
             .storage_buffer => {
-                assert(self.spv.hasFeature(.shader));
                 return .StorageBuffer;
             },
             .physical_storage_buffer => {
-                assert(self.spv.hasFeature(.physical_storage_buffer));
                 return .PhysicalStorageBuffer;
             },
             .constant => .UniformConstant,
@@ -2768,7 +2776,7 @@
         const p_error_id = self.spv.allocId();
         switch (target.os.tag) {
-            .opencl => {
+            .opencl, .amdhsa => {
                 const kernel_proto_ty_id = try self.functionType(Type.void, &.{ptr_anyerror_ty});

                 try section.emit(self.spv.gpa, .OpFunction, .{
@@ -2876,7 +2884,7 @@
         const execution_mode: spec.ExecutionModel = switch (target.os.tag) {
             .vulkan, .opengl => .GLCompute,
-            .opencl => .Kernel,
+            .opencl, .amdhsa => .Kernel,
             else => unreachable,
         };
@@ -3630,8 +3638,13 @@
             .integer, .strange_integer => {
                 const abs_value = try self.buildUnary(.i_abs, value);

-                if (value.ty.intInfo(zcu).signedness == .signed and self.spv.hasFeature(.shader)) {
-                    return self.todo("perform bitcast after @abs", .{});
+                switch (self.spv.target.os.tag) {
+                    .vulkan, .opengl => {
+                        if (value.ty.intInfo(zcu).signedness == .signed) {
+                            return self.todo("perform bitcast after @abs", .{});
+                        }
+                    },
+                    else => {},
                 }

                 return try self.normalize(abs_value, self.arithmeticTypeInfo(result_ty));
@@ -4156,22 +4169,25 @@
         defer self.gpa.free(ids);

         const result_id = self.spv.allocId();
-        if (self.spv.hasFeature(.addresses)) {
-            try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
-                .id_result_type = result_ty_id,
-                .id_result = result_id,
-                .base = base,
-                .element = element,
-                .indexes = ids,
-            });
-        } else {
-            try self.func.body.emit(self.spv.gpa, .OpPtrAccessChain, .{
-                .id_result_type = result_ty_id,
-                .id_result = result_id,
-                .base = base,
-                .element = element,
-                .indexes = ids,
-            });
+        switch (self.spv.target.os.tag) {
+            .opencl, .amdhsa => {
+                try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
+                    .id_result_type = result_ty_id,
+                    .id_result = result_id,
+                    .base = base,
+                    .element = element,
+                    .indexes = ids,
+                });
+            },
+            else => {
+                try self.func.body.emit(self.spv.gpa, .OpPtrAccessChain, .{
+                    .id_result_type = result_ty_id,
+                    .id_result = result_id,
+                    .base = base,
+                    .element = element,
+                    .indexes = ids,
+                });
+            },
         }
         return result_id;
     }
@@ -4681,9 +4697,8 @@
         const field_int_ty = try self.pt.intType(.unsigned, ty_bit_size);
         const field_int_id = blk: {
             if (field_ty.isPtrAtRuntime(zcu)) {
-                assert(self.spv.hasFeature(.addresses) or
-                    (self.spv.hasFeature(.physical_storage_buffer) and
-                    field_ty.ptrAddressSpace(zcu) == .storage_buffer));
+                assert(self.spv.target.cpu.arch == .spirv64 and
+                    field_ty.ptrAddressSpace(zcu) == .storage_buffer);
                 break :blk try self.intFromPtr(field_id);
             }
             break :blk try self.bitCast(field_int_ty, field_ty, field_id);
@@ -5333,7 +5348,10 @@
             .initializer = options.initializer,
         });

-        if (self.spv.hasFeature(.shader)) return var_id;
+        switch (self.spv.target.os.tag) {
+            .vulkan, .opengl => return var_id,
+            else => {},
+        }

         switch (options.storage_class) {
             .Generic => {
diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig
index f13d093b09..0b2f1aaab3 100644
--- a/src/codegen/spirv/Module.zig
+++ b/src/codegen/spirv/Module.zig
@@ -336,65 +336,55 @@ fn entryPoints(self: *Module) !Section {

 pub fn finalize(self: *Module, a: Allocator) ![]Word {
     // Emit capabilities and extensions
-    for (std.Target.spirv.all_features) |feature| {
-        if (self.target.cpu.features.isEnabled(feature.index)) {
-            const feature_tag: std.Target.spirv.Feature = @enumFromInt(feature.index);
-            switch (feature_tag) {
-                // Versions
-                .v1_0, .v1_1, .v1_2, .v1_3, .v1_4, .v1_5, .v1_6 => {},
-                // Features with no dependencies
-                .int64 => try self.addCapability(.Int64),
-                .float16 => try self.addCapability(.Float16),
-                .float64 => try self.addCapability(.Float64),
-                .matrix => try self.addCapability(.Matrix),
-                .storage_push_constant16 => {
-                    try self.addExtension("SPV_KHR_16bit_storage");
-                    try self.addCapability(.StoragePushConstant16);
-                },
-                .arbitrary_precision_integers => {
-                    try self.addExtension("SPV_INTEL_arbitrary_precision_integers");
-                    try self.addCapability(.ArbitraryPrecisionIntegersINTEL);
-                },
-                .addresses => try self.addCapability(.Addresses),
-                // Kernel
-                .kernel => try self.addCapability(.Kernel),
-                .generic_pointer => try self.addCapability(.GenericPointer),
-                .vector16 => try self.addCapability(.Vector16),
-                // Shader
-                .shader => try self.addCapability(.Shader),
-                .variable_pointers => {
-                    try self.addExtension("SPV_KHR_variable_pointers");
-                    try self.addCapability(.VariablePointersStorageBuffer);
-                    try self.addCapability(.VariablePointers);
-                },
-                .physical_storage_buffer => {
-                    try self.addExtension("SPV_KHR_physical_storage_buffer");
-                    try self.addCapability(.PhysicalStorageBufferAddresses);
-                },
+    switch (self.target.os.tag) {
+        .opengl => {
+            try self.addCapability(.Shader);
+            try self.addCapability(.Matrix);
+        },
+        .vulkan => {
+            try self.addCapability(.Shader);
+            try self.addCapability(.Matrix);
+            if (self.target.cpu.arch == .spirv64) {
+                try self.addExtension("SPV_KHR_physical_storage_buffer");
+                try self.addCapability(.PhysicalStorageBufferAddresses);
             }
-        }
+        },
+        .opencl, .amdhsa => {
+            try self.addCapability(.Kernel);
+            try self.addCapability(.Addresses);
+        },
+        else => unreachable,
+    }
+    if (self.target.cpu.arch == .spirv64) try self.addCapability(.Int64);
+    if (self.target.cpu.has(.spirv, .int64)) try self.addCapability(.Int64);
+    if (self.target.cpu.has(.spirv, .float16)) try self.addCapability(.Float16);
+    if (self.target.cpu.has(.spirv, .float64)) try self.addCapability(.Float64);
+    if (self.target.cpu.has(.spirv, .generic_pointer)) try self.addCapability(.GenericPointer);
+    if (self.target.cpu.has(.spirv, .vector16)) try self.addCapability(.Vector16);
+    if (self.target.cpu.has(.spirv, .storage_push_constant16)) {
+        try self.addExtension("SPV_KHR_16bit_storage");
+        try self.addCapability(.StoragePushConstant16);
+    }
+    if (self.target.cpu.has(.spirv, .arbitrary_precision_integers)) {
+        try self.addExtension("SPV_INTEL_arbitrary_precision_integers");
+        try self.addCapability(.ArbitraryPrecisionIntegersINTEL);
+    }
+    if (self.target.cpu.has(.spirv, .variable_pointers)) {
+        try self.addExtension("SPV_KHR_variable_pointers");
+        try self.addCapability(.VariablePointersStorageBuffer);
+        try self.addCapability(.VariablePointers);
     }

     // These are well supported
     try self.addCapability(.Int8);
     try self.addCapability(.Int16);

     // Emit memory model
-    const addressing_model: spec.AddressingModel = blk: {
-        if (self.hasFeature(.shader)) {
-            if (self.hasFeature(.physical_storage_buffer)) {
-                assert(self.target.cpu.arch == .spirv64);
-                break :blk .PhysicalStorageBuffer64;
-            }
-            assert(self.target.cpu.arch == .spirv);
-            break :blk .Logical;
-        }
-
-        assert(self.hasFeature(.kernel));
-        break :blk switch (self.target.cpu.arch) {
-            .spirv32 => .Physical32,
-            .spirv64 => .Physical64,
-            else => unreachable,
-        };
+    const addressing_model: spec.AddressingModel = switch (self.target.os.tag) {
+        .opengl => .Logical,
+        .vulkan => if (self.target.cpu.arch == .spirv32) .Logical else .PhysicalStorageBuffer64,
+        .opencl => if (self.target.cpu.arch == .spirv32) .Physical32 else .Physical64,
+        .amdhsa => .Physical64,
+        else => unreachable,
     };

     try self.sections.memory_model.emit(self.gpa, .OpMemoryModel, .{
         .addressing_model = addressing_model,
diff --git a/src/dev.zig b/src/dev.zig
index 25c2d01a4b..7cf09f23b0 100644
--- a/src/dev.zig
+++ b/src/dev.zig
@@ -88,7 +88,7 @@ pub const Env = enum {
             .powerpc_backend,
             .riscv64_backend,
             .sparc64_backend,
-            .spirv64_backend,
+            .spirv_backend,
             .lld_linker,
             .coff_linker,
             .elf_linker,
@@ -183,7 +183,7 @@ pub const Env = enum {
             else => Env.sema.supports(feature),
         },
         .spirv => switch (feature) {
-            .spirv64_backend,
+            .spirv_backend,
             .spirv_linker,
             => true,
             else => Env.sema.supports(feature),
@@ -258,7 +258,7 @@ pub const Feature = enum {
     powerpc_backend,
     riscv64_backend,
     sparc64_backend,
-    spirv64_backend,
+    spirv_backend,
     lld_linker,
     coff_linker,
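With the `shader`/`kernel`/`addresses` feature flags gone, the capability list and addressing model that `Module.finalize` emits above are now a pure function of `os.tag` plus `cpu.arch`. A compact restatement of that mapping, useful as a mental model; the `AddressingModel` enum and `addressingModel` helper below are written for this note and are not part of the compiler:

```zig
const std = @import("std");

const AddressingModel = enum { Logical, Physical32, Physical64, PhysicalStorageBuffer64 };

/// Illustrative mirror of the mapping used in src/codegen/spirv/Module.zig.
fn addressingModel(os: std.Target.Os.Tag, arch: std.Target.Cpu.Arch) AddressingModel {
    return switch (os) {
        .opengl => .Logical,
        .vulkan => if (arch == .spirv32) .Logical else .PhysicalStorageBuffer64,
        .opencl => if (arch == .spirv32) .Physical32 else .Physical64,
        .amdhsa => .Physical64,
        else => unreachable, // other OS tags never reach the SPIR-V backend
    };
}

test addressingModel {
    try std.testing.expectEqual(AddressingModel.Logical, addressingModel(.vulkan, .spirv32));
    try std.testing.expectEqual(AddressingModel.PhysicalStorageBuffer64, addressingModel(.vulkan, .spirv64));
    try std.testing.expectEqual(AddressingModel.Physical32, addressingModel(.opencl, .spirv32));
}
```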
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 1e01a50a6c..7b908d56ed 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -64,7 +64,7 @@ pub fn createEmpty(
     assert(!comp.config.use_llvm); // Caught by Compilation.Config.resolve
     assert(target.ofmt == .spirv); // Caught by Compilation.Config.resolve
     switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => {},
+        .spirv32, .spirv64 => {},
         else => unreachable, // Caught by Compilation.Config.resolve.
     }
     switch (target.os.tag) {
diff --git a/src/target.zig b/src/target.zig
index f969849037..d57e1e4adb 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -179,7 +179,6 @@ pub fn hasLlvmSupport(target: *const std.Target, ofmt: std.Target.ObjectFormat)
         .riscv64,
         .sparc,
         .sparc64,
-        .spirv,
         .spirv32,
         .spirv64,
         .s390x,
@@ -241,7 +240,7 @@ pub fn supportsStackProtector(target: *const std.Target, backend: std.builtin.Co
         else => {},
     }
     switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => return false,
+        .spirv32, .spirv64 => return false,
         else => {},
     }
     return switch (backend) {
@@ -252,7 +251,7 @@ pub fn supportsStackProtector(target: *const std.Target, backend: std.builtin.Co
 pub fn clangSupportsStackProtector(target: *const std.Target) bool {
     return switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => return false,
+        .spirv32, .spirv64 => return false,
         else => true,
     };
 }
@@ -270,7 +269,7 @@ pub fn supportsReturnAddress(target: *const std.Target, optimize: std.builtin.Op
         // overhead that we would prefer to avoid in release builds.
         .wasm32, .wasm64 => target.os.tag == .emscripten and optimize == .Debug,
         .bpfel, .bpfeb => false,
-        .spirv, .spirv32, .spirv64 => false,
+        .spirv32, .spirv64 => false,
         else => true,
     };
 }
@@ -335,7 +334,7 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
         else => {},
     }
     switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => return false,
+        .spirv32, .spirv64 => return false,
         // Remove this once https://github.com/ziglang/zig/issues/23714 is fixed
         .amdgcn => return false,
         else => {},
@@ -352,7 +351,7 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
 pub fn canBuildLibUbsanRt(target: *const std.Target) bool {
     switch (target.cpu.arch) {
-        .spirv, .spirv32, .spirv64 => return false,
+        .spirv32, .spirv64 => return false,
         // Remove this once https://github.com/ziglang/zig/issues/23715 is fixed
         .nvptx, .nvptx64 => return false,
         else => return true,
@@ -719,7 +718,6 @@ pub fn supportsFunctionAlignment(target: *const std.Target) bool {
     return switch (target.cpu.arch) {
         .nvptx,
         .nvptx64,
-        .spirv,
         .spirv32,
         .spirv64,
         .wasm32,
@@ -816,8 +814,7 @@ pub fn zigBackend(target: *const std.Target, use_llvm: bool) std.builtin.Compile
         .powerpc, .powerpcle, .powerpc64, .powerpc64le => .stage2_powerpc,
         .riscv64 => .stage2_riscv64,
         .sparc64 => .stage2_sparc64,
-        .spirv32 => if (target.os.tag == .opencl) .stage2_spirv else .other,
-        .spirv, .spirv64 => .stage2_spirv,
+        .spirv32, .spirv64 => .stage2_spirv,
         .wasm32, .wasm64 => .stage2_wasm,
         .x86 => .stage2_x86,
         .x86_64 => .stage2_x86_64,
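After the `zigBackend` change above, every SPIR-V flavor (`spirv32`/`spirv64` on `opencl`, `vulkan`, `opengl`, or `amdhsa`) funnels into the self-hosted `stage2_spirv` backend. A hedged `build.zig` sketch of targeting it, modeled on the `spirv64-vulkan` entry in `test/tests.zig` below; the source path, artifact name, and option values are placeholders:

```zig
const std = @import("std");

pub fn build(b: *std.Build) void {
    // Assumed target string; mirrors the test-matrix entry for spirv64-vulkan.
    const query = std.Target.Query.parse(.{
        .arch_os_abi = "spirv64-vulkan",
        .cpu_features = "vulkan_v1_2+float16+float64",
    }) catch unreachable;

    const kernel_mod = b.createModule(.{
        .root_source_file = b.path("src/kernel.zig"), // placeholder path
        .target = b.resolveTargetQuery(query),
        .optimize = .ReleaseFast,
    });

    // The SPIR-V code generator is self-hosted, so LLVM is left out of the loop.
    const obj = b.addObject(.{
        .name = "kernel",
        .root_module = kernel_mod,
        .use_llvm = false,
    });
    const install = b.addInstallFile(obj.getEmittedBin(), "kernel.spv");
    b.getInstallStep().dependOn(&install.step);
}
```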
diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig
index abdc61153d..3a0b254aa2 100644
--- a/test/behavior/type_info.zig
+++ b/test/behavior/type_info.zig
@@ -389,7 +389,6 @@ fn testFunction() !void {
     // Avoid looking at `typeInfoFooAligned` on targets which don't support function alignment.
     switch (builtin.target.cpu.arch) {
-        .spirv,
         .spirv32,
         .spirv64,
         .wasm32,
diff --git a/test/cases/compile_errors/function_alignment_on_unsupported_target.zig b/test/cases/compile_errors/function_alignment_on_unsupported_target.zig
index e3ea1dd068..92ca1c8b37 100644
--- a/test/cases/compile_errors/function_alignment_on_unsupported_target.zig
+++ b/test/cases/compile_errors/function_alignment_on_unsupported_target.zig
@@ -2,6 +2,6 @@ export fn entry() align(64) void {}

 // error
 // backend=stage2
-// target=nvptx-cuda,nvptx64-cuda,spirv-vulkan,spirv32-opencl,spirv64-opencl,wasm32-freestanding,wasm64-freestanding
+// target=nvptx-cuda,nvptx64-cuda,spirv32-opengl,spirv32-vulkan,spirv32-opencl,spirv64-opencl,spirv64-amdhsa,wasm32-freestanding,wasm64-freestanding
 //
 // :1:25: error: target does not support function alignment
diff --git a/test/tests.zig b/test/tests.zig
index 432557af06..0362233b3c 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -1440,7 +1440,7 @@ const test_targets = blk: {
     .{
         .target = std.Target.Query.parse(.{
             .arch_os_abi = "spirv64-vulkan",
-            .cpu_features = "vulkan_v1_2+physical_storage_buffer+int64+float16+float64",
+            .cpu_features = "vulkan_v1_2+float16+float64",
         }) catch unreachable,
         .use_llvm = false,
         .use_lld = false,
@@ -2527,7 +2527,7 @@ fn wouldUseLlvm(use_llvm: ?bool, query: std.Target.Query, optimize_mode: Optimiz
     const cpu_arch = query.cpu_arch orelse builtin.cpu.arch;
     switch (cpu_arch) {
         .x86_64 => if (std.Target.ptrBitWidth_arch_abi(cpu_arch, query.abi orelse .none) != 64) return true,
-        .spirv, .spirv32, .spirv64 => return false,
+        .spirv32, .spirv64 => return false,
         else => return true,
     }
     return false;
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index f033195aac..cb6043f0c8 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -1105,11 +1105,6 @@ const targets = [_]ArchTarget{
             .desc = "Enable Float64 capability",
             .deps = &.{"v1_0"},
         },
-        .{
-            .zig_name = "matrix",
-            .desc = "Enable Matrix capability",
-            .deps = &.{"v1_0"},
-        },
         .{
             .zig_name = "storage_push_constant16",
             .desc = "Enable SPV_KHR_16bit_storage extension and the StoragePushConstant16 capability",
@@ -1120,52 +1115,32 @@ const targets = [_]ArchTarget{
             .desc = "Enable SPV_INTEL_arbitrary_precision_integers extension and the ArbitraryPrecisionIntegersINTEL capability",
             .deps = &.{"v1_5"},
         },
-        .{
-            .zig_name = "kernel",
-            .desc = "Enable Kernel capability",
-            .deps = &.{"v1_0"},
-        },
-        .{
-            .zig_name = "addresses",
-            .desc = "Enable Addresses capability",
-            .deps = &.{"v1_0"},
-        },
         .{
             .zig_name = "generic_pointer",
             .desc = "Enable GenericPointer capability",
-            .deps = &.{ "v1_0", "addresses" },
+            .deps = &.{"v1_0"},
         },
         .{
             .zig_name = "vector16",
             .desc = "Enable Vector16 capability",
-            .deps = &.{ "v1_0", "kernel" },
-        },
-        .{
-            .zig_name = "shader",
-            .desc = "Enable Shader capability",
-            .deps = &.{ "v1_0", "matrix" },
+            .deps = &.{"v1_0"},
         },
         .{
             .zig_name = "variable_pointers",
             .desc = "Enable SPV_KHR_physical_storage_buffer extension and the PhysicalStorageBufferAddresses capability",
             .deps = &.{"v1_0"},
         },
-        .{
-            .zig_name = "physical_storage_buffer",
-            .desc = "Enable SPV_KHR_variable_pointers extension and the (VariablePointers, VariablePointersStorageBuffer) capabilities",
-            .deps = &.{"v1_0"},
-        },
     },
     .extra_cpus = &.{
         .{
             .llvm_name = null,
             .zig_name = "vulkan_v1_2",
-            .features = &.{ "v1_5", "shader" },
+            .features = &.{"v1_5"},
        },
        .{
            .llvm_name = null,
            .zig_name = "opencl_v2",
-            .features = &.{ "v1_2", "kernel", "addresses", "generic_pointer" },
+            .features = &.{"v1_2"},
        },
    },
},