Mirror of https://github.com/ziglang/zig.git, synced 2026-02-14 13:30:45 +00:00

Merge pull request #23158 from alichraghi/ali_spirv
spirv: miscellaneous stuff #2

Commit: 5105c3c7fa
@@ -10,18 +10,18 @@ pub const Feature = enum {
     v1_4,
     v1_5,
     v1_6,
-    int8,
-    int16,
     int64,
     float16,
     float64,
-    addresses,
     matrix,
     storage_push_constant16,
+    arbitrary_precision_integers,
     kernel,
+    addresses,
     generic_pointer,
     vector16,
     shader,
+    physical_storage_buffer,
 };

 pub const featureSet = CpuFeature.FeatureSetFns(Feature).featureSet;

@@ -69,16 +69,6 @@ pub const all_features = blk: {
         .description = "Enable version 1.6",
         .dependencies = featureSet(&[_]Feature{.v1_5}),
     };
-    result[@intFromEnum(Feature.int8)] = .{
-        .llvm_name = null,
-        .description = "Enable Int8 capability",
-        .dependencies = featureSet(&[_]Feature{.v1_0}),
-    };
-    result[@intFromEnum(Feature.int16)] = .{
-        .llvm_name = null,
-        .description = "Enable Int16 capability",
-        .dependencies = featureSet(&[_]Feature{.v1_0}),
-    };
     result[@intFromEnum(Feature.int64)] = .{
         .llvm_name = null,
         .description = "Enable Int64 capability",

@@ -94,11 +84,6 @@ pub const all_features = blk: {
         .description = "Enable Float64 capability",
         .dependencies = featureSet(&[_]Feature{.v1_0}),
     };
-    result[@intFromEnum(Feature.addresses)] = .{
-        .llvm_name = null,
-        .description = "Enable either the Addresses capability or, SPV_KHR_physical_storage_buffer extension and the PhysicalStorageBufferAddresses capability",
-        .dependencies = featureSet(&[_]Feature{.v1_0}),
-    };
     result[@intFromEnum(Feature.matrix)] = .{
         .llvm_name = null,
         .description = "Enable Matrix capability",

@@ -109,11 +94,21 @@ pub const all_features = blk: {
         .description = "Enable SPV_KHR_16bit_storage extension and the StoragePushConstant16 capability",
         .dependencies = featureSet(&[_]Feature{.v1_3}),
     };
+    result[@intFromEnum(Feature.arbitrary_precision_integers)] = .{
+        .llvm_name = null,
+        .description = "Enable SPV_INTEL_arbitrary_precision_integers extension and the ArbitraryPrecisionIntegersINTEL capability",
+        .dependencies = featureSet(&[_]Feature{.v1_5}),
+    };
     result[@intFromEnum(Feature.kernel)] = .{
         .llvm_name = null,
         .description = "Enable Kernel capability",
         .dependencies = featureSet(&[_]Feature{.v1_0}),
     };
+    result[@intFromEnum(Feature.addresses)] = .{
+        .llvm_name = null,
+        .description = "Enable Addresses capability",
+        .dependencies = featureSet(&[_]Feature{.v1_0}),
+    };
     result[@intFromEnum(Feature.generic_pointer)] = .{
         .llvm_name = null,
         .description = "Enable GenericPointer capability",

@@ -129,6 +124,11 @@ pub const all_features = blk: {
         .description = "Enable Shader capability",
         .dependencies = featureSet(&[_]Feature{ .v1_0, .matrix }),
     };
+    result[@intFromEnum(Feature.physical_storage_buffer)] = .{
+        .llvm_name = null,
+        .description = "Enable SPV_KHR_physical_storage_buffer extension and the PhysicalStorageBufferAddresses capability",
+        .dependencies = featureSet(&[_]Feature{.v1_0}),
+    };
     const ti = @typeInfo(Feature);
     for (&result, 0..) |*elem, i| {
         elem.index = i;

@@ -147,7 +147,7 @@ pub const cpu = struct {
     pub const vulkan_v1_2: CpuModel = .{
         .name = "vulkan_v1_2",
         .llvm_name = null,
-        .features = featureSet(&[_]Feature{ .v1_5, .shader, .addresses }),
+        .features = featureSet(&[_]Feature{ .v1_5, .shader, .physical_storage_buffer }),
     };

     pub const opencl_v2: CpuModel = .{
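(For orientation: these feature names are what a SPIR-V `Target.Query` feature string refers to. A minimal sketch of selecting them when resolving a SPIR-V target in a build script — the query mirrors the `test_targets` entry changed at the end of this diff, everything else here is illustrative:)

    const std = @import("std");

    pub fn build(b: *std.Build) void {
        // Illustrative only: resolve a spirv64-vulkan target with explicit SPIR-V features.
        const query = std.Target.Query.parse(.{
            .arch_os_abi = "spirv64-vulkan",
            .cpu_features = "vulkan_v1_2+int64+float16+float64",
        }) catch unreachable;
        _ = b.resolveTargetQuery(query);
    }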
@@ -80,7 +80,8 @@ pub fn fragmentDepth(comptime ptr: *addrspace(.output) f32) void {
 /// Forms the main linkage for `input` and `output` address spaces.
 /// `ptr` must be a reference to variable or struct field.
 pub fn location(comptime ptr: anytype, comptime loc: u32) void {
-    asm volatile ("OpDecorate %ptr Location $loc"
+    asm volatile (
+        \\OpDecorate %ptr Location $loc
         :
         : [ptr] "" (ptr),
           [loc] "c" (loc),

@@ -110,7 +111,8 @@ pub const Origin = enum(u32) {
 /// The coordinates appear to originate in the specified `origin`.
 /// Only valid with the `Fragment` calling convention.
 pub fn fragmentOrigin(comptime entry_point: anytype, comptime origin: Origin) void {
-    asm volatile ("OpExecutionMode %entry_point $origin"
+    asm volatile (
+        \\OpExecutionMode %entry_point $origin
         :
         : [entry_point] "" (entry_point),
           [origin] "c" (@intFromEnum(origin)),

@@ -137,7 +139,8 @@ pub const DepthMode = enum(u32) {

 /// Only valid with the `Fragment` calling convention.
 pub fn depthMode(comptime entry_point: anytype, comptime mode: DepthMode) void {
-    asm volatile ("OpExecutionMode %entry_point $mode"
+    asm volatile (
+        \\OpExecutionMode %entry_point $mode
         :
         : [entry_point] "" (entry_point),
           [mode] "c" (mode),

@@ -147,7 +150,8 @@ pub fn depthMode(comptime entry_point: anytype, comptime mode: DepthMode) void {
 /// Indicates the workgroup size in the `x`, `y`, and `z` dimensions.
 /// Only valid with the `GLCompute` or `Kernel` calling conventions.
 pub fn workgroupSize(comptime entry_point: anytype, comptime size: @Vector(3, u32)) void {
-    asm volatile ("OpExecutionMode %entry_point LocalSize %x %y %z"
+    asm volatile (
+        \\OpExecutionMode %entry_point LocalSize %x %y %z
         :
         : [entry_point] "" (entry_point),
           [x] "c" (size[0]),

@@ -159,7 +163,8 @@ pub fn workgroupSize(comptime entry_point: anytype, comptime size: @Vector(3, u3
 /// A hint to the client, which indicates the workgroup size in the `x`, `y`, and `z` dimensions.
 /// Only valid with the `GLCompute` or `Kernel` calling conventions.
 pub fn workgroupSizeHint(comptime entry_point: anytype, comptime size: @Vector(3, u32)) void {
-    asm volatile ("OpExecutionMode %entry_point LocalSizeHint %x %y %z"
+    asm volatile (
+        \\OpExecutionMode %entry_point LocalSizeHint %x %y %z
         :
         : [entry_point] "" (entry_point),
           [x] "c" (size[0]),
src/Sema.zig (77 lines changed)
@@ -3648,7 +3648,7 @@ fn indexablePtrLen(
     const object_ty = sema.typeOf(object);
     const is_pointer_to = object_ty.isSinglePointer(zcu);
     const indexable_ty = if (is_pointer_to) object_ty.childType(zcu) else object_ty;
-    try checkIndexable(sema, block, src, indexable_ty);
+    try sema.checkIndexable(block, src, indexable_ty);
     const field_name = try zcu.intern_pool.getOrPutString(sema.gpa, pt.tid, "len", .no_embedded_nulls);
     return sema.fieldVal(block, src, object, field_name, src);
 }

@@ -10103,6 +10103,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     }
     try sema.requireRuntimeBlock(block, block.nodeOffset(inst_data.src_node), ptr_src);
     try sema.validateRuntimeValue(block, ptr_src, operand);
+    try sema.checkLogicalPtrOperation(block, ptr_src, ptr_ty);
     if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
         return block.addBitCast(dest_ty, operand);
     }

@@ -16389,6 +16390,8 @@ fn analyzeArithmetic(
     };

     try sema.requireRuntimeBlock(block, src, runtime_src);
+    try sema.checkLogicalPtrOperation(block, src, lhs_ty);
+    try sema.checkLogicalPtrOperation(block, src, rhs_ty);
     const lhs_int = try block.addBitCast(.usize, lhs);
     const rhs_int = try block.addBitCast(.usize, rhs);
     const address = try block.addBinOp(.sub_wrap, lhs_int, rhs_int);

@@ -16620,24 +16623,7 @@ fn analyzePtrArithmetic(
     };

     try sema.requireRuntimeBlock(block, op_src, runtime_src);
-
-    const target = zcu.getTarget();
-    if (target_util.arePointersLogical(target, ptr_info.flags.address_space)) {
-        return sema.failWithOwnedErrorMsg(block, msg: {
-            const msg = try sema.errMsg(op_src, "illegal pointer arithmetic on pointer of type '{}'", .{ptr_ty.fmt(pt)});
-            errdefer msg.destroy(sema.gpa);
-
-            const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
-            try sema.errNote(op_src, msg, "arithmetic cannot be performed on pointers with address space '{s}' on target {s}-{s} by compiler backend {s}", .{
-                @tagName(ptr_info.flags.address_space),
-                target.cpu.arch.genericName(),
-                @tagName(target.os.tag),
-                @tagName(backend),
-            });
-
-            break :msg msg;
-        });
-    }
+    try sema.checkLogicalPtrOperation(block, op_src, ptr_ty);

     return block.addInst(.{
         .tag = air_tag,

@@ -22501,6 +22487,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         });
     }
     try sema.requireRuntimeBlock(block, src, operand_src);
+    try sema.checkLogicalPtrOperation(block, src, ptr_ty);
     if (!is_vector or zcu.backendSupportsFeature(.all_vector_instructions)) {
         if (block.wantSafety() and (try elem_ty.hasRuntimeBitsSema(pt) or elem_ty.zigTypeTag(zcu) == .@"fn")) {
             if (!ptr_ty.isAllowzeroPtr(zcu)) {

@@ -23165,8 +23152,9 @@ fn ptrCastFull(

     try sema.validateRuntimeValue(block, operand_src, operand);

-    const need_null_check = block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu);
-    const need_align_check = block.wantSafety() and dest_align.compare(.gt, src_align);
+    const can_cast_to_int = !target_util.arePointersLogical(zcu.getTarget(), operand_ty.ptrAddressSpace(zcu));
+    const need_null_check = can_cast_to_int and block.wantSafety() and operand_ty.ptrAllowsZero(zcu) and !dest_ty.ptrAllowsZero(zcu);
+    const need_align_check = can_cast_to_int and block.wantSafety() and dest_align.compare(.gt, src_align);

     // `operand` might be a slice. If `need_operand_ptr`, we'll populate `operand_ptr` with the raw pointer.
     const need_operand_ptr = src_info.flags.size != .slice or // we already have it

@@ -23832,6 +23820,32 @@ fn checkPtrType(
     return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(pt)});
 }

+fn checkLogicalPtrOperation(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void {
+    const pt = sema.pt;
+    const zcu = pt.zcu;
+    if (zcu.intern_pool.indexToKey(ty.toIntern()) == .ptr_type) {
+        const target = zcu.getTarget();
+        const as = ty.ptrAddressSpace(zcu);
+        if (target_util.arePointersLogical(target, as)) {
+            return sema.failWithOwnedErrorMsg(block, msg: {
+                const msg = try sema.errMsg(src, "illegal operation on logical pointer of type '{}'", .{ty.fmt(pt)});
+                errdefer msg.destroy(sema.gpa);
+                try sema.errNote(
+                    src,
+                    msg,
+                    "cannot perform arithmetic on pointers with address space '{s}' on target {s}-{s}",
+                    .{
+                        @tagName(as),
+                        target.cpu.arch.genericName(),
+                        @tagName(target.os.tag),
+                    },
+                );
+                break :msg msg;
+            });
+        }
+    }
+}
+
 fn checkVectorElemType(
     sema: *Sema,
     block: *Block,
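(The new `checkLogicalPtrOperation` helper is what the error test added at the end of this diff exercises; a minimal reproduction on a logical-pointer target such as spirv64-vulkan, taken from that test:)

    export fn intFromPtr() void {
        var value: u8 = 0;
        // error: illegal operation on logical pointer of type '*u8'
        // note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan
        _ = @intFromPtr(&value);
    }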
@@ -28326,7 +28340,7 @@ fn elemPtr(
         .pointer => indexable_ptr_ty.childType(zcu),
         else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(pt)}),
     };
-    try checkIndexable(sema, block, src, indexable_ty);
+    try sema.checkIndexable(block, src, indexable_ty);

     const elem_ptr = switch (indexable_ty.zigTypeTag(zcu)) {
         .array, .vector => try sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety),

@@ -28362,7 +28376,7 @@ fn elemPtrOneLayerOnly(
     const pt = sema.pt;
     const zcu = pt.zcu;

-    try checkIndexable(sema, block, src, indexable_ty);
+    try sema.checkIndexable(block, src, indexable_ty);

     switch (indexable_ty.ptrSize(zcu)) {
         .slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),

@@ -28376,6 +28390,8 @@ fn elemPtrOneLayerOnly(
                 const elem_ptr = try ptr_val.ptrElem(index, pt);
                 return Air.internedToRef(elem_ptr.toIntern());
             }

+            try sema.checkLogicalPtrOperation(block, src, indexable_ty);
             const result_ty = try indexable_ty.elemPtrType(null, pt);

             return block.addPtrElemPtr(indexable, elem_index, result_ty);

@@ -28412,7 +28428,7 @@ fn elemVal(
     const pt = sema.pt;
     const zcu = pt.zcu;

-    try checkIndexable(sema, block, src, indexable_ty);
+    try sema.checkIndexable(block, src, indexable_ty);

     // TODO in case of a vector of pointers, we need to detect whether the element
     // index is a scalar or vector instead of unconditionally casting to usize.

@@ -28438,6 +28454,7 @@ fn elemVal(
                 return Air.internedToRef((try pt.getCoerced(elem_val, elem_ty)).toIntern());
             }

+            try sema.checkLogicalPtrOperation(block, src, indexable_ty);
             return block.addBinOp(.ptr_elem_val, indexable, elem_index);
         },
         .one => {

@@ -28477,6 +28494,9 @@ fn validateRuntimeElemAccess(
     parent_ty: Type,
     parent_src: LazySrcLoc,
 ) CompileError!void {
+    const pt = sema.pt;
+    const zcu = pt.zcu;
+
     if (try elem_ty.comptimeOnlySema(sema.pt)) {
         const msg = msg: {
             const msg = try sema.errMsg(

@@ -28492,6 +28512,14 @@ fn validateRuntimeElemAccess(
         };
         return sema.failWithOwnedErrorMsg(block, msg);
     }

+    if (zcu.intern_pool.indexToKey(parent_ty.toIntern()) == .ptr_type) {
+        const target = zcu.getTarget();
+        const as = parent_ty.ptrAddressSpace(zcu);
+        if (target_util.arePointersLogical(target, as)) {
+            return sema.fail(block, elem_index_src, "cannot access element of logical pointer '{}'", .{parent_ty.fmt(pt)});
+        }
+    }
 }

 fn tupleFieldPtr(

@@ -31158,6 +31186,7 @@ fn coerceCompatiblePtrs(
     if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(zcu) and
         (try dest_ty.elemType2(zcu).hasRuntimeBitsSema(pt) or dest_ty.elemType2(zcu).zigTypeTag(zcu) == .@"fn"))
     {
+        try sema.checkLogicalPtrOperation(block, inst_src, inst_ty);
         const actual_ptr = if (inst_ty.isSlice(zcu))
             try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty)
         else
@@ -464,7 +464,7 @@ const NavGen = struct {

         const zcu = self.pt.zcu;
         const ty = Type.fromInterned(zcu.intern_pool.typeOf(val));
-        const decl_ptr_ty_id = try self.ptrType(ty, .Generic, .indirect);
+        const decl_ptr_ty_id = try self.ptrType(ty, self.spvStorageClass(.generic), .indirect);

         const spv_decl_index = blk: {
             const entry = try self.object.uav_link.getOrPut(self.object.gpa, .{ val, .Function });

@@ -581,18 +581,18 @@ const NavGen = struct {
    /// that size. In this case, multiple elements of the largest type should be used.
    /// The backing type will be chosen as the smallest supported integer larger or equal to it in number of bits.
    /// The result is valid to be used with OpTypeInt.
    /// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits).
    /// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers).
    /// TODO: Should the result of this function be cached?
    fn backingIntBits(self: *NavGen, bits: u16) ?u16 {
        // The backend will never be asked to compiler a 0-bit integer, so we won't have to handle those in this function.
        assert(bits != 0);

        // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively.
        if (self.spv.hasFeature(.arbitrary_precision_integers) and bits <= 32) return bits;

        // We require Int8 and Int16 capabilities and benefit Int64 when available.
        // 32-bit integers are always supported (see spec, 2.16.1, Data rules).
        const ints = [_]struct { bits: u16, feature: ?Target.spirv.Feature }{
            .{ .bits = 8, .feature = .int8 },
            .{ .bits = 16, .feature = .int16 },
            .{ .bits = 8, .feature = null },
            .{ .bits = 16, .feature = null },
            .{ .bits = 32, .feature = null },
            .{ .bits = 64, .feature = .int64 },
        };
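(A rough sketch, not part of the diff, of what `backingIntBits` is expected to return under the new table, assuming the corresponding features are enabled:)

    // bits <= 32 with .arbitrary_precision_integers -> bits (kept as-is)
    // 1...8   -> 8
    // 9...16  -> 16
    // 17...32 -> 32   (always available per the spec's data rules)
    // 33...64 -> 64   (needs .int64)
    // > 64    -> null (large integers are still unsupported here)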
@@ -714,6 +714,7 @@ const NavGen = struct {
        const int_info = scalar_ty.intInfo(zcu);
        // Use backing bits so that negatives are sign extended
        const backing_bits = self.backingIntBits(int_info.bits).?; // Assertion failure means big int
+       assert(backing_bits != 0); // u0 is comptime

        const signedness: Signedness = switch (@typeInfo(@TypeOf(value))) {
            .int => |int| int.signedness,

@@ -721,35 +722,35 @@ const NavGen = struct {
            else => unreachable,
        };

        const value64: u64 = switch (signedness) {
            .signed => @bitCast(@as(i64, @intCast(value))),
            .unsigned => @as(u64, @intCast(value)),
        const final_value: spec.LiteralContextDependentNumber = blk: {
            if (self.spv.hasFeature(.kernel)) {
                const value64: u64 = switch (signedness) {
                    .signed => @bitCast(@as(i64, @intCast(value))),
                    .unsigned => @as(u64, @intCast(value)),
                };

                // Manually truncate the value to the right amount of bits.
                const truncated_value = if (backing_bits == 64)
                    value64
                else
                    value64 & (@as(u64, 1) << @intCast(backing_bits)) - 1;

                break :blk switch (backing_bits) {
                    1...32 => .{ .uint32 = @truncate(truncated_value) },
                    33...64 => .{ .uint64 = truncated_value },
                    else => unreachable, // TODO: Large integer constants
                };
            }

            break :blk switch (backing_bits) {
                1...32 => if (signedness == .signed) .{ .int32 = @intCast(value) } else .{ .uint32 = @intCast(value) },
                33...64 => if (signedness == .signed) .{ .int64 = value } else .{ .uint64 = value },
                else => unreachable, // TODO: Large integer constants
            };
        };

        // Manually truncate the value to the right amount of bits.
        const truncated_value = if (backing_bits == 64)
            value64
        else
            value64 & (@as(u64, 1) << @intCast(backing_bits)) - 1;

        const result_ty_id = try self.resolveType(scalar_ty, .indirect);
        const result_id = self.spv.allocId();

        const section = &self.spv.sections.types_globals_constants;
        switch (backing_bits) {
            0 => unreachable, // u0 is comptime
            1...32 => try section.emit(self.spv.gpa, .OpConstant, .{
                .id_result_type = result_ty_id,
                .id_result = result_id,
                .value = .{ .uint32 = @truncate(truncated_value) },
            }),
            33...64 => try section.emit(self.spv.gpa, .OpConstant, .{
                .id_result_type = result_ty_id,
                .id_result = result_id,
                .value = .{ .uint64 = truncated_value },
            }),
            else => unreachable, // TODO: Large integer constants
        }
        const result_id = try self.spv.constant(result_ty_id, final_value);

        if (!ty.isVector(zcu)) return result_id;
        return self.constructCompositeSplat(ty, result_id);
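(Worked example, not from the diff: lowering `@as(i8, -1)` through the new `final_value` selection above —)

    // with .kernel: truncated to the 8-bit backing width, emitted unsigned -> .{ .uint32 = 0xFF }
    // otherwise (shader): signedness is preserved                          -> .{ .int32 = -1 }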
@@ -804,8 +805,6 @@ const NavGen = struct {
            return self.spv.constUndef(result_ty_id);
        }

-       const section = &self.spv.sections.types_globals_constants;
-
        const cacheable_id = cache: {
            switch (ip.indexToKey(val.toIntern())) {
                .int_type,

@@ -860,13 +859,7 @@ const NavGen = struct {
                        80, 128 => unreachable, // TODO
                        else => unreachable,
                    };
-                   const result_id = self.spv.allocId();
-                   try section.emit(self.spv.gpa, .OpConstant, .{
-                       .id_result_type = result_ty_id,
-                       .id_result = result_id,
-                       .value = lit,
-                   });
-                   break :cache result_id;
+                   break :cache try self.spv.constant(result_ty_id, lit);
                },
                .err => |err| {
                    const value = try pt.getErrorValue(err.name);

@@ -989,8 +982,17 @@ const NavGen = struct {
                },
                .struct_type => {
                    const struct_type = zcu.typeToStruct(ty).?;

                    if (struct_type.layout == .@"packed") {
-                       return self.todo("packed struct constants", .{});
+                       // TODO: composite int
+                       // TODO: endianness
+                       const bits: u16 = @intCast(ty.bitSize(zcu));
+                       const bytes = std.mem.alignForward(u16, self.backingIntBits(bits).?, 8) / 8;
+                       var limbs: [8]u8 = undefined;
+                       @memset(&limbs, 0);
+                       val.writeToPackedMemory(ty, pt, limbs[0..bytes], 0) catch unreachable;
+                       const backing_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip));
+                       return try self.constInt(backing_ty, @as(u64, @bitCast(limbs)));
                    }

                    var types = std.ArrayList(Type).init(self.gpa);

@@ -1022,6 +1024,11 @@ const NavGen = struct {
                    else => unreachable,
                },
                .un => |un| {
+                   if (un.tag == .none) {
+                       assert(ty.containerLayout(zcu) == .@"packed"); // TODO
+                       const int_ty = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu)));
+                       return try self.constant(int_ty, Value.fromInterned(un.val), .direct);
+                   }
                    const active_field = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
                    const union_obj = zcu.typeToUnion(ty).?;
                    const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[active_field]);

@@ -1354,7 +1361,7 @@ const NavGen = struct {
        const union_obj = zcu.typeToUnion(ty).?;

        if (union_obj.flagsUnordered(ip).layout == .@"packed") {
-           return self.todo("packed union types", .{});
+           return try self.intType(.unsigned, @intCast(ty.bitSize(zcu)));
        }

        const layout = self.unionLayout(ty);

@@ -1366,7 +1373,7 @@ const NavGen = struct {
        var member_types: [4]IdRef = undefined;
        var member_names: [4][]const u8 = undefined;

-       const u8_ty_id = try self.resolveType(Type.u8, .direct); // TODO: What if Int8Type is not enabled?
+       const u8_ty_id = try self.resolveType(Type.u8, .direct);

        if (layout.tag_size != 0) {
            const tag_ty_id = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);

@@ -2821,6 +2828,7 @@ const NavGen = struct {
    /// TODO is to also write out the error as a function call parameter, and to somehow fetch
    /// the name of an error in the text executor.
    fn generateTestEntryPoint(self: *NavGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
        const zcu = self.pt.zcu;
        const target = self.spv.target;

        const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);

@@ -2950,7 +2958,7 @@ const NavGen = struct {
            .pointer = p_error_id,
            .object = error_id,
            .memory_access = .{
-               .Aligned = .{ .literal_integer = @sizeOf(u16) },
+               .Aligned = .{ .literal_integer = @intCast(Type.abiAlignment(.anyerror, zcu).toByteUnits().?) },
            },
        });
        try section.emit(self.spv.gpa, .OpReturn, {});

@@ -3223,10 +3231,13 @@ const NavGen = struct {
    };

    fn load(self: *NavGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef {
+       const zcu = self.pt.zcu;
+       const alignment: u32 = @intCast(value_ty.abiAlignment(zcu).toByteUnits().?);
        const indirect_value_ty_id = try self.resolveType(value_ty, .indirect);
        const result_id = self.spv.allocId();
        const access = spec.MemoryAccess.Extended{
            .Volatile = options.is_volatile,
+           .Aligned = .{ .literal_integer = alignment },
        };
        try self.func.body.emit(self.spv.gpa, .OpLoad, .{
            .id_result_type = indirect_value_ty_id,

@@ -4229,7 +4240,7 @@ const NavGen = struct {
        defer self.gpa.free(ids);

        const result_id = self.spv.allocId();
-       if (self.spv.hasFeature(.kernel)) {
+       if (self.spv.hasFeature(.addresses)) {
            try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
                .id_result_type = result_ty_id,
                .id_result = result_id,

@@ -4308,6 +4319,7 @@ const NavGen = struct {
    ) !Temporary {
        const pt = self.pt;
        const zcu = pt.zcu;
+       const ip = &zcu.intern_pool;
        const scalar_ty = lhs.ty.scalarType(zcu);
        const is_vector = lhs.ty.isVector(zcu);

@@ -4318,6 +4330,11 @@ const NavGen = struct {
                const ty = lhs.ty.intTagType(zcu);
                return try self.cmp(op, lhs.pun(ty), rhs.pun(ty));
            },
+           .@"struct" => {
+               const struct_ty = zcu.typeToPackedStruct(scalar_ty).?;
+               const ty = Type.fromInterned(struct_ty.backingIntTypeUnordered(ip));
+               return try self.cmp(op, lhs.pun(ty), rhs.pun(ty));
+           },
            .error_set => {
                assert(!is_vector);
                const err_int_ty = try pt.errorIntType();

@@ -4745,8 +4762,42 @@ const NavGen = struct {
        switch (result_ty.zigTypeTag(zcu)) {
            .@"struct" => {
                if (zcu.typeToPackedStruct(result_ty)) |struct_type| {
-                   _ = struct_type;
-                   unreachable; // TODO
+                   comptime assert(Type.packed_struct_layout_version == 2);
+                   const backing_int_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip));
+                   var running_int_id = try self.constInt(backing_int_ty, 0);
+                   var running_bits: u16 = 0;
+                   for (struct_type.field_types.get(ip), elements) |field_ty_ip, element| {
+                       const field_ty = Type.fromInterned(field_ty_ip);
+                       if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+                       const field_id = try self.resolve(element);
+                       const ty_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+                       const field_int_ty = try self.pt.intType(.unsigned, ty_bit_size);
+                       const field_int_id = blk: {
+                           if (field_ty.isPtrAtRuntime(zcu)) {
+                               assert(self.spv.hasFeature(.addresses) or
+                                   (self.spv.hasFeature(.physical_storage_buffer) and field_ty.ptrAddressSpace(zcu) == .storage_buffer));
+                               break :blk try self.intFromPtr(field_id);
+                           }
+                           break :blk try self.bitCast(field_int_ty, field_ty, field_id);
+                       };
+                       const shift_rhs = try self.constInt(backing_int_ty, running_bits);
+                       const extended_int_conv = try self.buildIntConvert(backing_int_ty, .{
+                           .ty = field_int_ty,
+                           .value = .{ .singleton = field_int_id },
+                       });
+                       const shifted = try self.buildBinary(.sll, extended_int_conv, .{
+                           .ty = backing_int_ty,
+                           .value = .{ .singleton = shift_rhs },
+                       });
+                       const running_int_tmp = try self.buildBinary(
+                           .bit_or,
+                           .{ .ty = backing_int_ty, .value = .{ .singleton = running_int_id } },
+                           shifted,
+                       );
+                       running_int_id = try running_int_tmp.materialize(self);
+                       running_bits += ty_bit_size;
+                   }
+                   return running_int_id;
                }

                const types = try self.gpa.alloc(Type, elements.len);
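(A hand-worked illustration, not from the PR, of the shift-and-or lowering above for a hypothetical `packed struct(u8) { a: u3, b: u5 }` initialized with `.{ .a = 5, .b = 9 }`:)

    const backing: u8 = @as(u8, 5) | (@as(u8, 9) << 3); // 0b01001101 == 77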
@@ -5087,11 +5138,33 @@ const NavGen = struct {
        const union_ty = zcu.typeToUnion(ty).?;
        const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);

-       if (union_ty.flagsUnordered(ip).layout == .@"packed") {
-           unreachable; // TODO
-       }
-
        const layout = self.unionLayout(ty);
+       const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
+
+       if (union_ty.flagsUnordered(ip).layout == .@"packed") {
+           if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+               const int_ty = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu)));
+               return self.constInt(int_ty, 0);
+           }
+
+           assert(payload != null);
+           if (payload_ty.isInt(zcu)) {
+               if (ty.bitSize(zcu) == payload_ty.bitSize(zcu)) {
+                   return self.bitCast(ty, payload_ty, payload.?);
+               }
+
+               const trunc = try self.buildIntConvert(ty, .{ .ty = payload_ty, .value = .{ .singleton = payload.? } });
+               return try trunc.materialize(self);
+           }
+
+           const payload_int_ty = try pt.intType(.unsigned, @intCast(payload_ty.bitSize(zcu)));
+           const payload_int = if (payload_ty.ip_index == .bool_type)
+               try self.convertToIndirect(payload_ty, payload.?)
+           else
+               try self.bitCast(payload_int_ty, payload_ty, payload.?);
+           const trunc = try self.buildIntConvert(ty, .{ .ty = payload_int_ty, .value = .{ .singleton = payload_int } });
+           return try trunc.materialize(self);
+       }

        const tag_int = if (layout.tag_size != 0) blk: {
            const tag_val = try pt.enumValueFieldIndex(tag_ty, active_field);

@@ -5112,7 +5185,6 @@ const NavGen = struct {
            try self.store(tag_ty, ptr_id, tag_id, .{});
        }

-       const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
        if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
            const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function, .indirect);
            const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});

@@ -5167,11 +5239,51 @@ const NavGen = struct {

        switch (object_ty.zigTypeTag(zcu)) {
            .@"struct" => switch (object_ty.containerLayout(zcu)) {
-               .@"packed" => unreachable, // TODO
+               .@"packed" => {
+                   const struct_ty = zcu.typeToPackedStruct(object_ty).?;
+                   const bit_offset = pt.structPackedFieldBitOffset(struct_ty, field_index);
+                   const bit_offset_id = try self.constInt(.u16, bit_offset);
+                   const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
+                   const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+                   const field_int_ty = try pt.intType(signedness, field_bit_size);
+                   const shift_lhs: Temporary = .{ .ty = object_ty, .value = .{ .singleton = object_id } };
+                   const shift = try self.buildBinary(.srl, shift_lhs, .{ .ty = .u16, .value = .{ .singleton = bit_offset_id } });
+                   const mask_id = try self.constInt(object_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1);
+                   const masked = try self.buildBinary(.bit_and, shift, .{ .ty = object_ty, .value = .{ .singleton = mask_id } });
+                   const result_id = blk: {
+                       if (self.backingIntBits(field_bit_size).? == self.backingIntBits(@intCast(object_ty.bitSize(zcu))).?)
+                           break :blk try self.bitCast(field_int_ty, object_ty, try masked.materialize(self));
+                       const trunc = try self.buildIntConvert(field_int_ty, masked);
+                       break :blk try trunc.materialize(self);
+                   };
+                   if (field_ty.ip_index == .bool_type) return try self.convertToDirect(.bool, result_id);
+                   if (field_ty.isInt(zcu)) return result_id;
+                   return try self.bitCast(field_ty, field_int_ty, result_id);
+               },
                else => return try self.extractField(field_ty, object_id, field_index),
            },
            .@"union" => switch (object_ty.containerLayout(zcu)) {
-               .@"packed" => unreachable, // TODO
+               .@"packed" => {
+                   const backing_int_ty = try pt.intType(.unsigned, @intCast(object_ty.bitSize(zcu)));
+                   const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
+                   const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
+                   const int_ty = try pt.intType(signedness, field_bit_size);
+                   const mask_id = try self.constInt(backing_int_ty, (@as(u64, 1) << @as(u6, @intCast(field_bit_size))) - 1);
+                   const masked = try self.buildBinary(
+                       .bit_and,
+                       .{ .ty = backing_int_ty, .value = .{ .singleton = object_id } },
+                       .{ .ty = backing_int_ty, .value = .{ .singleton = mask_id } },
+                   );
+                   const result_id = blk: {
+                       if (self.backingIntBits(field_bit_size).? == self.backingIntBits(@intCast(backing_int_ty.bitSize(zcu))).?)
+                           break :blk try self.bitCast(int_ty, backing_int_ty, try masked.materialize(self));
+                       const trunc = try self.buildIntConvert(int_ty, masked);
+                       break :blk try trunc.materialize(self);
+                   };
+                   if (field_ty.ip_index == .bool_type) return try self.convertToDirect(.bool, result_id);
+                   if (field_ty.isInt(zcu)) return result_id;
+                   return try self.bitCast(field_ty, int_ty, result_id);
+               },
                else => {
                    // Store, ptr-elem-ptr, pointer-cast, load
                    const layout = self.unionLayout(object_ty);
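(And the corresponding read path from the hunk above — shift right by the field's bit offset, mask, then narrow — reading `b` back out of the same hypothetical layout, using the `backing` value from the earlier sketch:)

    const b: u5 = @intCast((backing >> 3) & 0x1f); // 9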
@@ -5252,28 +5364,28 @@ const NavGen = struct {
                return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
            },
        },
        .@"union" => switch (object_ty.containerLayout(zcu)) {
            .@"packed" => return self.todo("implement field access for packed unions", .{}),
            else => {
                const layout = self.unionLayout(object_ty);
                if (!layout.has_payload) {
                    // Asked to get a pointer to a zero-sized field. Just lower this
                    // to undefined, there is no reason to make it be a valid pointer.
                    return try self.spv.constUndef(result_ty_id);
                }
        .@"union" => {
            const layout = self.unionLayout(object_ty);
            if (!layout.has_payload) {
                // Asked to get a pointer to a zero-sized field. Just lower this
                // to undefined, there is no reason to make it be a valid pointer.
                return try self.spv.constUndef(result_ty_id);
            }

                const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu));
                const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class, .indirect);
                const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
            const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(zcu));
            const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class, .indirect);
            const pl_ptr_id = blk: {
                if (object_ty.containerLayout(zcu) == .@"packed") break :blk object_ptr;
                break :blk try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
            };

                const active_pl_ptr_id = self.spv.allocId();
                try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
                    .id_result_type = result_ty_id,
                    .id_result = active_pl_ptr_id,
                    .operand = pl_ptr_id,
                });
                return active_pl_ptr_id;
            },
            const active_pl_ptr_id = self.spv.allocId();
            try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
                .id_result_type = result_ty_id,
                .id_result = active_pl_ptr_id,
                .operand = pl_ptr_id,
            });
            return active_pl_ptr_id;
        },
        else => unreachable,
    }

@@ -5292,7 +5404,7 @@ const NavGen = struct {
        /// The final storage class of the pointer. This may be either `.Generic` or `.Function`.
        /// In either case, the local is allocated in the `.Function` storage class, and optionally
        /// cast back to `.Generic`.
-       storage_class: StorageClass = .Generic,
+       storage_class: StorageClass,
    };

    // Allocate a function-local variable, with possible initializer.

@@ -5332,9 +5444,10 @@ const NavGen = struct {
    fn airAlloc(self: *NavGen, inst: Air.Inst.Index) !?IdRef {
        const zcu = self.pt.zcu;
        const ptr_ty = self.typeOfIndex(inst);
-       assert(ptr_ty.ptrAddressSpace(zcu) == .generic);
        const child_ty = ptr_ty.childType(zcu);
-       return try self.alloc(child_ty, .{});
+       return try self.alloc(child_ty, .{
+           .storage_class = self.spvStorageClass(ptr_ty.ptrAddressSpace(zcu)),
+       });
    }

    fn airArg(self: *NavGen) IdRef {
@@ -368,6 +368,40 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
            });
            break :blk result_id;
        },
+       .OpTypeStruct => blk: {
+           const ids = try self.gpa.alloc(IdRef, operands[1..].len);
+           defer self.gpa.free(ids);
+           for (operands[1..], ids) |op, *id| id.* = try self.resolveRefId(op.ref_id);
+           const result_id = self.spv.allocId();
+           try self.spv.structType(result_id, ids, null);
+           break :blk result_id;
+       },
+       .OpTypeImage => blk: {
+           const sampled_type = try self.resolveRefId(operands[1].ref_id);
+           const result_id = self.spv.allocId();
+           try section.emit(self.gpa, .OpTypeImage, .{
+               .id_result = result_id,
+               .sampled_type = sampled_type,
+               .dim = @enumFromInt(operands[2].value),
+               .depth = operands[3].literal32,
+               .arrayed = operands[4].literal32,
+               .ms = operands[5].literal32,
+               .sampled = operands[6].literal32,
+               .image_format = @enumFromInt(operands[7].value),
+           });
+           break :blk result_id;
+       },
+       .OpTypeSampler => blk: {
+           const result_id = self.spv.allocId();
+           try section.emit(self.gpa, .OpTypeSampler, .{ .id_result = result_id });
+           break :blk result_id;
+       },
+       .OpTypeSampledImage => blk: {
+           const image_type = try self.resolveRefId(operands[1].ref_id);
+           const result_id = self.spv.allocId();
+           try section.emit(self.gpa, .OpTypeSampledImage, .{ .id_result = result_id, .image_type = image_type });
+           break :blk result_id;
+       },
        .OpTypeFunction => blk: {
            const param_operands = operands[2..];
            const return_type = try self.resolveRefId(operands[1].ref_id);
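(For reference, the kind of SPIR-V assembly text the extended type parser is meant to accept — a hypothetical snippet, with operands in the OpTypeImage order used above: sampled type, dim, depth, arrayed, ms, sampled, image format:)

    %f32  = OpTypeFloat 32
    %img  = OpTypeImage %f32 2D 0 0 0 1 Unknown
    %smp  = OpTypeSampler
    %simg = OpTypeSampledImage %img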
@@ -406,18 +440,18 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
        else => switch (self.inst.opcode) {
            .OpEntryPoint => unreachable,
            .OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes,
            .OpVariable => switch (@as(spec.StorageClass, @enumFromInt(operands[2].value))) {
                .Function => &self.func.prologue,
                .Input, .Output => section: {
                    maybe_spv_decl_index = try self.spv.allocDecl(.global);
                    try self.func.decl_deps.put(self.spv.gpa, maybe_spv_decl_index.?, {});
                    // TODO: In theory this can be non-empty if there is an initializer which depends on another global...
                    try self.spv.declareDeclDeps(maybe_spv_decl_index.?, &.{});
            .OpVariable => section: {
                const storage_class: spec.StorageClass = @enumFromInt(operands[2].value);
                if (storage_class == .Function) break :section &self.func.prologue;
                maybe_spv_decl_index = try self.spv.allocDecl(.global);
                if (self.spv.version.minor < 4 and storage_class != .Input and storage_class != .Output) {
                    // Before version 1.4, the interface’s storage classes are limited to the Input and Output
                    break :section &self.spv.sections.types_globals_constants;
                },
                // These don't need to be marked in the dependency system.
                // Probably we should add them anyway, then filter out PushConstant globals.
                else => &self.spv.sections.types_globals_constants,
                }
                try self.func.decl_deps.put(self.spv.gpa, maybe_spv_decl_index.?, {});
                // TODO: In theory this can be non-empty if there is an initializer which depends on another global...
                try self.spv.declareDeclDeps(maybe_spv_decl_index.?, &.{});
                break :section &self.spv.sections.types_globals_constants;
            },
            // Default case - to be worked out further.
            else => &self.func.body,
@@ -333,8 +333,6 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word {
            // Versions
            .v1_0, .v1_1, .v1_2, .v1_3, .v1_4, .v1_5, .v1_6 => {},
            // Features with no dependencies
-           .int8 => try self.addCapability(.Int8),
-           .int16 => try self.addCapability(.Int16),
            .int64 => try self.addCapability(.Int64),
            .float16 => try self.addCapability(.Float16),
            .float64 => try self.addCapability(.Float64),

@@ -343,21 +341,27 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word {
                try self.addExtension("SPV_KHR_16bit_storage");
                try self.addCapability(.StoragePushConstant16);
            },
            .addresses => if (self.hasFeature(.shader)) {
                try self.addExtension("SPV_KHR_physical_storage_buffer");
                try self.addCapability(.PhysicalStorageBufferAddresses);
            } else {
                try self.addCapability(.Addresses);
            .arbitrary_precision_integers => {
                try self.addExtension("SPV_INTEL_arbitrary_precision_integers");
                try self.addCapability(.ArbitraryPrecisionIntegersINTEL);
            },
            .addresses => try self.addCapability(.Addresses),
            // Kernel
            .kernel => try self.addCapability(.Kernel),
            .generic_pointer => try self.addCapability(.GenericPointer),
            .vector16 => try self.addCapability(.Vector16),
            // Shader
            .shader => try self.addCapability(.Shader),
            .physical_storage_buffer => {
                try self.addExtension("SPV_KHR_physical_storage_buffer");
                try self.addCapability(.PhysicalStorageBufferAddresses);
            },
        }
    }
    }
    // These are well supported
    try self.addCapability(.Int8);
    try self.addCapability(.Int16);

    // Emit memory model
    const addressing_model: spec.AddressingModel = blk: {

@@ -610,6 +614,17 @@ pub fn functionType(self: *Module, return_ty_id: IdRef, param_type_ids: []const
    return result_id;
 }

+pub fn constant(self: *Module, result_ty_id: IdRef, value: spec.LiteralContextDependentNumber) !IdRef {
+    const result_id = self.allocId();
+    const section = &self.sections.types_globals_constants;
+    try section.emit(self.gpa, .OpConstant, .{
+        .id_result_type = result_ty_id,
+        .id_result = result_id,
+        .value = value,
+    });
+    return result_id;
+}
+
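(A minimal usage sketch of the new `Module.constant` helper; `module` and `u32_ty_id` are assumed here — the latter being the result id of an already-emitted `OpTypeInt 32 0` — and are not part of the diff:)

    // Emits `OpConstant %u32 42` into the types/globals/constants section and returns its id.
    const forty_two_id = try module.constant(u32_ty_id, .{ .uint32 = 42 });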
 pub fn constBool(self: *Module, value: bool) !IdRef {
     if (self.cache.bool_const[@intFromBool(value)]) |b| return b;
@@ -140,6 +140,8 @@ test {

 // This bug only repros in the root file
 test "deference @embedFile() of a file full of zero bytes" {
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
     const contents = @embedFile("behavior/zero.bin").*;
     try @import("std").testing.expect(contents.len == 456);
     for (contents) |byte| try @import("std").testing.expect(byte == 0);

@@ -165,7 +165,6 @@ test "@bitCast packed structs at runtime and comptime" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const Full = packed struct {
         number: u16,

@@ -226,7 +225,6 @@ test "bitcast packed struct to integer and back" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const LevelUpMove = packed struct {
         move_id: u9,

@@ -22,7 +22,6 @@ test "coerce i8 to i32 and @intCast back" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     var x: i8 = -5;
     var y: i32 = -5;

@@ -36,8 +35,6 @@ test "coerce i8 to i32 and @intCast back" {
 }

 test "coerce non byte-sized integers accross 32bits boundary" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     {
         var v: u21 = 6417;
         _ = &v;

@@ -217,7 +214,6 @@ test "load non byte-sized value in union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

@@ -25,7 +25,6 @@ const PackedUnion = packed union {

 test "packed struct, enum, union parameters in extern function" {
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     testPackedStuff(&(PackedStruct{
         .a = 1,

@@ -133,6 +133,7 @@ test "cmp f16" {
 }

 test "cmp f32" {
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

@@ -142,6 +143,7 @@ test "cmp f32" {
 }

 test "cmp f64" {
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
     if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234

@@ -245,6 +247,7 @@ test "vector cmp f16" {
 }

 test "vector cmp f32" {
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

@@ -69,6 +69,8 @@ test "global loads can affect liveness" {
 }

 test "global const can be self-referential" {
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
     const S = struct {
         self: *const @This(),
         x: u32,

@@ -113,6 +115,8 @@ test "global var can be self-referential" {
 }

 test "global const can be indirectly self-referential" {
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
     const S = struct {
         other: *const @This(),
         x: u32,

@@ -123,7 +123,6 @@ test "correct sizeOf and offsets in packed structs" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const PStruct = packed struct {
         bool_a: bool,

@@ -191,7 +190,6 @@ test "nested packed structs" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S1 = packed struct { a: u8, b: u8, c: u8 };

@@ -257,7 +255,6 @@ test "nested packed struct unaligned" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet

     const S1 = packed struct {

@@ -895,7 +892,6 @@ test "packed struct passed to callconv(.c) function" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {

@@ -944,7 +940,6 @@ test "packed struct initialized in bitcast" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const T = packed struct { val: u8 };

@@ -982,7 +977,6 @@ test "pointer to container level packed struct field" {
 test "store undefined to packed result location" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     var x: u4 = 0;

@@ -992,8 +986,6 @@ test "store undefined to packed result location" {
 }

 test "bitcast back and forth" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     // Originally reported at https://github.com/ziglang/zig/issues/9914
     const S = packed struct { one: u6, two: u1 };
     const s = S{ .one = 0b110101, .two = 0b1 };

@@ -1290,8 +1282,6 @@ test "2-byte packed struct argument in C calling convention" {
 }

 test "packed struct contains optional pointer" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const foo: packed struct {
         a: ?*@This() = null,
     } = .{};

@@ -1299,8 +1289,6 @@ test "packed struct contains optional pointer" {
 }

 test "packed struct equality" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const Foo = packed struct {
         a: u4,
         b: u4,

@@ -1321,8 +1309,6 @@ test "packed struct equality" {
 }

 test "packed struct with signed field" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     var s: packed struct {
         a: i2,
         b: u6,

@@ -137,7 +137,6 @@ test "packed union initialized with a runtime value" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const Fields = packed struct {

@@ -174,8 +173,6 @@ test "assigning to non-active field at comptime" {
 }

 test "comptime packed union of pointers" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const U = packed union {
         a: *const u32,
         b: *const [1]u32,

@@ -9,7 +9,6 @@ test "packed struct explicit backing integer" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S1 = packed struct { a: u8, b: u8, c: u8 };

@@ -287,8 +287,6 @@ test "@ptrCast undefined value at comptime" {
 }

 test "comptime @ptrCast with packed struct leaves value unmodified" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const S = packed struct { three: u3 };
     const st: S = .{ .three = 6 };
     try expect(st.three == 6);

@@ -3,6 +3,8 @@ const builtin = @import("builtin");
 const expectEqual = std.testing.expectEqual;

 test "casting integer address to function pointer" {
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
     addressToFunction();
     comptime addressToFunction();
 }

@@ -233,6 +233,8 @@ test "@sizeOf comparison against zero" {
 }

 test "hardcoded address in typeof expression" {
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
     const S = struct {
         fn func() @TypeOf(@as(*[]u8, @ptrFromInt(0x10)).*[0]) {
             return 0;

@@ -1023,7 +1023,6 @@ test "packed struct with undefined initializers" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = struct {
         const P = packed struct {

@@ -1221,7 +1220,6 @@ test "packed struct aggregate init" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

     const S = struct {

@@ -1971,7 +1969,6 @@ test "struct field default value is a call" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const Z = packed struct {
         a: u32,

@@ -1372,14 +1372,13 @@ test "packed union in packed struct" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = packed struct {
         nested: packed union {
-            val: usize,
+            val: u16,
             foo: u32,
         },
-        bar: u32,
+        bar: u16,

         fn unpack(self: @This()) usize {
             return self.nested.foo;

@@ -1460,7 +1459,6 @@ test "packed union with zero-bit field" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const S = packed struct {
         nested: packed union {

@@ -1479,7 +1477,6 @@ test "packed union with zero-bit field" {
 test "reinterpreting enum value inside packed union" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

     const U = packed union {
         tag: enum(u8) { a, b },

@@ -1527,7 +1524,6 @@ test "defined-layout union field pointer has correct alignment" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO

     const S = struct {
         fn doTheTest(comptime U: type) !void {

@@ -1901,8 +1897,6 @@ test "inner struct initializer uses union layout" {
 }

 test "inner struct initializer uses packed union layout" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const namespace = struct {
         const U = packed union {
             a: packed struct {

@@ -1946,8 +1940,6 @@ test "extern union initialized via reintepreted struct field initializer" {
 }

 test "packed union initialized via reintepreted struct field initializer" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };

     const U = packed union {

@@ -1988,8 +1980,6 @@ test "store of comptime reinterpreted memory to extern union" {
 }

 test "store of comptime reinterpreted memory to packed union" {
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-
     const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };

     const U = packed union {
@@ -0,0 +1,52 @@
export fn elemPtr() void {
    var ptr: [*]u8 = undefined;
    ptr[0] = 0;
}

export fn elemVal() void {
    var ptr: [*]u8 = undefined;
    var val = ptr[0];
    _ = &ptr;
    _ = &val;
}

export fn intFromPtr() void {
    var value: u8 = 0;
    _ = @intFromPtr(&value);
}

export fn ptrFromInt() void {
    var v: u32 = 0x1234;
    var ptr: *u8 = @ptrFromInt(v);
    _ = &v;
    _ = &ptr;
}

export fn ptrPtrArithmetic() void {
    var value0: u8 = 0;
    var value1: u8 = 0;
    _ = &value0 - &value1;
}

export fn ptrIntArithmetic() void {
    var ptr0: [*]u8 = undefined;
    _ = &ptr0;
    _ = ptr0 - 10;
}

// error
// backend=stage2
// target=spirv64-vulkan
//
// :3:8: error: illegal operation on logical pointer of type '[*]u8'
// :3:8: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan
// :8:18: error: illegal operation on logical pointer of type '[*]u8'
// :8:18: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan
// :15:21: error: illegal operation on logical pointer of type '*u8'
// :15:21: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan
// :20:20: error: illegal operation on logical pointer of type '*u8'
// :20:20: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan
// :28:17: error: illegal operation on logical pointer of type '*u8'
// :28:17: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan
// :34:14: error: illegal operation on logical pointer of type '[*]u8'
// :34:14: note: cannot perform arithmetic on pointers with address space 'generic' on target spirv-vulkan
@@ -143,7 +143,7 @@ const test_targets = blk: {
    .{
        .target = std.Target.Query.parse(.{
            .arch_os_abi = "spirv64-vulkan",
-           .cpu_features = "vulkan_v1_2+int8+int16+int64+float16+float64",
+           .cpu_features = "vulkan_v1_2+int64+float16+float64",
        }) catch unreachable,
        .use_llvm = false,
        .use_lld = false,