diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 9e7c49d1a5..80a3e7b07f 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -2195,6 +2195,7 @@ const DeclGen = struct {
 
             .mul_add => try self.airMulAdd(inst),
 
+            .splat => try self.airSplat(inst),
             .reduce, .reduce_optimized => try self.airReduce(inst),
             .shuffle => try self.airShuffle(inst),
 
@@ -2603,6 +2604,7 @@ const DeclGen = struct {
             // Idk why spir-v doesn't have a dedicated abs() instruction in the base
             // instruction set. For now we're just going to negate and check to avoid
             // importing the extinst.
+            // TODO: Make this a call to compiler rt / ext inst
             const neg_id = self.spv.allocId();
             const args = .{
                 .id_result_type = self.typeId(operand_scalar_ty_ref),
@@ -2877,6 +2879,19 @@ const DeclGen = struct {
         return try wip.finalize();
     }
 
+    fn airSplat(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
+        if (self.liveness.isUnused(inst)) return null;
+        const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+        const operand_id = try self.resolve(ty_op.operand);
+        const result_ty = self.typeOfIndex(inst);
+        var wip = try self.elementWise(result_ty);
+        defer wip.deinit();
+        for (wip.results) |*result_id| {
+            result_id.* = operand_id;
+        }
+        return try wip.finalize();
+    }
+
     fn airReduce(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         if (self.liveness.isUnused(inst)) return null;
         const mod = self.module;
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 26d60c337a..9aedac66e5 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -326,7 +326,6 @@ test "vector @splat" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .macos)
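
Note on the lowering above: `airSplat` resolves the scalar operand once and writes that same result id into every per-element slot handed out by `elementWise`, so the finalized value is a vector whose lanes all hold the operand. The snippet below is a minimal, illustrative sketch of the kind of Zig source this lowering targets; the vector length, element type, and test name are arbitrary choices for the example, not taken from the patch.

    const std = @import("std");
    const expect = std.testing.expect;

    test "@splat broadcasts a scalar into every lane" {
        const scalar: u32 = 7;
        // The result type (@Vector(4, u32)) supplies the vector length; every
        // lane ends up holding the same scalar, matching how airSplat reuses
        // one resolved operand id per element.
        const v: @Vector(4, u32) = @splat(scalar);
        try expect(v[0] == 7 and v[3] == 7);
    }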