From cda8f65489af099a5a93a7475f344c615678aa26 Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sat, 19 Mar 2022 19:38:43 -0700 Subject: [PATCH 01/12] behavior tests: use `expect` instead of `expectEqual` in vector.zig --- test/behavior/vector.zig | 72 +++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 7efd018a65..697969f42e 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -3,8 +3,6 @@ const builtin = @import("builtin"); const mem = std.mem; const math = std.math; const expect = std.testing.expect; -const expectEqual = std.testing.expectEqual; -const expectApproxEqRel = std.testing.expectApproxEqRel; const Vector = std.meta.Vector; test "implicit cast vector to array - bool" { @@ -118,10 +116,16 @@ test "implicit cast vector to array" { test "array to vector" { if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO - var foo: f32 = 3.14; - var arr = [4]f32{ foo, 1.5, 0.0, 0.0 }; - var vec: Vector(4, f32) = arr; - _ = vec; + const S = struct { + fn doTheTest() !void { + var foo: f32 = 3.14; + var arr = [4]f32{ foo, 1.5, 0.0, 0.0 }; + var vec: Vector(4, f32) = arr; + try expect(mem.eql(f32, &@as([4]f32, vec), &arr)); + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); } test "vector casts of sizes not divisible by 8" { @@ -160,9 +164,9 @@ test "vector @splat" { fn testForT(comptime N: comptime_int, v: anytype) !void { const T = @TypeOf(v); var vec = @splat(N, v); - try expectEqual(Vector(N, T), @TypeOf(vec)); + try expect(Vector(N, T) == @TypeOf(vec)); var as_array = @as([N]T, vec); - for (as_array) |elem| try expectEqual(v, elem); + for (as_array) |elem| try expect(v == elem); } fn doTheTest() !void { // Splats with multiple-of-8 bit types that fill a 128bit vector. 
@@ -294,27 +298,27 @@ test "vector comparison operators" { { const v1: Vector(4, bool) = [_]bool{ true, false, true, false }; const v2: Vector(4, bool) = [_]bool{ false, true, false, true }; - try expectEqual(@splat(4, true), v1 == v1); - try expectEqual(@splat(4, false), v1 == v2); - try expectEqual(@splat(4, true), v1 != v2); - try expectEqual(@splat(4, false), v2 != v2); + try expect(mem.eql(bool, &@as([4]bool, @splat(4, true)), &@as([4]bool, v1 == v1))); + try expect(mem.eql(bool, &@as([4]bool, @splat(4, false)), &@as([4]bool, v1 == v2))); + try expect(mem.eql(bool, &@as([4]bool, @splat(4, true)), &@as([4]bool, v1 != v2))); + try expect(mem.eql(bool, &@as([4]bool, @splat(4, false)), &@as([4]bool, v2 != v2))); } { const v1 = @splat(4, @as(u32, 0xc0ffeeee)); const v2: Vector(4, c_uint) = v1; const v3 = @splat(4, @as(u32, 0xdeadbeef)); - try expectEqual(@splat(4, true), v1 == v2); - try expectEqual(@splat(4, false), v1 == v3); - try expectEqual(@splat(4, true), v1 != v3); - try expectEqual(@splat(4, false), v1 != v2); + try expect(mem.eql(bool, &@as([4]bool, @splat(4, true)), &@as([4]bool, v1 == v2))); + try expect(mem.eql(bool, &@as([4]bool, @splat(4, false)), &@as([4]bool, v1 == v3))); + try expect(mem.eql(bool, &@as([4]bool, @splat(4, true)), &@as([4]bool, v1 != v3))); + try expect(mem.eql(bool, &@as([4]bool, @splat(4, false)), &@as([4]bool, v1 != v2))); } { // Comptime-known LHS/RHS var v1: @Vector(4, u32) = [_]u32{ 2, 1, 2, 1 }; const v2 = @splat(4, @as(u32, 2)); const v3: @Vector(4, bool) = [_]bool{ true, false, true, false }; - try expectEqual(v3, v1 == v2); - try expectEqual(v3, v2 == v1); + try expect(mem.eql(bool, &@as([4]bool, v3), &@as([4]bool, v1 == v2))); + try expect(mem.eql(bool, &@as([4]bool, v3), &@as([4]bool, v2 == v1))); } } }; @@ -329,20 +333,20 @@ test "vector division operators" { if (!comptime std.meta.trait.isSignedInt(T)) { const d0 = x / y; for (@as([4]T, d0)) |v, i| { - try expectEqual(x[i] / y[i], v); + try expect(x[i] / y[i] == v); } } const d1 = @divExact(x, y); for (@as([4]T, d1)) |v, i| { - try expectEqual(@divExact(x[i], y[i]), v); + try expect(@divExact(x[i], y[i]) == v); } const d2 = @divFloor(x, y); for (@as([4]T, d2)) |v, i| { - try expectEqual(@divFloor(x[i], y[i]), v); + try expect(@divFloor(x[i], y[i]) == v); } const d3 = @divTrunc(x, y); for (@as([4]T, d3)) |v, i| { - try expectEqual(@divTrunc(x[i], y[i]), v); + try expect(@divTrunc(x[i], y[i]) == v); } } @@ -350,16 +354,16 @@ test "vector division operators" { if ((!comptime std.meta.trait.isSignedInt(T)) and @typeInfo(T) != .Float) { const r0 = x % y; for (@as([4]T, r0)) |v, i| { - try expectEqual(x[i] % y[i], v); + try expect(x[i] % y[i] == v); } } const r1 = @mod(x, y); for (@as([4]T, r1)) |v, i| { - try expectEqual(@mod(x[i], y[i]), v); + try expect(@mod(x[i], y[i]) == v); } const r2 = @rem(x, y); for (@as([4]T, r2)) |v, i| { - try expectEqual(@rem(x[i], y[i]), v); + try expect(@rem(x[i], y[i]) == v); } } @@ -411,7 +415,7 @@ test "vector bitwise not operator" { fn doTheTestNot(comptime T: type, x: Vector(4, T)) !void { var y = ~x; for (@as([4]T, y)) |v, i| { - try expectEqual(~x[i], v); + try expect(~x[i] == v); } } fn doTheTest() !void { @@ -444,11 +448,11 @@ test "vector shift operators" { var z0 = xv >> yv; for (@as([N]TX, z0)) |v, i| { - try expectEqual(x[i] >> y[i], v); + try expect(x[i] >> y[i] == v); } var z1 = xv << yv; for (@as([N]TX, z1)) |v, i| { - try expectEqual(x[i] << y[i], v); + try expect(x[i] << y[i] == v); } } fn doTheTestShiftExact(x: anytype, y: anytype, dir: enum { Left, 
Right }) !void { @@ -462,7 +466,7 @@ test "vector shift operators" { var z = if (dir == .Left) @shlExact(xv, yv) else @shrExact(xv, yv); for (@as([N]TX, z)) |v, i| { const check = if (dir == .Left) x[i] << y[i] else x[i] >> y[i]; - try expectEqual(check, v); + try expect(check == v); } } fn doTheTest() !void { @@ -681,9 +685,9 @@ test "saturating add" { const S = struct { fn doTheTest() !void { const u8x3 = std.meta.Vector(3, u8); - try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 255, 254, 1 } +| u8x3{ 1, 2, 255 })); + try expect(mem.eql(u8, &@as([3]u8, u8x3{ 255, 255, 255 }), &@as([3]u8, u8x3{ 255, 254, 1 } +| u8x3{ 1, 2, 255 }))); const i8x3 = std.meta.Vector(3, i8); - try expectEqual(i8x3{ 127, 127, 127 }, (i8x3{ 127, 126, 1 } +| i8x3{ 1, 2, 127 })); + try expect(mem.eql(i8, &@as([3]i8, i8x3{ 127, 127, 127 }), &@as([3]i8, i8x3{ 127, 126, 1 } +| i8x3{ 1, 2, 127 }))); } }; try S.doTheTest(); @@ -695,7 +699,7 @@ test "saturating subtraction" { const S = struct { fn doTheTest() !void { const u8x3 = std.meta.Vector(3, u8); - try expectEqual(u8x3{ 0, 0, 0 }, (u8x3{ 0, 0, 0 } -| u8x3{ 255, 255, 255 })); + try expect(mem.eql(u8, &@as([3]u8, u8x3{ 0, 0, 0 }), &@as([3]u8, u8x3{ 0, 0, 0 } -| u8x3{ 255, 255, 255 }))); } }; try S.doTheTest(); @@ -710,7 +714,7 @@ test "saturating multiplication" { const S = struct { fn doTheTest() !void { const u8x3 = std.meta.Vector(3, u8); - try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 2, 2, 2 } *| u8x3{ 255, 255, 255 })); + try expect(mem.eql(u8, &@as([3]u8, u8x3{ 255, 255, 255 }), &@as([3]u8, u8x3{ 2, 2, 2 } *| u8x3{ 255, 255, 255 }))); } }; @@ -723,7 +727,7 @@ test "saturating shift-left" { const S = struct { fn doTheTest() !void { const u8x3 = std.meta.Vector(3, u8); - try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 255, 255, 255 } <<| u8x3{ 1, 1, 1 })); + try expect(mem.eql(u8, &@as([3]u8, u8x3{ 255, 255, 255 }), &@as([3]u8, u8x3{ 255, 255, 255 } <<| u8x3{ 1, 1, 1 }))); } }; try S.doTheTest(); From 862e63f535ec8d65e33ed7ea67eb9cf03bfc7d6a Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sat, 19 Mar 2022 23:33:43 -0700 Subject: [PATCH 02/12] stage2: fix typo in print_air.zig --- src/print_air.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/print_air.zig b/src/print_air.zig index fb970e3b08..f3ccc70a46 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -470,8 +470,8 @@ const Writer = struct { } fn writeFieldParentPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const pl_op = w.air.instructions.items(.data)[inst].ty_pl; - const extra = w.air.extraData(Air.FieldParentPtr, pl_op.payload).data; + const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; + const extra = w.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; try w.writeOperand(s, inst, 0, extra.field_ptr); try s.print(", {d}", .{extra.field_index}); From 0f4830704171b734bb4a0235fc14809282457dc3 Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sat, 19 Mar 2022 23:44:55 -0700 Subject: [PATCH 03/12] stage2: add AIR instruction `cmp_vector` The existing `cmp_*` instructions get their result type from `lhs`, but a vector comparison always returns a vector of bools, with only its length derived from the operands. This necessitates a new AIR instruction.
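For illustration, here is a minimal hand-written sketch of the semantics the new instruction has to model (it mirrors the behavior tests earlier in this series; it is not code from this change):

    var v1: @Vector(4, u32) = [_]u32{ 2, 1, 2, 1 };
    const v2 = @splat(4, @as(u32, 2));
    const eq = v1 == v2; // @Vector(4, bool){ true, false, true, false }

The element type of `eq` is always `bool` no matter what the operand element type is; only the length (4) comes from the operands, so the result type cannot be taken from `lhs` the way the scalar `cmp_*` instructions take it.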
--- src/Air.zig | 19 +++++++++++++++++++ src/Liveness.zig | 4 ++++ src/Sema.zig | 14 ++++++++++++++ src/arch/aarch64/CodeGen.zig | 6 ++++++ src/arch/arm/CodeGen.zig | 7 ++++++- src/arch/riscv64/CodeGen.zig | 6 ++++++ src/arch/wasm/CodeGen.zig | 6 ++++++ src/arch/x86_64/CodeGen.zig | 6 ++++++ src/codegen/c.zig | 2 ++ src/codegen/llvm.zig | 6 ++++++ src/print_air.zig | 11 +++++++++++ 11 files changed, 86 insertions(+), 1 deletion(-) diff --git a/src/Air.zig b/src/Air.zig index 2d717f442d..8c24108abd 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -308,6 +308,10 @@ pub const Inst = struct { /// `!=`. Result type is always bool. /// Uses the `bin_op` field. cmp_neq, + /// Comparison between two vectors. + /// Result type is always a vector of bools. + /// Uses the `ty_pl` field, payload is `VectorCmp`. + cmp_vector, /// Conditional branch. /// Result type is always noreturn; no instructions in a block follow this one. @@ -781,6 +785,20 @@ pub const Shuffle = struct { mask_len: u32, }; +pub const VectorCmp = struct { + lhs: Inst.Ref, + rhs: Inst.Ref, + op: u32, + + pub fn compareOperator(self: VectorCmp) std.math.CompareOperator { + return @intToEnum(std.math.CompareOperator, @truncate(u3, self.op)); + } + + pub fn encodeOp(compare_operator: std.math.CompareOperator) u32 { + return @enumToInt(compare_operator); + } +}; + /// Trailing: /// 0. `Inst.Ref` for every outputs_len /// 1. `Inst.Ref` for every inputs_len @@ -942,6 +960,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .aggregate_init, .union_init, .field_parent_ptr, + .cmp_vector, => return air.getRefType(datas[inst].ty_pl.ty), .not, diff --git a/src/Liveness.zig b/src/Liveness.zig index dd93d44f72..79521e7a94 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -441,6 +441,10 @@ fn analyzeInst( const reduce = inst_datas[inst].reduce; return trackOperands(a, new_set, inst, main_tomb, .{ reduce.operand, .none, .none }); }, + .cmp_vector => { + const extra = a.air.extraData(Air.VectorCmp, inst_datas[inst].ty_pl.payload).data; + return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none }); + }, .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); diff --git a/src/Sema.zig b/src/Sema.zig index 7674121ba6..20622cb98a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -397,6 +397,20 @@ pub const Block = struct { }); } + fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator, vector_ty: Air.Inst.Ref) !Air.Inst.Ref { + return block.addInst(.{ .tag = .cmp_vector, .data = .{ .ty_pl = .{ .ty = vector_ty, .payload = try block.sema.addExtra(Air.VectorCmp{ .lhs = lhs, .rhs = rhs, .op = Air.VectorCmp.encodeOp(cmp_op), }), } }, }); } + fn addAggregateInit( block: *Block, aggregate_ty: Type, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index c9121c8859..9c2d5890a7 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -577,6 +577,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .cmp_gte => try self.airCmp(inst, .gte), .cmp_gt => try self.airCmp(inst, .gt), .cmp_neq => try self.airCmp(inst, .neq), + .cmp_vector => try self.airCmpVector(inst), .bool_and => try self.airBinOp(inst), .bool_or => try self.airBinOp(inst), @@ -2713,6 +2714,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airCmpVector(self: *Self,
inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch}); +} + fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 921bc92c72..0b9f9eb2e2 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -567,6 +567,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .cmp_gte => try self.airCmp(inst, .gte), .cmp_gt => try self.airCmp(inst, .gt), .cmp_neq => try self.airCmp(inst, .neq), + .cmp_vector => try self.airCmpVector(inst), .bool_and => try self.airBinOp(inst), .bool_or => try self.airBinOp(inst), @@ -2894,7 +2895,6 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const lhs_ty = self.air.typeOf(bin_op.lhs); switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM cmp vectors", .{}), .Optional => return self.fail("TODO ARM cmp optionals", .{}), .Float => return self.fail("TODO ARM cmp floats", .{}), .Int, .Bool, .Pointer, .ErrorSet, .Enum => { @@ -2929,6 +2929,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch}); +} + fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 14beedd2a9..3b73fef51f 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -537,6 +537,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .cmp_gte => try self.airCmp(inst, .gte), .cmp_gt => try self.airCmp(inst, .gt), .cmp_neq => try self.airCmp(inst, .neq), + .cmp_vector => try self.airCmpVector(inst), .bool_and => try self.airBoolOp(inst), .bool_or => try self.airBoolOp(inst), @@ -1791,6 +1792,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { // return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch}); +} + fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index c60c321572..8d515e9365 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1309,6 +1309,7 @@ fn genInst(self: *Self, inst: Air.Inst.Index) !WValue { .cmp_lte => self.airCmp(inst, .lte), .cmp_lt => self.airCmp(inst, .lt), .cmp_neq => self.airCmp(inst, .neq), + .cmp_vector => self.airCmpVector(inst), .array_elem_val => self.airArrayElemVal(inst), .array_to_slice => self.airArrayToSlice(inst), @@ -2222,6 +2223,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner return cmp_tmp; } +fn airCmpVector(self: *Self, inst: Air.Inst.Index) InnerError!WValue { + _ = inst; + return self.fail("TODO implement airCmpVector for wasm", .{}); +} + fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const br = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(br.block_inst).?; diff --git 
a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 72f12cf4e9..598511a631 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -658,6 +658,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .cmp_gte => try self.airCmp(inst, .gte), .cmp_gt => try self.airCmp(inst, .gt), .cmp_neq => try self.airCmp(inst, .neq), + .cmp_vector => try self.airCmpVector(inst), .bool_and => try self.airBoolOp(inst), .bool_or => try self.airBoolOp(inst), @@ -3699,6 +3700,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch}); +} + fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const payload = try self.addExtra(Mir.DbgLineColumn{ diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 23ccdc007b..f5a1036479 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1715,6 +1715,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .cmp_eq => try airEquality(f, inst, "((", "=="), .cmp_neq => try airEquality(f, inst, "!((", "!="), + .cmp_vector => return f.fail("TODO: C backend: implement binary op for tag '{s}'", .{@tagName(Air.Inst.Tag.cmp_vector)}), + // bool_and and bool_or are non-short-circuit operations .bool_and => try airBinOp(f, inst, " & "), .bool_or => try airBinOp(f, inst, " | "), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 139c1f25cb..edf8992c56 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3375,6 +3375,7 @@ pub const FuncGen = struct { .cmp_lt => try self.airCmp(inst, .lt), .cmp_lte => try self.airCmp(inst, .lte), .cmp_neq => try self.airCmp(inst, .neq), + .cmp_vector => try self.airCmpVector(inst), .is_non_null => try self.airIsNonNull(inst, false, false, .NE), .is_non_null_ptr => try self.airIsNonNull(inst, true , false, .NE), @@ -3640,6 +3641,11 @@ pub const FuncGen = struct { return self.cmp(lhs, rhs, operand_ty, op); } + fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + _ = inst; + return self.todo("implement airCmpVector"); + } + fn cmp( self: *FuncGen, lhs: *const llvm.Value, diff --git a/src/print_air.zig b/src/print_air.zig index f3ccc70a46..c6b27a6cd6 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -266,6 +266,7 @@ const Writer = struct { .mul_add => try w.writeMulAdd(s, inst), .shuffle => try w.writeShuffle(s, inst), .reduce => try w.writeReduce(s, inst), + .cmp_vector => try w.writeCmpVector(s, inst), .add_with_overflow, .sub_with_overflow, @@ -402,6 +403,16 @@ const Writer = struct { try s.print(", {s}", .{@tagName(reduce.operation)}); } + fn writeCmpVector(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; + const extra = w.air.extraData(Air.VectorCmp, ty_pl.payload).data; + + try s.print("{s}, ", .{@tagName(extra.compareOperator())}); + try w.writeOperand(s, inst, 0, extra.lhs); + try s.writeAll(", "); + try w.writeOperand(s, inst, 1, extra.rhs); + } + fn writeFence(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const atomic_order = w.air.instructions.items(.data)[inst].fence; From b96699059c6224b0a425f2fe5010469ef6049cad Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sat, 19 Mar 
2022 23:49:53 -0700 Subject: [PATCH 04/12] stage2: implement `cmp_vector` for LLVM backend --- src/codegen/llvm.zig | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index edf8992c56..a90a2eeda3 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3642,8 +3642,17 @@ pub const FuncGen = struct { } fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { - _ = inst; - return self.todo("implement airCmpVector"); + if (self.liveness.isUnused(inst)) return null; + + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data; + + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + const vec_ty = self.air.typeOf(extra.lhs); + const cmp_op = extra.compareOperator(); + + return self.cmp(lhs, rhs, vec_ty, cmp_op); } fn cmp( @@ -3656,9 +3665,10 @@ pub const FuncGen = struct { var int_buffer: Type.Payload.Bits = undefined; var opt_buffer: Type.Payload.ElemType = undefined; - const int_ty = switch (operand_ty.zigTypeTag()) { - .Enum => operand_ty.intTagType(&int_buffer), - .Int, .Bool, .Pointer, .ErrorSet => operand_ty, + const scalar_ty = operand_ty.scalarType(); + const int_ty = switch (scalar_ty.zigTypeTag()) { + .Enum => scalar_ty.intTagType(&int_buffer), + .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(&opt_buffer); if (!payload_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.isPtrLikeOptional()) { From 961248cde32e0d572225c6c1d367b960540ae220 Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sat, 19 Mar 2022 23:57:29 -0700 Subject: [PATCH 05/12] stage2: make more instructions vector-compatible in LLVM backend --- src/codegen/llvm.zig | 119 +++++++++++++++++++++++++++++-------------- 1 file changed, 80 insertions(+), 39 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index a90a2eeda3..13f4f7d01b 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3960,10 +3960,11 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); + const operand_scalar_ty = operand_ty.scalarType(); const dest_ty = self.air.typeOfIndex(inst); const dest_llvm_ty = try self.dg.llvmType(dest_ty); - if (operand_ty.isSignedInt()) { + if (operand_scalar_ty.isSignedInt()) { return self.builder.buildSIToFP(operand, dest_llvm_ty, ""); } else { return self.builder.buildUIToFP(operand, dest_llvm_ty, ""); @@ -3977,11 +3978,12 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const dest_ty = self.air.typeOfIndex(inst); + const dest_scalar_ty = dest_ty.scalarType(); const dest_llvm_ty = try self.dg.llvmType(dest_ty); // TODO set fast math flag - if (dest_ty.isSignedInt()) { + if (dest_scalar_ty.isSignedInt()) { return self.builder.buildFPToSI(operand, dest_llvm_ty, ""); } else { return self.builder.buildFPToUI(operand, dest_llvm_ty, ""); @@ -4912,9 +4914,10 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isAnyFloat()) return self.builder.buildFAdd(lhs, rhs, ""); - if (inst_ty.isSignedInt()) return 
self.builder.buildNSWAdd(lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.builder.buildFAdd(lhs, rhs, ""); + if (scalar_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, ""); return self.builder.buildNUWAdd(lhs, rhs, ""); } @@ -4935,9 +4938,10 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isAnyFloat()) return self.todo("saturating float add", .{}); - if (inst_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); + if (scalar_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, ""); return self.builder.buildUAddSat(lhs, rhs, ""); } @@ -4949,9 +4953,10 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isAnyFloat()) return self.builder.buildFSub(lhs, rhs, ""); - if (inst_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.builder.buildFSub(lhs, rhs, ""); + if (scalar_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, ""); return self.builder.buildNUWSub(lhs, rhs, ""); } @@ -4972,9 +4977,10 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); - if (inst_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); + if (scalar_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, ""); return self.builder.buildUSubSat(lhs, rhs, ""); } @@ -4985,9 +4991,10 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isAnyFloat()) return self.builder.buildFMul(lhs, rhs, ""); - if (inst_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.builder.buildFMul(lhs, rhs, ""); + if (scalar_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, ""); return self.builder.buildNUWMul(lhs, rhs, ""); } @@ -5008,9 +5015,10 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); - if (inst_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, ""); + if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); + if (scalar_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, ""); return self.builder.buildUMulFixSat(lhs, rhs, ""); } @@ -5031,12 +5039,13 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isRuntimeFloat()) { + if (scalar_ty.isRuntimeFloat()) { const result = self.builder.buildFDiv(lhs, rhs, ""); return 
self.callTrunc(result, inst_ty); } - if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, ""); + if (scalar_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, ""); return self.builder.buildUDiv(lhs, rhs, ""); } @@ -5047,12 +5056,13 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isRuntimeFloat()) { + if (scalar_ty.isRuntimeFloat()) { const result = self.builder.buildFDiv(lhs, rhs, ""); return try self.callFloor(result, inst_ty); } - if (inst_ty.isSignedInt()) { + if (scalar_ty.isSignedInt()) { // const d = @divTrunc(a, b); // const r = @rem(a, b); // return if (r == 0) d else d - ((a < 0) ^ (b < 0)); @@ -5078,9 +5088,10 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, ""); - if (inst_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, ""); + if (scalar_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, ""); + if (scalar_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, ""); return self.builder.buildExactUDiv(lhs, rhs, ""); } @@ -5091,9 +5102,10 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isRuntimeFloat()) return self.builder.buildFRem(lhs, rhs, ""); - if (inst_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, ""); + if (scalar_ty.isRuntimeFloat()) return self.builder.buildFRem(lhs, rhs, ""); + if (scalar_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, ""); return self.builder.buildURem(lhs, rhs, ""); } @@ -5105,8 +5117,9 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); const inst_llvm_ty = try self.dg.llvmType(inst_ty); + const scalar_ty = inst_ty.scalarType(); - if (inst_ty.isRuntimeFloat()) { + if (scalar_ty.isRuntimeFloat()) { const a = self.builder.buildFRem(lhs, rhs, ""); const b = self.builder.buildFAdd(a, rhs, ""); const c = self.builder.buildFRem(b, rhs, ""); @@ -5114,7 +5127,7 @@ pub const FuncGen = struct { const ltz = self.builder.buildFCmp(.OLT, lhs, zero, ""); return self.builder.buildSelect(ltz, c, a, ""); } - if (inst_ty.isSignedInt()) { + if (scalar_ty.isSignedInt()) { const a = self.builder.buildSRem(lhs, rhs, ""); const b = self.builder.buildNSWAdd(a, rhs, ""); const c = self.builder.buildSRem(b, rhs, ""); @@ -5339,15 +5352,22 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_type = self.air.typeOf(bin_op.lhs); + + const lhs_ty = self.air.typeOf(bin_op.lhs); + const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_scalar_ty = lhs_ty.scalarType(); + const rhs_scalar_ty = rhs_ty.scalarType(); + const tg = self.dg.module.getTarget(); - const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") + + const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < 
lhs_scalar_ty.bitSize(tg)) + self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") else rhs; - if (lhs_type.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, ""); + if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, ""); return self.builder.buildNUWShl(lhs, casted_rhs, ""); } @@ -5355,11 +5375,18 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); + const lhs_type = self.air.typeOf(bin_op.lhs); + const rhs_type = self.air.typeOf(bin_op.rhs); + const lhs_scalar_ty = lhs_type.scalarType(); + const rhs_scalar_ty = rhs_type.scalarType(); + const tg = self.dg.module.getTarget(); - const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg)) + + const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") else rhs; @@ -5370,31 +5397,45 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_type = self.air.typeOf(bin_op.lhs); + + const lhs_ty = self.air.typeOf(bin_op.lhs); + const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_scalar_ty = lhs_ty.scalarType(); + const rhs_scalar_ty = rhs_ty.scalarType(); + const tg = self.dg.module.getTarget(); - const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") + + const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") else rhs; - if (lhs_type.isSignedInt()) return self.builder.buildSShlSat(lhs, casted_rhs, ""); + if (lhs_scalar_ty.isSignedInt()) return self.builder.buildSShlSat(lhs, casted_rhs, ""); return self.builder.buildUShlSat(lhs, casted_rhs, ""); } fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*const llvm.Value { - if (self.liveness.isUnused(inst)) - return null; + if (self.liveness.isUnused(inst)) return null; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_type = self.air.typeOf(bin_op.lhs); + + const lhs_ty = self.air.typeOf(bin_op.lhs); + const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_scalar_ty = lhs_ty.scalarType(); + const rhs_scalar_ty = rhs_ty.scalarType(); + const tg = self.dg.module.getTarget(); - const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") + + const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") else rhs; - const is_signed_int = self.air.typeOfIndex(inst).isSignedInt(); + const is_signed_int = lhs_scalar_ty.isSignedInt(); if (is_exact) { if (is_signed_int) { From 4e357151a54c9a2f373cef59e2718a61cfbe3658 Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sun, 20 Mar 2022 00:00:13 -0700 Subject: [PATCH 06/12] stage2: align store for vector-to-array bitcast in LLVM backend This was causing a very rare segfault when LLVM would emit `vmovdqa` using an unaligned memory operand on the 
stack. --- src/codegen/llvm.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 13f4f7d01b..107b059765 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -5563,7 +5563,8 @@ pub const FuncGen = struct { if (bitcast_ok) { const llvm_vector_ty = try self.dg.llvmType(operand_ty); const casted_ptr = self.builder.buildBitCast(array_ptr, llvm_vector_ty.pointerType(0), ""); - _ = self.builder.buildStore(operand, casted_ptr); + const llvm_store = self.builder.buildStore(operand, casted_ptr); + llvm_store.setAlignment(inst_ty.abiAlignment(target)); } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. From 2d8fef5680dc8218596e701f91d918c4aa7215bc Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sun, 20 Mar 2022 00:06:23 -0700 Subject: [PATCH 07/12] stage2: base bool binop AIR return types on operand type This allows vector-of-bools operands to return a vector of bools. --- src/Air.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 8c24108abd..e0f765ddc0 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -904,6 +904,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .shl_sat, .min, .max, + .bool_and, + .bool_or, => return air.typeOf(datas[inst].bin_op.lhs), .sqrt, @@ -935,8 +937,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .is_non_err, .is_err_ptr, .is_non_err_ptr, - .bool_and, - .bool_or, => return Type.initTag(.bool), .const_ty => return Type.initTag(.type), From 3f4676901a8c02d9d7069b284aa848d685c3975c Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sun, 20 Mar 2022 00:29:44 -0700 Subject: [PATCH 08/12] stage2: return `Value.zero` when truncating int to 0 bits at comptime --- src/value.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/value.zig b/src/value.zig index 76ab20c7ab..c5e082485a 100644 --- a/src/value.zig +++ b/src/value.zig @@ -3359,6 +3359,8 @@ pub const Value = extern union { } pub fn intTrunc(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value { + if (bits == 0) return Value.zero; + var val_space: Value.BigIntSpace = undefined; const val_bigint = val.toBigInt(&val_space); From afdcfb005ea32849f30e2abd7361ce1f33d0ee74 Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sun, 20 Mar 2022 00:38:12 -0700 Subject: [PATCH 09/12] Sema: make most instructions vector-agnostic Made most `Value` functions require a `Type`. If the provided type is a vector, the operation is automatically vectorized and a vector value is returned. The Sema side can then become vectorized with minimal changes. There are already a few manually vectorized instructions, but we can simplify those later.
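Concretely, the call-site change looks like this (names simplified from the hunks below; the `value.zig` side of this diff is not shown in this excerpt):

    // Before: scalar-only; the operation did not know the result type.
    const val = try lhs_val.intAdd(rhs_val, sema.arena);
    // After: the resolved type is threaded through; when it is a vector,
    // the operation applies elementwise and yields a vector value.
    const val = try lhs_val.intAdd(rhs_val, resolved_type, sema.arena);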
--- src/Sema.zig | 513 ++++++++++++++++++--------------- src/value.zig | 771 +++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 1006 insertions(+), 278 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 20622cb98a..500dec5246 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2105,7 +2105,7 @@ fn zirEnumDecl( }); } else if (any_values) { const tag_val = if (last_tag_val) |val| - try val.intAdd(Value.one, sema.arena) + try val.intAdd(Value.one, enum_obj.tag_ty, sema.arena) else Value.zero; last_tag_val = tag_val; @@ -8192,14 +8192,22 @@ fn zirShl( defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const src = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); + const lhs_ty = sema.typeOf(lhs); + const rhs_ty = sema.typeOf(rhs); + const target = sema.mod.getTarget(); + try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); + + const scalar_ty = lhs_ty.scalarType(); + const scalar_rhs_ty = rhs_ty.scalarType(); // TODO coerce rhs if air_tag is not shl_sat - const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, sema.typeOf(rhs)); + const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty); const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs); const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs); @@ -8213,35 +8221,31 @@ fn zirShl( } } - const lhs_ty = sema.typeOf(lhs); - const rhs_ty = sema.typeOf(rhs); - const target = sema.mod.getTarget(); - const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); const rhs_val = maybe_rhs_val orelse break :rs rhs_src; const val = switch (air_tag) { .shl_exact => val: { - const shifted = try lhs_val.shl(rhs_val, sema.arena); - if (lhs_ty.zigTypeTag() == .ComptimeInt) { + const shifted = try lhs_val.shl(rhs_val, lhs_ty, sema.arena); + if (scalar_ty.zigTypeTag() == .ComptimeInt) { break :val shifted; } - const int_info = lhs_ty.intInfo(target); - const truncated = try shifted.intTrunc(sema.arena, int_info.signedness, int_info.bits); - if (truncated.compareHetero(.eq, shifted)) { + const int_info = scalar_ty.intInfo(target); + const truncated = try shifted.intTrunc(lhs_ty, sema.arena, int_info.signedness, int_info.bits); + if (truncated.compare(.eq, shifted, lhs_ty)) { break :val shifted; } return sema.addConstUndef(lhs_ty); }, - .shl_sat => if (lhs_ty.zigTypeTag() == .ComptimeInt) - try lhs_val.shl(rhs_val, sema.arena) + .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt) + try lhs_val.shl(rhs_val, lhs_ty, sema.arena) else try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, target), - .shl => if (lhs_ty.zigTypeTag() == .ComptimeInt) - try lhs_val.shl(rhs_val, sema.arena) + .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt) + try lhs_val.shl(rhs_val, lhs_ty, sema.arena) else try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, target), @@ -8256,7 +8260,7 @@ fn zirShl( const new_rhs = if (air_tag == .shl_sat) rhs: { // Limit the RHS type for saturating shl to be an integer as small as the LHS. 
if (rhs_is_comptime_int or - rhs_ty.intInfo(target).bits > lhs_ty.intInfo(target).bits) + scalar_rhs_ty.intInfo(target).bits > scalar_ty.intInfo(target).bits) { const max_int = try sema.addConstant( lhs_ty, @@ -8283,15 +8287,18 @@ fn zirShr( defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const src = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); + const lhs_ty = sema.typeOf(lhs); + const rhs_ty = sema.typeOf(rhs); + try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const runtime_src = if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| rs: { if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { - const lhs_ty = sema.typeOf(lhs); if (lhs_val.isUndef() or rhs_val.isUndef()) { return sema.addConstUndef(lhs_ty); } @@ -8301,13 +8308,12 @@ fn zirShr( } if (air_tag == .shr_exact) { // Detect if any ones would be shifted out. - const bits = @intCast(u16, rhs_val.toUnsignedInt()); - const truncated = try lhs_val.intTrunc(sema.arena, .unsigned, bits); + const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val); if (!truncated.compareWithZero(.eq)) { return sema.addConstUndef(lhs_ty); } } - const val = try lhs_val.shr(rhs_val, sema.arena); + const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena); return sema.addConstant(lhs_ty, val); } else { // Even if lhs is not comptime known, we can still deduce certain things based @@ -8342,32 +8348,15 @@ fn zirBitwise( const rhs = sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); + try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } }); - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); - const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - - const scalar_type = if (resolved_type.zigTypeTag() == .Vector) - resolved_type.elemType() - else - resolved_type; - + const scalar_type = resolved_type.scalarType(); const scalar_tag = scalar_type.zigTypeTag(); - if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { - if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { - return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ - lhs_ty.arrayLen(), - rhs_ty.arrayLen(), - }); - } - } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { - return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ - lhs_ty, - rhs_ty, - }); - } + const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); + const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; @@ -8377,16 +8366,13 @@ fn zirBitwise( if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { - if (resolved_type.zigTypeTag() == .Vector) { - return sema.fail(block, src, "TODO implement zirBitwise for vectors at comptime", .{}); - } const 
result_val = switch (air_tag) { - .bit_and => try lhs_val.bitwiseAnd(rhs_val, sema.arena), - .bit_or => try lhs_val.bitwiseOr(rhs_val, sema.arena), - .xor => try lhs_val.bitwiseXor(rhs_val, sema.arena), + .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena), + .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena), + .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena), else => unreachable, }; - return sema.addConstant(scalar_type, result_val); + return sema.addConstant(resolved_type, result_val); } } @@ -8413,9 +8399,9 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { const target = sema.mod.getTarget(); if (val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag() == .Vector) { - const vec_len = try sema.usizeCast(block, operand_src, operand_type.arrayLen()); + const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen()); var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems) |*elem, i| { @@ -8427,8 +8413,8 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. try Value.Tag.aggregate.create(sema.arena, elems), ); } else { - const result_val = try val.bitwiseNot(scalar_type, sema.arena, target); - return sema.addConstant(scalar_type, result_val); + const result_val = try val.bitwiseNot(operand_type, sema.arena, target); + return sema.addConstant(operand_type, result_val); } } @@ -8780,8 +8766,19 @@ fn zirNegate( const src = inst_data.src(); const lhs_src = src; const rhs_src = src; // TODO better source location - const lhs = sema.resolveInst(.zero); + const rhs = sema.resolveInst(inst_data.operand); + const rhs_ty = sema.typeOf(rhs); + const rhs_scalar_ty = rhs_ty.scalarType(); + + if (tag_override == .sub and rhs_scalar_ty.isUnsignedInt()) { + return sema.fail(block, src, "negation of type '{}'", .{rhs_ty}); + } + + const lhs = if (rhs_ty.zigTypeTag() == .Vector) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + else + sema.resolveInst(.zero); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } @@ -8999,18 +8996,8 @@ fn analyzeArithmetic( const rhs_ty = sema.typeOf(rhs); const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); - if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) { - if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { - return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ - lhs_ty.arrayLen(), rhs_ty.arrayLen(), - }); - } - return sema.fail(block, src, "TODO implement support for vectors in Sema.analyzeArithmetic", .{}); - } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) { - return sema.fail(block, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ - lhs_ty, rhs_ty, - }); - } + try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); + if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) { .One, .Slice => {}, .Many, .C => { @@ -9033,15 +9020,13 @@ fn analyzeArithmetic( const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src }, }); + const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try 
sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_type = if (resolved_type.zigTypeTag() == .Vector) - resolved_type.elemType() - else - resolved_type; - - const scalar_tag = scalar_type.zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(); + const rhs_scalar_ty = rhs_ty.scalarType(); + const scalar_tag = resolved_type.scalarType().zigTypeTag(); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; @@ -9075,7 +9060,7 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } } if (rhs_val.compareWithZero(.eq)) { @@ -9087,19 +9072,19 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( - scalar_type, - try lhs_val.intAdd(rhs_val, sema.arena), + resolved_type, + try lhs_val.intAdd(rhs_val, resolved_type, sema.arena), ); } else { return sema.addConstant( - scalar_type, - try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatAdd(rhs_val, resolved_type, sema.arena, target), ); } } else break :rs .{ .src = rhs_src, .air_tag = .add }; @@ -9116,15 +9101,15 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { return sema.addConstant( - scalar_type, - try lhs_val.numberAddWrap(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.numberAddWrap(rhs_val, resolved_type, sema.arena, target), ); } else break :rs .{ .src = lhs_src, .air_tag = .addwrap }; } else break :rs .{ .src = rhs_src, .air_tag = .addwrap }; @@ -9140,18 +9125,18 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intAdd(rhs_val, sema.arena) + try lhs_val.intAdd(rhs_val, resolved_type, sema.arena) else - try lhs_val.intAddSat(rhs_val, scalar_type, sema.arena, target); + try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, target); - return sema.addConstant(scalar_type, val); + return sema.addConstant(resolved_type, val); } else break :rs .{ .src = lhs_src, .air_tag = .add_sat }; } else break :rs .{ .src = rhs_src, .air_tag = .add_sat }; }, @@ -9168,7 +9153,7 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } } if (rhs_val.compareWithZero(.eq)) { @@ -9180,19 +9165,19 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( - scalar_type, - try lhs_val.intSub(rhs_val, sema.arena), + resolved_type, + try lhs_val.intSub(rhs_val, resolved_type, sema.arena), ); } else { return sema.addConstant( - scalar_type, - try lhs_val.floatSub(rhs_val, scalar_type, sema.arena, 
target), + resolved_type, + try lhs_val.floatSub(rhs_val, resolved_type, sema.arena, target), ); } } else break :rs .{ .src = rhs_src, .air_tag = .sub }; @@ -9204,7 +9189,7 @@ fn analyzeArithmetic( // If either of the operands are undefined, the result is undefined. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; @@ -9212,12 +9197,12 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { return sema.addConstant( - scalar_type, - try lhs_val.numberSubWrap(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.numberSubWrap(rhs_val, resolved_type, sema.arena, target), ); } else break :rs .{ .src = rhs_src, .air_tag = .subwrap }; } else break :rs .{ .src = lhs_src, .air_tag = .subwrap }; @@ -9228,7 +9213,7 @@ fn analyzeArithmetic( // If either of the operands are undefined, result is undefined. if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (rhs_val.compareWithZero(.eq)) { return casted_lhs; @@ -9236,15 +9221,15 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intSub(rhs_val, sema.arena) + try lhs_val.intSub(rhs_val, resolved_type, sema.arena) else - try lhs_val.intSubSat(rhs_val, scalar_type, sema.arena, target); + try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, target); - return sema.addConstant(scalar_type, val); + return sema.addConstant(resolved_type, val); } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat }; } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat }; }, @@ -9274,7 +9259,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } } } @@ -9288,27 +9273,27 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { + if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { - return sema.addConstUndef(scalar_type); + if (rhs_val.compare(.neq, Value.negative_one, rhs_ty)) { + return sema.addConstUndef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( - scalar_type, - try lhs_val.intDiv(rhs_val, sema.arena), + resolved_type, + try lhs_val.intDiv(rhs_val, resolved_type, sema.arena), ); } else { return sema.addConstant( - scalar_type, - try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target), ); } } else { @@ -9349,7 +9334,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, 
Value.zero); } } } @@ -9363,27 +9348,27 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { + if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { - return sema.addConstUndef(scalar_type); + if (rhs_val.compare(.neq, Value.negative_one, rhs_ty)) { + return sema.addConstUndef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( - scalar_type, - try lhs_val.intDiv(rhs_val, sema.arena), + resolved_type, + try lhs_val.intDiv(rhs_val, resolved_type, sema.arena), ); } else { return sema.addConstant( - scalar_type, - try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatDivTrunc(rhs_val, resolved_type, sema.arena, target), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_trunc }; @@ -9412,7 +9397,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } } } @@ -9426,27 +9411,27 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { + if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { - return sema.addConstUndef(scalar_type); + if (rhs_val.compare(.neq, Value.negative_one, rhs_ty)) { + return sema.addConstUndef(resolved_type); } } return sema.failWithUseOfUndef(block, rhs_src); } - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { if (is_int) { return sema.addConstant( - scalar_type, - try lhs_val.intDivFloor(rhs_val, sema.arena), + resolved_type, + try lhs_val.intDivFloor(rhs_val, resolved_type, sema.arena), ); } else { return sema.addConstant( - scalar_type, - try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatDivFloor(rhs_val, resolved_type, sema.arena, target), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_floor }; @@ -9474,7 +9459,7 @@ fn analyzeArithmetic( return sema.failWithUseOfUndef(block, rhs_src); } else { if (lhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } } } @@ -9491,14 +9476,14 @@ fn analyzeArithmetic( if (is_int) { // TODO: emit compile error if there is a remainder return sema.addConstant( - scalar_type, - try lhs_val.intDiv(rhs_val, sema.arena), + resolved_type, + try lhs_val.intDiv(rhs_val, resolved_type, sema.arena), ); } else { // TODO: emit compile error if there is a remainder return sema.addConstant( - scalar_type, - try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatDiv(rhs_val, resolved_type, sema.arena, target), ); } } else break :rs .{ .src = rhs_src, .air_tag = .div_exact }; @@ -9516,9 +9501,9 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } - if 
(lhs_val.compare(.eq, Value.one, scalar_type)) { + if (lhs_val.compare(.eq, Value.one, lhs_ty)) { return casted_rhs; } } @@ -9528,13 +9513,13 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } } if (rhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } - if (rhs_val.compare(.eq, Value.one, scalar_type)) { + if (rhs_val.compare(.eq, Value.one, rhs_ty)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -9542,18 +9527,18 @@ fn analyzeArithmetic( if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } } if (is_int) { return sema.addConstant( - scalar_type, - try lhs_val.intMul(rhs_val, sema.arena), + resolved_type, + try lhs_val.intMul(rhs_val, resolved_type, sema.arena), ); } else { return sema.addConstant( - scalar_type, - try lhs_val.floatMul(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, target), ); } } else break :rs .{ .src = lhs_src, .air_tag = .mul }; @@ -9567,30 +9552,30 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } - if (lhs_val.compare(.eq, Value.one, scalar_type)) { + if (lhs_val.compare(.eq, Value.one, lhs_ty)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (rhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } - if (rhs_val.compare(.eq, Value.one, scalar_type)) { + if (rhs_val.compare(.eq, Value.one, rhs_ty)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } return sema.addConstant( - scalar_type, - try lhs_val.numberMulWrap(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, target), ); } else break :rs .{ .src = lhs_src, .air_tag = .mulwrap }; } else break :rs .{ .src = rhs_src, .air_tag = .mulwrap }; @@ -9603,34 +9588,34 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } - if (lhs_val.compare(.eq, Value.one, scalar_type)) { + if (lhs_val.compare(.eq, Value.one, lhs_ty)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { if (rhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (rhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } - if (rhs_val.compare(.eq, Value.one, scalar_type)) { + if (rhs_val.compare(.eq, Value.one, rhs_ty)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intMul(rhs_val, sema.arena) + try 
lhs_val.intMul(rhs_val, resolved_type, sema.arena) else - try lhs_val.intMulSat(rhs_val, scalar_type, sema.arena, target); + try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, target); - return sema.addConstant(scalar_type, val); + return sema.addConstant(resolved_type, val); } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat }; } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat }; }, @@ -9654,9 +9639,9 @@ fn analyzeArithmetic( return sema.failWithUseOfUndef(block, lhs_src); } if (lhs_val.compareWithZero(.eq)) { - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } - } else if (lhs_ty.isSignedInt()) { + } else if (lhs_scalar_ty.isSignedInt()) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } if (maybe_rhs_val) |rhs_val| { @@ -9667,7 +9652,7 @@ fn analyzeArithmetic( return sema.failWithDivideByZero(block, rhs_src); } if (maybe_lhs_val) |lhs_val| { - const rem_result = try lhs_val.intRem(rhs_val, sema.arena); + const rem_result = try lhs_val.intRem(rhs_val, resolved_type, sema.arena); // If this answer could possibly be different by doing `intMod`, // we must emit a compile error. Otherwise, it's OK. if (rhs_val.compareWithZero(.lt) != lhs_val.compareWithZero(.lt) and @@ -9681,12 +9666,12 @@ fn analyzeArithmetic( } if (lhs_val.compareWithZero(.lt)) { // Negative - return sema.addConstant(scalar_type, Value.zero); + return sema.addConstant(resolved_type, Value.zero); } - return sema.addConstant(scalar_type, rem_result); + return sema.addConstant(resolved_type, rem_result); } break :rs .{ .src = lhs_src, .air_tag = .rem }; - } else if (rhs_ty.isSignedInt()) { + } else if (rhs_scalar_ty.isSignedInt()) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } else { break :rs .{ .src = rhs_src, .air_tag = .rem }; @@ -9708,8 +9693,8 @@ fn analyzeArithmetic( return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } return sema.addConstant( - scalar_type, - try lhs_val.floatRem(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target), ); } else { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); @@ -9745,8 +9730,8 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { return sema.addConstant( - scalar_type, - try lhs_val.intRem(rhs_val, sema.arena), + resolved_type, + try lhs_val.intRem(rhs_val, resolved_type, sema.arena), ); } break :rs .{ .src = lhs_src, .air_tag = .rem }; @@ -9765,12 +9750,12 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { return sema.addConstant( - scalar_type, - try lhs_val.floatRem(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatRem(rhs_val, resolved_type, sema.arena, target), ); } else break :rs .{ .src = rhs_src, .air_tag = .rem }; } else break :rs .{ .src = lhs_src, .air_tag = .rem }; @@ -9802,8 +9787,8 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { return sema.addConstant( - scalar_type, - try lhs_val.intMod(rhs_val, sema.arena), + resolved_type, + try lhs_val.intMod(rhs_val, resolved_type, sema.arena), ); } break :rs .{ .src = lhs_src, .air_tag = .mod }; @@ -9822,12 +9807,12 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - return sema.addConstUndef(scalar_type); + return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { 
return sema.addConstant( - scalar_type, - try lhs_val.floatMod(rhs_val, scalar_type, sema.arena, target), + resolved_type, + try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, target), ); } else break :rs .{ .src = rhs_src, .air_tag = .mod }; } else break :rs .{ .src = lhs_src, .air_tag = .mod }; @@ -10178,6 +10163,11 @@ fn analyzeCmp( ) CompileError!Air.Inst.Ref { const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); + try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); + + if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { + return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src); + } if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) { // This operation allows any combination of integer and float types, regardless of the // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for @@ -10212,6 +10202,12 @@ fn cmpSelf( if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (resolved_type.zigTypeTag() == .Vector) { + const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.@"bool"); + const cmp_val = try lhs_val.compareVector(op, rhs_val, resolved_type, sema.arena); + return sema.addConstant(result_ty, cmp_val); + } + if (lhs_val.compare(op, rhs_val, resolved_type)) { return Air.Inst.Ref.bool_true; } else { @@ -10237,16 +10233,12 @@ fn cmpSelf( } }; try sema.requireRuntimeBlock(block, runtime_src); - - const tag: Air.Inst.Tag = switch (op) { - .lt => .cmp_lt, - .lte => .cmp_lte, - .eq => .cmp_eq, - .gte => .cmp_gte, - .gt => .cmp_gt, - .neq => .cmp_neq, - }; - // TODO handle vectors + if (resolved_type.zigTypeTag() == .Vector) { + const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.@"bool"); + const result_ty_ref = try sema.addType(result_ty); + return block.addCmpVector(casted_lhs, casted_rhs, op, result_ty_ref); + } + const tag = Air.Inst.Tag.fromCmpOp(op); return block.addBinOp(tag, casted_lhs, casted_rhs); } @@ -11367,7 +11359,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi const elem_ty = operand.elemType2(); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); return Type.Tag.vector.create(sema.arena, .{ - .len = operand.arrayLen(), + .len = operand.vectorLen(), .elem_type = log2_elem_ty, }); }, @@ -13298,7 +13290,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { const target = sema.mod.getTarget(); - const result_val = val.floatToInt(sema.arena, dest_ty, target) catch |err| switch (err) { + const result_val = val.floatToInt(sema.arena, operand_ty, dest_ty, target) catch |err| switch (err) { error.FloatCannotFit => { return sema.fail(block, operand_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty }); }, @@ -13325,7 +13317,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { const target = sema.mod.getTarget(); - const result_val = try val.intToFloat(sema.arena, dest_ty, target); + const result_val = try val.intToFloat(sema.arena, operand_ty, dest_ty, target); return sema.addConstant(dest_ty, result_val); } @@ -13535,14 +13527,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!is_vector) { return sema.addConstant( dest_ty, - try val.intTrunc(sema.arena, dest_info.signedness, dest_info.bits), + try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits), ); } var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, operand_ty.vectorLen()); for (elems) |*elem, i| { const elem_val = val.elemValueBuffer(i, &elem_buf); - elem.* = try elem_val.intTrunc(sema.arena, dest_info.signedness, dest_info.bits); + elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits); } return sema.addConstant( dest_ty, @@ -14097,13 +14089,40 @@ fn checkSimdBinOp( ) CompileError!SimdBinOp { const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); - var vec_len: ?usize = null; + try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); + var vec_len: ?usize = if (lhs_ty.zigTypeTag() == .Vector) lhs_ty.vectorLen() else null; + const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ + .override = &[_]LazySrcLoc{ lhs_src, rhs_src }, + }); + const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src); + const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src); + + return SimdBinOp{ + .len = vec_len, + .lhs = lhs, + .rhs = rhs, + .lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs), + .rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs), + .result_ty = result_ty, + .scalar_ty = result_ty.scalarType(), + }; +} + +fn checkVectorizableBinaryOperands( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + lhs_ty: Type, + rhs_ty: Type, + lhs_src: LazySrcLoc, + rhs_src: LazySrcLoc, +) CompileError!void { + const lhs_zig_ty_tag = lhs_ty.zigTypeTag(); + const rhs_zig_ty_tag = rhs_ty.zigTypeTag(); if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) { - const lhs_len = lhs_ty.arrayLen(); - const rhs_len = rhs_ty.arrayLen(); + const lhs_len = lhs_ty.vectorLen(); + const rhs_len = rhs_ty.vectorLen(); if (lhs_len != rhs_len) { const msg = msg: { const msg = try sema.errMsg(block, src, "vector length mismatch", .{}); @@ -14114,7 +14133,6 @@ fn checkSimdBinOp( }; return sema.failWithOwnedErrorMsg(block, msg); } - vec_len = try sema.usizeCast(block, lhs_src, lhs_len); } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) { const msg = msg: { const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: {} and {}", .{ @@ -14132,21 +14150,6 @@ fn checkSimdBinOp( }; return sema.failWithOwnedErrorMsg(block, msg); } - const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ - .override = &[_]LazySrcLoc{ lhs_src, rhs_src }, - }); - const lhs = try sema.coerce(block, result_ty, uncasted_lhs, lhs_src); - const rhs = try sema.coerce(block, result_ty, uncasted_rhs, rhs_src); - - return SimdBinOp{ - .len = vec_len, - .lhs = lhs, - .rhs = rhs, - .lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, 
lhs), - .rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs), - .result_ty = result_ty, - .scalar_ty = result_ty.scalarType(), - }; } fn resolveExportOptions( @@ -14376,9 +14379,9 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. while (i < vec_len) : (i += 1) { const elem_val = operand_val.elemValueBuffer(i, &elem_buf); switch (operation) { - .And => accum = try accum.bitwiseAnd(elem_val, sema.arena), - .Or => accum = try accum.bitwiseOr(elem_val, sema.arena), - .Xor => accum = try accum.bitwiseXor(elem_val, sema.arena), + .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena), + .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena), + .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena), .Min => accum = accum.numberMin(elem_val), .Max => accum = accum.numberMax(elem_val), .Add => accum = try accum.numberAddWrap(elem_val, scalar_ty, sema.arena, target), @@ -14697,10 +14700,10 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Xchg => operand_val, .Add => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target), .Sub => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target), - .And => try stored_val.bitwiseAnd (operand_val, sema.arena), + .And => try stored_val.bitwiseAnd (operand_val, operand_ty, sema.arena), .Nand => try stored_val.bitwiseNand (operand_val, operand_ty, sema.arena, target), - .Or => try stored_val.bitwiseOr (operand_val, sema.arena), - .Xor => try stored_val.bitwiseXor (operand_val, sema.arena), + .Or => try stored_val.bitwiseOr (operand_val, operand_ty, sema.arena), + .Xor => try stored_val.bitwiseXor (operand_val, operand_ty, sema.arena), .Max => stored_val.numberMax (operand_val), .Min => stored_val.numberMin (operand_val), // zig fmt: on @@ -17523,7 +17526,7 @@ fn coerce( if (val.floatHasFraction()) { return sema.fail(block, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val.fmtValue(inst_ty), dest_ty }); } - const result_val = val.floatToInt(sema.arena, dest_ty, target) catch |err| switch (err) { + const result_val = val.floatToInt(sema.arena, inst_ty, dest_ty, target) catch |err| switch (err) { error.FloatCannotFit => { return sema.fail(block, inst_src, "integer value {d} cannot be stored in type '{}'", .{ std.math.floor(val.toFloat(f64)), dest_ty }); }, @@ -17586,7 +17589,7 @@ fn coerce( }, .Int, .ComptimeInt => int: { const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse break :int; - const result_val = try val.intToFloat(sema.arena, dest_ty, target); + const result_val = try val.intToFloat(sema.arena, inst_ty, dest_ty, target); // TODO implement this compile error //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty)) { @@ -17823,8 +17826,21 @@ fn coerceInMemoryAllowed( return .ok; } + // Vectors + if (dest_tag == .Vector and src_tag == .Vector) vectors: { + const dest_len = dest_ty.vectorLen(); + const src_len = src_ty.vectorLen(); + if (dest_len != src_len) break :vectors; + + const dest_elem_ty = dest_ty.scalarType(); + const src_elem_ty = src_ty.scalarType(); + const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src); + if (child == .no_match) break :vectors; + + return .ok; + } + // TODO: non-pointer-like optionals - // TODO: vectors return .no_match; } @@ -19697,19 +19713,6 @@ fn cmpNumeric( const lhs_ty_tag = 
lhs_ty.zigTypeTag(); const rhs_ty_tag = rhs_ty.zigTypeTag(); - if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) { - if (lhs_ty.vectorLen() != rhs_ty.vectorLen()) { - return sema.fail(block, src, "vector length mismatch: {d} and {d}", .{ - lhs_ty.vectorLen(), rhs_ty.vectorLen(), - }); - } - return sema.fail(block, src, "TODO implement support for vectors in cmpNumeric", .{}); - } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) { - return sema.fail(block, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{ - lhs_ty, rhs_ty, - }); - } - const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { @@ -19895,6 +19898,46 @@ fn cmpNumeric( return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } +/// Asserts that lhs and rhs types are both vectors. +fn cmpVector( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, + op: std.math.CompareOperator, + lhs_src: LazySrcLoc, + rhs_src: LazySrcLoc, +) CompileError!Air.Inst.Ref { + const lhs_ty = sema.typeOf(lhs); + const rhs_ty = sema.typeOf(rhs); + assert(lhs_ty.zigTypeTag() == .Vector); + assert(rhs_ty.zigTypeTag() == .Vector); + try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); + + const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.@"bool"); + + const runtime_src: LazySrcLoc = src: { + if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { + if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { + if (lhs_val.isUndef() or rhs_val.isUndef()) { + return sema.addConstUndef(result_ty); + } + const cmp_val = try lhs_val.compareVector(op, rhs_val, lhs_ty, sema.arena); + return sema.addConstant(result_ty, cmp_val); + } else { + break :src rhs_src; + } + } else { + break :src lhs_src; + } + }; + + try sema.requireRuntimeBlock(block, runtime_src); + const result_ty_inst = try sema.addType(result_ty); + return block.addCmpVector(lhs, rhs, op, result_ty_inst); +} + fn wrapOptional( sema: *Sema, block: *Block, @@ -21201,7 +21244,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { map.putAssumeCapacityContext(copied_val, {}, .{ .ty = int_tag_ty }); } else { const val = if (last_tag_val) |val| - try val.intAdd(Value.one, sema.arena) + try val.intAdd(Value.one, int_tag_ty, sema.arena) else Value.zero; last_tag_val = val; diff --git a/src/value.zig b/src/value.zig index c5e082485a..6f0f786072 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1846,8 +1846,23 @@ pub const Value = extern union { return order(lhs, rhs).compare(op); } - /// Asserts the value is comparable. Both operands have type `ty`. + /// Asserts the values are comparable. Both operands have type `ty`. + /// Vector results will be reduced with AND. pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type) bool { + if (ty.zigTypeTag() == .Vector) { + var i: usize = 0; + while (i < ty.vectorLen()) : (i += 1) { + if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType())) { + return false; + } + } + return true; + } + return compareScalar(lhs, op, rhs, ty); + } + + /// Asserts the values are comparable. Both operands have type `ty`. 
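
With `cmpVector` added on the Sema side and `compareVector` here in value.zig, lane-wise comparisons both fold at comptime and lower to a `cmp_vector` instruction at runtime. A minimal sketch of the behavior this enables, written in the style of test/behavior/vector.zig (illustrative only, not part of the patch):

    const std = @import("std");
    const expect = std.testing.expect;

    test "vector compare produces a bool vector" {
        const S = struct {
            fn doTheTest() !void {
                const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 };
                const b: @Vector(4, u32) = [_]u32{ 1, 9, 3, 0 };
                // Lane-wise ==, converted to an array for checking.
                const eq = @as([4]bool, a == b);
                const expected = [_]bool{ true, false, true, false };
                for (eq) |e, i| try expect(e == expected[i]);
            }
        };
        try S.doTheTest();
        comptime try S.doTheTest();
    }
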
+ pub fn compareScalar(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type) bool { return switch (op) { .eq => lhs.eql(rhs, ty), .neq => !lhs.eql(rhs, ty), @@ -1855,18 +1870,25 @@ pub const Value = extern union { }; } + /// Asserts the values are comparable vectors of type `ty`. + pub fn compareVector(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, allocator: Allocator) !Value { + assert(ty.zigTypeTag() == .Vector); + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + const res_bool = compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType()); + scalar.* = if (res_bool) Value.@"true" else Value.@"false"; + } + return Value.Tag.aggregate.create(allocator, result_data); + } + /// Asserts the value is comparable. - /// For vectors this is only valid with op == .eq. + /// Vector results will be reduced with AND. pub fn compareWithZero(lhs: Value, op: std.math.CompareOperator) bool { switch (lhs.tag()) { - .repeated => { - assert(op == .eq); - return lhs.castTag(.repeated).?.data.compareWithZero(.eq); - }, + .repeated => return lhs.castTag(.repeated).?.data.compareWithZero(op), .aggregate => { - assert(op == .eq); for (lhs.castTag(.aggregate).?.data) |elem_val| { - if (!elem_val.compareWithZero(.eq)) return false; + if (!elem_val.compareWithZero(op)) return false; } return true; }, @@ -2404,6 +2426,27 @@ pub const Value = extern union { }; } + /// Index into a vector-like `Value`. Asserts `index` is a valid index for `val`. + /// Some scalar values are considered vector-like to avoid needing to allocate + /// a new `repeated` each time a constant is used. + pub fn indexVectorlike(val: Value, index: usize) Value { + return switch (val.tag()) { + .aggregate => val.castTag(.aggregate).?.data[index], + + .repeated => val.castTag(.repeated).?.data, + // These values will implicitly be treated as `repeated`. + .zero, + .one, + .bool_false, + .bool_true, + .int_i64, + .int_u64, + => val, + + else => unreachable, + }; + } + /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. 
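
The `indexVectorlike` helper above carries the rest of this patch: scalar constants such as `Value.zero` or `bool_true` broadcast like a `repeated` value without any allocation, so every lane loop can treat scalar-like and aggregate operands uniformly. A toy standalone analogue of the idea (hypothetical types; the real code operates on `Value`):

    const std = @import("std");

    // Toy model: scalar-like values act as if repeated across all lanes.
    const Val = union(enum) {
        scalar: i64,
        aggregate: []const i64,

        fn indexVectorlike(v: @This(), i: usize) i64 {
            return switch (v) {
                .scalar => |s| s, // implicit broadcast
                .aggregate => |elems| elems[i],
            };
        }
    };

    test "scalar-like values broadcast" {
        const zero = Val{ .scalar = 0 };
        const vec = Val{ .aggregate = &[_]i64{ 1, 2, 3 } };
        var i: usize = 0;
        while (i < 3) : (i += 1) {
            try std.testing.expect(zero.indexVectorlike(i) == 0);
            try std.testing.expect(vec.indexVectorlike(i) == @intCast(i64, i) + 1);
        }
    }
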
pub fn elemValue(val: Value, arena: Allocator, index: usize) !Value { @@ -2646,25 +2689,38 @@ pub const Value = extern union { }; } - pub fn intToFloat(val: Value, arena: Allocator, dest_ty: Type, target: Target) !Value { + pub fn intToFloat(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value { + if (int_ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, int_ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intToFloatScalar(val.indexVectorlike(i), arena, int_ty.scalarType(), float_ty.scalarType(), target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return intToFloatScalar(val, arena, int_ty, float_ty, target); + } + + pub fn intToFloatScalar(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value { + assert(int_ty.isNumeric() and !int_ty.isAnyFloat()); + assert(float_ty.isAnyFloat()); switch (val.tag()) { .undef, .zero, .one => return val, .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 .int_u64 => { - return intToFloatInner(val.castTag(.int_u64).?.data, arena, dest_ty, target); + return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target); }, .int_i64 => { - return intToFloatInner(val.castTag(.int_i64).?.data, arena, dest_ty, target); + return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target); }, .int_big_positive => { const limbs = val.castTag(.int_big_positive).?.data; const float = bigIntToFloat(limbs, true); - return floatToValue(float, arena, dest_ty, target); + return floatToValue(float, arena, float_ty, target); }, .int_big_negative => { const limbs = val.castTag(.int_big_negative).?.data; const float = bigIntToFloat(limbs, false); - return floatToValue(float, arena, dest_ty, target); + return floatToValue(float, arena, float_ty, target); }, else => unreachable, } @@ -2694,7 +2750,20 @@ pub const Value = extern union { } } - pub fn floatToInt(val: Value, arena: Allocator, dest_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value { + pub fn floatToInt(val: Value, arena: Allocator, float_ty: Type, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value { + if (float_ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatToIntScalar(val.indexVectorlike(i), arena, float_ty.scalarType(), int_ty.scalarType(), target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatToIntScalar(val, arena, float_ty, int_ty, target); + } + + pub fn floatToIntScalar(val: Value, arena: Allocator, float_ty: Type, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value { + assert(float_ty.isAnyFloat()); + assert(int_ty.isInt()); const Limb = std.math.big.Limb; var value = val.toFloat(f64); // TODO: f128 ? @@ -2724,7 +2793,7 @@ pub const Value = extern union { else try Value.Tag.int_big_positive.create(arena, result_limbs); - if (result.intFitsInType(dest_ty, target)) { + if (result.intFitsInType(int_ty, target)) { return result; } else { return error.FloatCannotFit; @@ -2771,18 +2840,36 @@ pub const Value = extern union { }; } - /// Supports both floats and ints; handles undefined. + /// Supports both (vectors of) floats and ints; handles undefined scalars. 
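
Because `zirIntToFloat` and `zirFloatToInt` in Sema now pass the operand type through, these two conversions can recurse lane-wise. Assuming the builtins accept a vector result type once this lands (which is what the operand-type threading is for), a comptime use would look like:

    const std = @import("std");
    const expect = std.testing.expect;

    test "@intToFloat on a vector" {
        const S = struct {
            fn doTheTest() !void {
                const ints: @Vector(2, i32) = [_]i32{ 3, -4 };
                const floats = @intToFloat(@Vector(2, f32), ints);
                const arr = @as([2]f32, floats);
                try expect(arr[0] == 3.0);
                try expect(arr[1] == -4.0);
            }
        };
        comptime try S.doTheTest();
    }
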
pub fn numberAddWrap( lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target, + ) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try numberAddWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return numberAddWrapScalar(lhs, rhs, ty, arena, target); + } + + /// Supports both floats and ints; handles undefined. + pub fn numberAddWrapScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); if (ty.zigTypeTag() == .ComptimeInt) { - return intAdd(lhs, rhs, arena); + return intAdd(lhs, rhs, ty, arena); } if (ty.isAnyFloat()) { @@ -2809,13 +2896,31 @@ pub const Value = extern union { } } - /// Supports integers only; asserts neither operand is undefined. + /// Supports (vectors of) integers only; asserts neither operand is undefined. pub fn intAddSat( lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target, + ) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intAddSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return intAddSatScalar(lhs, rhs, ty, arena, target); + } + + /// Supports integers only; asserts neither operand is undefined. + pub fn intAddSatScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); @@ -2861,18 +2966,36 @@ pub const Value = extern union { }; } - /// Supports both floats and ints; handles undefined. + /// Supports both (vectors of) floats and ints; handles undefined scalars. pub fn numberSubWrap( lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target, + ) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try numberSubWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return numberSubWrapScalar(lhs, rhs, ty, arena, target); + } + + /// Supports both floats and ints; handles undefined. + pub fn numberSubWrapScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); if (ty.zigTypeTag() == .ComptimeInt) { - return intSub(lhs, rhs, arena); + return intSub(lhs, rhs, ty, arena); } if (ty.isAnyFloat()) { @@ -2883,13 +3006,31 @@ pub const Value = extern union { return overflow_result.wrapped_result; } - /// Supports integers only; asserts neither operand is undefined. + /// Supports (vectors of) integers only; asserts neither operand is undefined. 
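
Every conversion in this file follows the same mechanical recipe: keep the scalar logic in a `*Scalar` function and add a thin wrapper that detects a vector type, maps the scalar variant across the lanes via `indexVectorlike`, and rebuilds an `aggregate`. Stripped of compiler types, the shape is roughly this (a sketch with hypothetical names, not patch code):

    const std = @import("std");

    // Lift a scalar operation to vectors, one lane at a time.
    fn liftLanewise(
        comptime scalarOp: fn (i64, i64) i64,
        allocator: std.mem.Allocator,
        lhs: []const i64,
        rhs: []const i64,
    ) ![]i64 {
        const result_data = try allocator.alloc(i64, lhs.len);
        for (result_data) |*scalar, i| {
            scalar.* = scalarOp(lhs[i], rhs[i]);
        }
        return result_data;
    }

    test "liftLanewise" {
        const add = struct {
            fn f(a: i64, b: i64) i64 {
                return a + b;
            }
        }.f;
        const out = try liftLanewise(add, std.testing.allocator, &[_]i64{ 1, 2 }, &[_]i64{ 10, 20 });
        defer std.testing.allocator.free(out);
        try std.testing.expect(out[0] == 11 and out[1] == 22);
    }
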
pub fn intSubSat( lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target, + ) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intSubSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return intSubSatScalar(lhs, rhs, ty, arena, target); + } + + /// Supports integers only; asserts neither operand is undefined. + pub fn intSubSatScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); @@ -2944,18 +3085,36 @@ pub const Value = extern union { }; } - /// Supports both floats and ints; handles undefined. + /// Supports both (vectors of) floats and ints; handles undefined scalars. pub fn numberMulWrap( lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target, + ) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try numberMulWrapScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return numberMulWrapScalar(lhs, rhs, ty, arena, target); + } + + /// Supports both floats and ints; handles undefined. + pub fn numberMulWrapScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); if (ty.zigTypeTag() == .ComptimeInt) { - return intMul(lhs, rhs, arena); + return intMul(lhs, rhs, ty, arena); } if (ty.isAnyFloat()) { @@ -2966,13 +3125,31 @@ pub const Value = extern union { return overflow_result.wrapped_result; } - /// Supports integers only; asserts neither operand is undefined. + /// Supports (vectors of) integers only; asserts neither operand is undefined. pub fn intMulSat( lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target, + ) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intMulSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return intMulSatScalar(lhs, rhs, ty, arena, target); + } + + /// Supports (vectors of) integers only; asserts neither operand is undefined. + pub fn intMulSatScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); @@ -3025,8 +3202,20 @@ pub const Value = extern union { }; } - /// operands must be integers; handles undefined. + /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, target: Target) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try bitwiseNotScalar(val.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return bitwiseNotScalar(val, ty, arena, target); + } + + /// operands must be integers; handles undefined. 
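
With `intAddSat`, `intSubSat`, and `intMulSat` converted, the saturating operators should fold lane-wise on vectors as well. Assuming that behavior, a sketch:

    const std = @import("std");
    const expect = std.testing.expect;

    test "saturating vector arithmetic" {
        const S = struct {
            fn doTheTest() !void {
                const a: @Vector(2, u8) = [_]u8{ 3, 200 };
                const b: @Vector(2, u8) = [_]u8{ 10, 2 };
                const diff = @as([2]u8, a -| b); // saturates at 0
                const prod = @as([2]u8, a *| b); // saturates at 255
                try expect(diff[0] == 0 and diff[1] == 198);
                try expect(prod[0] == 30 and prod[1] == 255);
            }
        };
        try S.doTheTest();
        comptime try S.doTheTest();
    }
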
+ pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, target: Target) !Value { if (val.isUndef()) return Value.initTag(.undef); const info = ty.intInfo(target); @@ -3050,8 +3239,20 @@ pub const Value = extern union { return fromBigInt(arena, result_bigint.toConst()); } + /// operands must be (vectors of) integers; handles undefined scalars. + pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try bitwiseAndScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return bitwiseAndScalar(lhs, rhs, allocator); + } + /// operands must be integers; handles undefined. - pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: Allocator) !Value { + pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without @@ -3070,22 +3271,46 @@ pub const Value = extern union { return fromBigInt(arena, result_bigint.toConst()); } - /// operands must be integers; handles undefined. + /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try bitwiseNandScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return bitwiseNandScalar(lhs, rhs, ty, arena, target); + } + + /// operands must be integers; handles undefined. + pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - const anded = try bitwiseAnd(lhs, rhs, arena); + const anded = try bitwiseAnd(lhs, rhs, ty, arena); const all_ones = if (ty.isSignedInt()) try Value.Tag.int_i64.create(arena, -1) else try ty.maxInt(arena, target); - return bitwiseXor(anded, all_ones, arena); + return bitwiseXor(anded, all_ones, ty, arena); + } + + /// operands must be (vectors of) integers; handles undefined scalars. + pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try bitwiseOrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return bitwiseOrScalar(lhs, rhs, allocator); } /// operands must be integers; handles undefined. - pub fn bitwiseOr(lhs: Value, rhs: Value, arena: Allocator) !Value { + pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without @@ -3103,8 +3328,20 @@ pub const Value = extern union { return fromBigInt(arena, result_bigint.toConst()); } + /// operands must be (vectors of) integers; handles undefined scalars. 
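
At the language level, the bitwise conversions here (`bitwiseNot`, `bitwiseAnd`, `bitwiseNand`, `bitwiseOr`, and `bitwiseXor` below) mean expressions like `~v` and `a & b` on vectors now fold at comptime one lane at a time, matching the runtime behavior the tests already exercise. A sketch:

    const std = @import("std");
    const expect = std.testing.expect;

    test "vector bitwise ops" {
        const S = struct {
            fn doTheTest() !void {
                const x: @Vector(2, u8) = [_]u8{ 0b00001010, 0xff };
                const y: @Vector(2, u8) = [_]u8{ 0b00000110, 0x0f };
                const anded = @as([2]u8, x & y);
                const notted = @as([2]u8, ~x);
                try expect(anded[0] == 0b00000010 and anded[1] == 0x0f);
                try expect(notted[0] == 0b11110101 and notted[1] == 0x00);
            }
        };
        try S.doTheTest();
        comptime try S.doTheTest();
    }
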
+ pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try bitwiseXorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return bitwiseXorScalar(lhs, rhs, allocator); + } + /// operands must be integers; handles undefined. - pub fn bitwiseXor(lhs: Value, rhs: Value, arena: Allocator) !Value { + pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without @@ -3123,7 +3360,18 @@ pub const Value = extern union { return fromBigInt(arena, result_bigint.toConst()); } - pub fn intAdd(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn intAdd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intAddScalar(lhs, rhs, allocator); + } + + pub fn intAddScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3139,7 +3387,18 @@ pub const Value = extern union { return fromBigInt(allocator, result_bigint.toConst()); } - pub fn intSub(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn intSub(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intSubScalar(lhs, rhs, allocator); + } + + pub fn intSubScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3155,7 +3414,18 @@ pub const Value = extern union { return fromBigInt(allocator, result_bigint.toConst()); } - pub fn intDiv(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intDivScalar(lhs, rhs, allocator); + } + + pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
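
`intAdd` and `intSub` get the same treatment, so ordinary `+` and `-` on integer vectors fold per lane at comptime. For example (sketch):

    const std = @import("std");
    const expect = std.testing.expect;

    test "vector add and sub" {
        const S = struct {
            fn doTheTest() !void {
                const a: @Vector(2, i32) = [_]i32{ 10, -10 };
                const b: @Vector(2, i32) = [_]i32{ 3, 4 };
                const sum = @as([2]i32, a + b);
                const diff = @as([2]i32, a - b);
                try expect(sum[0] == 13 and sum[1] == -6);
                try expect(diff[0] == 7 and diff[1] == -14);
            }
        };
        try S.doTheTest();
        comptime try S.doTheTest();
    }
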
var lhs_space: Value.BigIntSpace = undefined; @@ -3180,7 +3450,18 @@ pub const Value = extern union { return fromBigInt(allocator, result_q.toConst()); } - pub fn intDivFloor(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intDivFloorScalar(lhs, rhs, allocator); + } + + pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3205,7 +3486,18 @@ pub const Value = extern union { return fromBigInt(allocator, result_q.toConst()); } - pub fn intRem(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn intRem(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intRemScalar(lhs, rhs, allocator); + } + + pub fn intRemScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3232,7 +3524,18 @@ pub const Value = extern union { return fromBigInt(allocator, result_r.toConst()); } - pub fn intMod(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intModScalar(lhs, rhs, allocator); + } + + pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
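
The division family (`intDiv`, `intDivFloor`, `intRem`, `intMod`) follows suit, so the `@divFloor`, `@rem`, and `@mod` builtins fold on vectors exactly as test/behavior/vector.zig checks them per element. A sketch:

    const std = @import("std");
    const expect = std.testing.expect;

    test "vector division and remainder" {
        const S = struct {
            fn doTheTest() !void {
                const x: @Vector(2, i32) = [_]i32{ 7, -7 };
                const y: @Vector(2, i32) = [_]i32{ 2, 2 };
                const df = @as([2]i32, @divFloor(x, y));
                const r = @as([2]i32, @rem(x, y));
                const m = @as([2]i32, @mod(x, y));
                try expect(df[0] == 3 and df[1] == -4);
                try expect(r[0] == 1 and r[1] == -1);
                try expect(m[0] == 1 and m[1] == 1);
            }
        };
        try S.doTheTest();
        comptime try S.doTheTest();
    }
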
var lhs_space: Value.BigIntSpace = undefined; @@ -3270,6 +3573,17 @@ pub const Value = extern union { } pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatRemScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatRemScalar(lhs, rhs, float_type, arena, target); + } + + pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { switch (float_type.floatBits(target)) { 16 => { const lhs_val = lhs.toFloat(f16); @@ -3304,6 +3618,17 @@ pub const Value = extern union { } pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatModScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatModScalar(lhs, rhs, float_type, arena, target); + } + + pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { switch (float_type.floatBits(target)) { 16 => { const lhs_val = lhs.toFloat(f16); @@ -3337,7 +3662,18 @@ pub const Value = extern union { } } - pub fn intMul(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intMulScalar(lhs, rhs, allocator); + } + + pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3358,7 +3694,30 @@ pub const Value = extern union { return fromBigInt(allocator, result_bigint.toConst()); } - pub fn intTrunc(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value { + pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, bits); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intTruncScalar(val, allocator, signedness, bits); + } + + /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. 
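
`intTruncBitsAsValue` below exists because the truncation width can itself vary per lane, as happens when a shift of a vector takes a vector of shift amounts (a reading of the doc comment above; the exact call sites are elsewhere in the series). Behaviorally, per-lane shift amounts look like this (sketch):

    const std = @import("std");
    const expect = std.testing.expect;

    test "per-lane shift amounts" {
        const S = struct {
            fn doTheTest() !void {
                const x: @Vector(2, u8) = [_]u8{ 1, 1 };
                const amt: @Vector(2, u3) = [_]u3{ 1, 3 };
                const shifted = @as([2]u8, x << amt);
                try expect(shifted[0] == 2 and shifted[1] == 8);
            }
        };
        try S.doTheTest();
        comptime try S.doTheTest();
    }
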
+ pub fn intTruncBitsAsValue(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: Value) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try intTruncScalar(val.indexVectorlike(i), allocator, signedness, @intCast(u16, bits.indexVectorlike(i).toUnsignedInt())); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt())); + } + + pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value { if (bits == 0) return Value.zero; var val_space: Value.BigIntSpace = undefined; @@ -3374,7 +3733,18 @@ pub const Value = extern union { return fromBigInt(allocator, result_bigint.toConst()); } - pub fn shl(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try shlScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return shlScalar(lhs, rhs, allocator); + } + + pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3430,6 +3800,23 @@ pub const Value = extern union { ty: Type, arena: Allocator, target: Target, + ) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try shlSatScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return shlSatScalar(lhs, rhs, ty, arena, target); + } + + pub fn shlSatScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, ) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
@@ -3458,13 +3845,41 @@ pub const Value = extern union { arena: Allocator, target: Target, ) !Value { - const shifted = try lhs.shl(rhs, arena); + if (ty.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try shlTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), ty.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return shlTruncScalar(lhs, rhs, ty, arena, target); + } + + pub fn shlTruncScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + target: Target, + ) !Value { + const shifted = try lhs.shl(rhs, ty, arena); const int_info = ty.intInfo(target); - const truncated = try shifted.intTrunc(arena, int_info.signedness, int_info.bits); + const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits); return truncated; } - pub fn shr(lhs: Value, rhs: Value, allocator: Allocator) !Value { + pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator) !Value { + if (ty.zigTypeTag() == .Vector) { + const result_data = try allocator.alloc(Value, ty.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try shrScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), allocator); + } + return Value.Tag.aggregate.create(allocator, result_data); + } + return shrScalar(lhs, rhs, allocator); + } + + pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3497,6 +3912,23 @@ pub const Value = extern union { float_type: Type, arena: Allocator, target: Target, + ) !Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatAddScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatAddScalar(lhs, rhs, float_type, arena, target); + } + + pub fn floatAddScalar( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + target: Target, ) !Value { switch (float_type.floatBits(target)) { 16 => { @@ -3534,6 +3966,23 @@ pub const Value = extern union { float_type: Type, arena: Allocator, target: Target, + ) !Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatSubScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatSubScalar(lhs, rhs, float_type, arena, target); + } + + pub fn floatSubScalar( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + target: Target, ) !Value { switch (float_type.floatBits(target)) { 16 => { @@ -3571,6 +4020,23 @@ pub const Value = extern union { float_type: Type, arena: Allocator, target: Target, + ) !Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatDivScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatDivScalar(lhs, rhs, float_type, arena, target); + } + + pub fn 
floatDivScalar( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + target: Target, ) !Value { switch (float_type.floatBits(target)) { 16 => { @@ -3611,6 +4077,23 @@ pub const Value = extern union { float_type: Type, arena: Allocator, target: Target, + ) !Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatDivFloorScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatDivFloorScalar(lhs, rhs, float_type, arena, target); + } + + pub fn floatDivFloorScalar( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + target: Target, ) !Value { switch (float_type.floatBits(target)) { 16 => { @@ -3651,6 +4134,23 @@ pub const Value = extern union { float_type: Type, arena: Allocator, target: Target, + ) !Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatDivTruncScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatDivTruncScalar(lhs, rhs, float_type, arena, target); + } + + pub fn floatDivTruncScalar( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + target: Target, ) !Value { switch (float_type.floatBits(target)) { 16 => { @@ -3691,6 +4191,23 @@ pub const Value = extern union { float_type: Type, arena: Allocator, target: Target, + ) !Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floatMulScalar(lhs.indexVectorlike(i), rhs.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatMulScalar(lhs, rhs, float_type, arena, target); + } + + pub fn floatMulScalar( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + target: Target, ) !Value { switch (float_type.floatBits(target)) { 16 => { @@ -3726,6 +4243,17 @@ pub const Value = extern union { } pub fn sqrt(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try sqrtScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return sqrtScalar(val, float_type, arena, target); + } + + pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -3758,6 +4286,17 @@ pub const Value = extern union { } pub fn sin(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try sinScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return sinScalar(val, float_type, arena, target); + } + + pub fn sinScalar(val: Value, float_type: Type, 
arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -3790,6 +4329,17 @@ pub const Value = extern union { } pub fn cos(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try cosScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return cosScalar(val, float_type, arena, target); + } + + pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -3822,6 +4372,17 @@ pub const Value = extern union { } pub fn exp(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try expScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return expScalar(val, float_type, arena, target); + } + + pub fn expScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -3854,6 +4415,17 @@ pub const Value = extern union { } pub fn exp2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try exp2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return exp2Scalar(val, float_type, arena, target); + } + + pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -3886,6 +4458,17 @@ pub const Value = extern union { } pub fn log(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try logScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return logScalar(val, float_type, arena, target); + } + + pub fn logScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -3918,6 +4501,17 @@ pub const Value = extern union { } pub fn log2(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try log2Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return log2Scalar(val, float_type, arena, target); + } + + pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, target: 
Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -3950,6 +4544,17 @@ pub const Value = extern union { } pub fn log10(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try log10Scalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return log10Scalar(val, float_type, arena, target); + } + + pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -3982,6 +4587,17 @@ pub const Value = extern union { } pub fn fabs(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try fabsScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return fabsScalar(val, float_type, arena, target); + } + + pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -4011,6 +4627,17 @@ pub const Value = extern union { } pub fn floor(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try floorScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floorScalar(val, float_type, arena, target); + } + + pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -4040,6 +4667,17 @@ pub const Value = extern union { } pub fn ceil(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try ceilScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return ceilScalar(val, float_type, arena, target); + } + + pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -4069,6 +4707,17 @@ pub const Value = extern union { } pub fn round(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try roundScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return roundScalar(val, float_type, arena, target); + } + + pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, target: 
Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -4098,6 +4747,17 @@ pub const Value = extern union { } pub fn trunc(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try truncScalar(val.indexVectorlike(i), float_type.scalarType(), arena, target); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return truncScalar(val, float_type, arena, target); + } + + pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { const f = val.toFloat(f16); @@ -4133,6 +4793,31 @@ pub const Value = extern union { addend: Value, arena: Allocator, target: Target, + ) Allocator.Error!Value { + if (float_type.zigTypeTag() == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen()); + for (result_data) |*scalar, i| { + scalar.* = try mulAddScalar( + float_type.scalarType(), + mulend1.indexVectorlike(i), + mulend2.indexVectorlike(i), + addend.indexVectorlike(i), + arena, + target, + ); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return mulAddScalar(float_type, mulend1, mulend2, addend, arena, target); + } + + pub fn mulAddScalar( + float_type: Type, + mulend1: Value, + mulend2: Value, + addend: Value, + arena: Allocator, + target: Target, ) Allocator.Error!Value { switch (float_type.floatBits(target)) { 16 => { From c9598c4cd38f742e75a54d5a28d169351104b61d Mon Sep 17 00:00:00 2001 From: William Sengir Date: Sun, 20 Mar 2022 01:11:21 -0700 Subject: [PATCH 10/12] behavior tests: enable all vector tests for the stage2 LLVM backend --- src/value.zig | 16 ++- test/behavior/vector.zig | 209 +++++++++++++++++++++++++++++++++------ 2 files changed, 184 insertions(+), 41 deletions(-) diff --git a/src/value.zig b/src/value.zig index 6f0f786072..454b1d76a2 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2693,16 +2693,14 @@ pub const Value = extern union { if (int_ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, int_ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try intToFloatScalar(val.indexVectorlike(i), arena, int_ty.scalarType(), float_ty.scalarType(), target); + scalar.* = try intToFloatScalar(val.indexVectorlike(i), arena, float_ty.scalarType(), target); } return Value.Tag.aggregate.create(arena, result_data); } - return intToFloatScalar(val, arena, int_ty, float_ty, target); + return intToFloatScalar(val, arena, float_ty, target); } - pub fn intToFloatScalar(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, target: Target) !Value { - assert(int_ty.isNumeric() and !int_ty.isAnyFloat()); - assert(float_ty.isAnyFloat()); + pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, target: Target) !Value { switch (val.tag()) { .undef, .zero, .one => return val, .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 @@ -2754,16 +2752,14 @@ pub const Value = extern union { if (float_ty.zigTypeTag() == .Vector) { const result_data = try arena.alloc(Value, float_ty.vectorLen()); for (result_data) |*scalar, i| { - scalar.* = try floatToIntScalar(val.indexVectorlike(i), arena, float_ty.scalarType(), int_ty.scalarType(), target); + scalar.* = try floatToIntScalar(val.indexVectorlike(i), arena, int_ty.scalarType(), target); } return 
Value.Tag.aggregate.create(arena, result_data); } - return floatToIntScalar(val, arena, float_ty, int_ty, target); + return floatToIntScalar(val, arena, int_ty, target); } - pub fn floatToIntScalar(val: Value, arena: Allocator, float_ty: Type, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value { - assert(float_ty.isAnyFloat()); - assert(int_ty.isInt()); + pub fn floatToIntScalar(val: Value, arena: Allocator, int_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value { const Limb = std.math.big.Limb; var value = val.toFloat(f64); // TODO: f128 ? diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 697969f42e..1d8dd9f661 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -6,7 +6,12 @@ const expect = std.testing.expect; const Vector = std.meta.Vector; test "implicit cast vector to array - bool" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { const a: Vector(4, bool) = [_]bool{ true, false, true, false }; @@ -19,7 +24,12 @@ test "implicit cast vector to array - bool" { } test "vector wrap operators" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 }; @@ -36,7 +46,12 @@ test "vector wrap operators" { } test "vector bin compares with mem.eql" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 }; @@ -54,7 +69,12 @@ test "vector bin compares with mem.eql" { } test "vector int operators" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, i32) = [4]i32{ 10, 20, 30, 40 }; @@ -70,7 +90,12 @@ test "vector int operators" { } test "vector float operators" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // 
TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, f32) = [4]f32{ 10, 20, 30, 40 }; @@ -86,7 +111,12 @@ test "vector float operators" { } test "vector bit operators" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, u8) = [4]u8{ 0b10101010, 0b10101010, 0b10101010, 0b10101010 }; @@ -101,7 +131,12 @@ test "vector bit operators" { } test "implicit cast vector to array" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var a: Vector(4, i32) = [_]i32{ 1, 2, 3, 4 }; @@ -115,7 +150,12 @@ test "implicit cast vector to array" { } test "array to vector" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var foo: f32 = 3.14; @@ -129,7 +169,12 @@ test "array to vector" { } test "vector casts of sizes not divisible by 8" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { { @@ -159,7 +204,12 @@ test "vector casts of sizes not divisible by 8" { } test "vector @splat" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // 
TODO + const S = struct { fn testForT(comptime N: comptime_int, v: anytype) !void { const T = @TypeOf(v); @@ -195,7 +245,12 @@ test "vector @splat" { } test "load vector elements via comptime index" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, i32) = [_]i32{ 1, 2, 3, undefined }; @@ -213,7 +268,12 @@ test "load vector elements via comptime index" { } test "store vector elements via comptime index" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, i32) = [_]i32{ 1, 5, 3, undefined }; @@ -237,7 +297,12 @@ test "store vector elements via comptime index" { } test "load vector elements via runtime index" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, i32) = [_]i32{ 1, 2, 3, undefined }; @@ -255,7 +320,12 @@ test "load vector elements via runtime index" { } test "store vector elements via runtime index" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { var v: Vector(4, i32) = [_]i32{ 1, 5, 3, undefined }; @@ -274,7 +344,12 @@ test "store vector elements via runtime index" { } test "initialize vector which is a struct field" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const Vec4Obj = struct { data: Vector(4, f32), }; @@ -292,7 +367,12 @@ test "initialize vector which is a struct field" { } test "vector comparison operators" { - if 
(builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { { @@ -327,7 +407,12 @@ test "vector comparison operators" { } test "vector division operators" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTestDiv(comptime T: type, x: Vector(4, T), y: Vector(4, T)) !void { if (!comptime std.meta.trait.isSignedInt(T)) { @@ -410,7 +495,12 @@ test "vector division operators" { } test "vector bitwise not operator" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTestNot(comptime T: type, x: Vector(4, T)) !void { var y = ~x; @@ -436,7 +526,12 @@ test "vector bitwise not operator" { } test "vector shift operators" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTestShift(x: anytype, y: anytype) !void { const N = @typeInfo(@TypeOf(x)).Array.len; @@ -667,7 +762,12 @@ test "vector reduce operation" { } test "mask parameter of @shuffle is comptime scope" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const __v4hi = std.meta.Vector(4, i16); var v4_a = __v4hi{ 0, 0, 0, 0 }; var v4_b = __v4hi{ 0, 0, 0, 0 }; @@ -681,13 +781,30 @@ test "mask parameter of @shuffle is comptime scope" { } test "saturating add" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) 
return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { - const u8x3 = std.meta.Vector(3, u8); - try expect(mem.eql(u8, &@as([3]u8, u8x3{ 255, 255, 255 }), &@as([3]u8, u8x3{ 255, 254, 1 } +| u8x3{ 1, 2, 255 }))); - const i8x3 = std.meta.Vector(3, i8); - try expect(mem.eql(i8, &@as([3]i8, i8x3{ 127, 127, 127 }), &@as([3]i8, i8x3{ 127, 126, 1 } +| i8x3{ 1, 2, 127 }))); + { // Broken out to avoid https://github.com/ziglang/zig/issues/11251 + const u8x3 = std.meta.Vector(3, u8); + const lhs = u8x3{ 255, 254, 1 }; + const rhs = u8x3{ 1, 2, 255 }; + const result = lhs +| rhs; + const expected = u8x3{ 255, 255, 255 }; + try expect(mem.eql(u8, &@as([3]u8, expected), &@as([3]u8, result))); + } + { // Broken out to avoid https://github.com/ziglang/zig/issues/11251 + const i8x3 = std.meta.Vector(3, i8); + const lhs = i8x3{ 127, 126, 1 }; + const rhs = i8x3{ 1, 2, 127 }; + const result = lhs +| rhs; + const expected = i8x3{ 127, 127, 127 }; + try expect(mem.eql(i8, &@as([3]i8, expected), &@as([3]i8, result))); + } } }; try S.doTheTest(); @@ -695,11 +812,21 @@ test "saturating add" { } test "saturating subtraction" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { + // Broken out to avoid https://github.com/ziglang/zig/issues/11251 const u8x3 = std.meta.Vector(3, u8); - try expect(mem.eql(u8, &@as([3]u8, u8x3{ 0, 0, 0 }), &@as([3]u8, u8x3{ 0, 0, 0 } -| u8x3{ 255, 255, 255 }))); + const lhs = u8x3{ 0, 0, 0 }; + const rhs = u8x3{ 255, 255, 255 }; + const result = lhs -| rhs; + const expected = u8x3{ 0, 0, 0 }; + try expect(mem.eql(u8, &@as([3]u8, expected), &@as([3]u8, result))); } }; try S.doTheTest(); @@ -707,14 +834,24 @@ test "saturating subtraction" { } test "saturating multiplication" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + // TODO: once #9660 has been solved, remove this line if (builtin.target.cpu.arch == .wasm32) return error.SkipZigTest; const S = struct { fn doTheTest() !void { + // Broken out to avoid https://github.com/ziglang/zig/issues/11251 const u8x3 = std.meta.Vector(3, u8); - try expect(mem.eql(u8, &@as([3]u8, u8x3{ 255, 255, 255 }), &@as([3]u8, u8x3{ 2, 2, 2 } *| u8x3{ 255, 255, 255 }))); + const lhs = u8x3{ 2, 2, 2 }; + const rhs = u8x3{ 255, 255, 255 }; + const result = lhs *| rhs; + const expected = u8x3{ 255, 255, 255 }; + try expect(mem.eql(u8, &@as([3]u8, expected), &@as([3]u8, result))); } }; @@ -723,11 +860,21 @@ test "saturating multiplication" { } test "saturating shift-left" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; 
// TODO + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + const S = struct { fn doTheTest() !void { + // Broken out to avoid https://github.com/ziglang/zig/issues/11251 const u8x3 = std.meta.Vector(3, u8); - try expect(mem.eql(u8, &@as([3]u8, u8x3{ 255, 255, 255 }), &@as([3]u8, u8x3{ 255, 255, 255 } <<| u8x3{ 1, 1, 1 }))); + const lhs = u8x3{ 1, 1, 1 }; + const rhs = u8x3{ 255, 255, 255 }; + const result = lhs <<| rhs; + const expected = u8x3{ 255, 255, 255 }; + try expect(mem.eql(u8, &@as([3]u8, expected), &@as([3]u8, result))); } }; try S.doTheTest(); From 513a53e9aaa6356faf385e257925a8949bd1b46a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 21 Mar 2022 17:03:05 -0700 Subject: [PATCH 11/12] Sema: restore propagation of error.GenericPoison for checking vectors --- src/Sema.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 500dec5246..208f33cf7c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -14118,8 +14118,8 @@ fn checkVectorizableBinaryOperands( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!void { - const lhs_zig_ty_tag = lhs_ty.zigTypeTag(); - const rhs_zig_ty_tag = rhs_ty.zigTypeTag(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) { const lhs_len = lhs_ty.vectorLen(); const rhs_len = rhs_ty.vectorLen(); From 7eddef423d74318ef9190864232f2e224837461e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 21 Mar 2022 17:03:24 -0700 Subject: [PATCH 12/12] behavior tests: alter test coverage for vectors * Use `@Vector` syntax instead of `std.meta.Vector`. * Use `var` instead of `const` for tests so that we get runtime coverage instead of only comptime coverage. Comptime coverage is done with `comptime doTheTest()` calls. 
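As a minimal sketch of the intended pattern (illustrative only; the test
name and values below are made up and are not taken from this diff):

    const std = @import("std");
    const expect = std.testing.expect;

    test "runtime and comptime coverage (sketch)" {
        const S = struct {
            fn doTheTest() !void {
                // `var` keeps the operands runtime-known, forcing the
                // backend to emit real vector code; `const` operands
                // would let Sema fold the whole expression at comptime.
                var a: @Vector(4, i32) = [4]i32{ 1, 2, 3, 4 };
                var b: @Vector(4, i32) = [4]i32{ 5, 6, 7, 8 };
                const sum = a + b;
                try expect(std.mem.eql(i32, &@as([4]i32, sum), &[4]i32{ 6, 8, 10, 12 }));
            }
        };
        try S.doTheTest(); // runtime coverage
        comptime try S.doTheTest(); // comptime coverage
    }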
--- test/behavior/vector.zig | 117 +++++++++++++++++++-------------------- 1 file changed, 58 insertions(+), 59 deletions(-) diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 1d8dd9f661..1651e8107b 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -3,7 +3,6 @@ const builtin = @import("builtin"); const mem = std.mem; const math = std.math; const expect = std.testing.expect; -const Vector = std.meta.Vector; test "implicit cast vector to array - bool" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO @@ -14,7 +13,7 @@ test "implicit cast vector to array - bool" { const S = struct { fn doTheTest() !void { - const a: Vector(4, bool) = [_]bool{ true, false, true, false }; + const a: @Vector(4, bool) = [_]bool{ true, false, true, false }; const result_array: [4]bool = a; try expect(mem.eql(bool, &result_array, &[4]bool{ true, false, true, false })); } @@ -32,12 +31,12 @@ test "vector wrap operators" { const S = struct { fn doTheTest() !void { - var v: Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 }; - var x: Vector(4, i32) = [4]i32{ 1, 2147483647, 3, 4 }; + var v: @Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 }; + var x: @Vector(4, i32) = [4]i32{ 1, 2147483647, 3, 4 }; try expect(mem.eql(i32, &@as([4]i32, v +% x), &[4]i32{ -2147483648, 2147483645, 33, 44 })); try expect(mem.eql(i32, &@as([4]i32, v -% x), &[4]i32{ 2147483646, 2147483647, 27, 36 })); try expect(mem.eql(i32, &@as([4]i32, v *% x), &[4]i32{ 2147483647, 2, 90, 160 })); - var z: Vector(4, i32) = [4]i32{ 1, 2, 3, -2147483648 }; + var z: @Vector(4, i32) = [4]i32{ 1, 2, 3, -2147483648 }; try expect(mem.eql(i32, &@as([4]i32, -%z), &[4]i32{ -1, -2, -3, -2147483648 })); } }; @@ -54,8 +53,8 @@ test "vector bin compares with mem.eql" { const S = struct { fn doTheTest() !void { - var v: Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 }; - var x: Vector(4, i32) = [4]i32{ 1, 2147483647, 30, 4 }; + var v: @Vector(4, i32) = [4]i32{ 2147483647, -2, 30, 40 }; + var x: @Vector(4, i32) = [4]i32{ 1, 2147483647, 30, 4 }; try expect(mem.eql(bool, &@as([4]bool, v == x), &[4]bool{ false, false, true, false })); try expect(mem.eql(bool, &@as([4]bool, v != x), &[4]bool{ true, true, false, true })); try expect(mem.eql(bool, &@as([4]bool, v < x), &[4]bool{ false, true, false, false })); @@ -77,8 +76,8 @@ test "vector int operators" { const S = struct { fn doTheTest() !void { - var v: Vector(4, i32) = [4]i32{ 10, 20, 30, 40 }; - var x: Vector(4, i32) = [4]i32{ 1, 2, 3, 4 }; + var v: @Vector(4, i32) = [4]i32{ 10, 20, 30, 40 }; + var x: @Vector(4, i32) = [4]i32{ 1, 2, 3, 4 }; try expect(mem.eql(i32, &@as([4]i32, v + x), &[4]i32{ 11, 22, 33, 44 })); try expect(mem.eql(i32, &@as([4]i32, v - x), &[4]i32{ 9, 18, 27, 36 })); try expect(mem.eql(i32, &@as([4]i32, v * x), &[4]i32{ 10, 40, 90, 160 })); @@ -98,8 +97,8 @@ test "vector float operators" { const S = struct { fn doTheTest() !void { - var v: Vector(4, f32) = [4]f32{ 10, 20, 30, 40 }; - var x: Vector(4, f32) = [4]f32{ 1, 2, 3, 4 }; + var v: @Vector(4, f32) = [4]f32{ 10, 20, 30, 40 }; + var x: @Vector(4, f32) = [4]f32{ 1, 2, 3, 4 }; try expect(mem.eql(f32, &@as([4]f32, v + x), &[4]f32{ 11, 22, 33, 44 })); try expect(mem.eql(f32, &@as([4]f32, v - x), &[4]f32{ 9, 18, 27, 36 })); try expect(mem.eql(f32, &@as([4]f32, v * x), &[4]f32{ 10, 40, 90, 160 })); @@ -119,8 +118,8 @@ test "vector bit operators" { const S = struct { fn doTheTest() !void { - var v: Vector(4, u8) = [4]u8{ 0b10101010, 0b10101010, 0b10101010, 0b10101010 }; - var x: Vector(4, 
u8) = [4]u8{ 0b11110000, 0b00001111, 0b10101010, 0b01010101 }; + var v: @Vector(4, u8) = [4]u8{ 0b10101010, 0b10101010, 0b10101010, 0b10101010 }; + var x: @Vector(4, u8) = [4]u8{ 0b11110000, 0b00001111, 0b10101010, 0b01010101 }; try expect(mem.eql(u8, &@as([4]u8, v ^ x), &[4]u8{ 0b01011010, 0b10100101, 0b00000000, 0b11111111 })); try expect(mem.eql(u8, &@as([4]u8, v | x), &[4]u8{ 0b11111010, 0b10101111, 0b10101010, 0b11111111 })); try expect(mem.eql(u8, &@as([4]u8, v & x), &[4]u8{ 0b10100000, 0b00001010, 0b10101010, 0b00000000 })); @@ -139,7 +138,7 @@ test "implicit cast vector to array" { const S = struct { fn doTheTest() !void { - var a: Vector(4, i32) = [_]i32{ 1, 2, 3, 4 }; + var a: @Vector(4, i32) = [_]i32{ 1, 2, 3, 4 }; var result_array: [4]i32 = a; result_array = a; try expect(mem.eql(i32, &result_array, &[4]i32{ 1, 2, 3, 4 })); @@ -160,7 +159,7 @@ test "array to vector" { fn doTheTest() !void { var foo: f32 = 3.14; var arr = [4]f32{ foo, 1.5, 0.0, 0.0 }; - var vec: Vector(4, f32) = arr; + var vec: @Vector(4, f32) = arr; try expect(mem.eql(f32, &@as([4]f32, vec), &arr)); } }; @@ -178,22 +177,22 @@ test "vector casts of sizes not divisible by 8" { const S = struct { fn doTheTest() !void { { - var v: Vector(4, u3) = [4]u3{ 5, 2, 3, 0 }; + var v: @Vector(4, u3) = [4]u3{ 5, 2, 3, 0 }; var x: [4]u3 = v; try expect(mem.eql(u3, &x, &@as([4]u3, v))); } { - var v: Vector(4, u2) = [4]u2{ 1, 2, 3, 0 }; + var v: @Vector(4, u2) = [4]u2{ 1, 2, 3, 0 }; var x: [4]u2 = v; try expect(mem.eql(u2, &x, &@as([4]u2, v))); } { - var v: Vector(4, u1) = [4]u1{ 1, 0, 1, 0 }; + var v: @Vector(4, u1) = [4]u1{ 1, 0, 1, 0 }; var x: [4]u1 = v; try expect(mem.eql(u1, &x, &@as([4]u1, v))); } { - var v: Vector(4, bool) = [4]bool{ false, false, true, false }; + var v: @Vector(4, bool) = [4]bool{ false, false, true, false }; var x: [4]bool = v; try expect(mem.eql(bool, &x, &@as([4]bool, v))); } @@ -214,7 +213,7 @@ test "vector @splat" { fn testForT(comptime N: comptime_int, v: anytype) !void { const T = @TypeOf(v); var vec = @splat(N, v); - try expect(Vector(N, T) == @TypeOf(vec)); + try expect(@Vector(N, T) == @TypeOf(vec)); var as_array = @as([N]T, vec); for (as_array) |elem| try expect(v == elem); } @@ -253,7 +252,7 @@ test "load vector elements via comptime index" { const S = struct { fn doTheTest() !void { - var v: Vector(4, i32) = [_]i32{ 1, 2, 3, undefined }; + var v: @Vector(4, i32) = [_]i32{ 1, 2, 3, undefined }; try expect(v[0] == 1); try expect(v[1] == 2); try expect(loadv(&v[2]) == 3); @@ -276,7 +275,7 @@ test "store vector elements via comptime index" { const S = struct { fn doTheTest() !void { - var v: Vector(4, i32) = [_]i32{ 1, 5, 3, undefined }; + var v: @Vector(4, i32) = [_]i32{ 1, 5, 3, undefined }; v[2] = 42; try expect(v[1] == 5); @@ -305,7 +304,7 @@ test "load vector elements via runtime index" { const S = struct { fn doTheTest() !void { - var v: Vector(4, i32) = [_]i32{ 1, 2, 3, undefined }; + var v: @Vector(4, i32) = [_]i32{ 1, 2, 3, undefined }; var i: u32 = 0; try expect(v[i] == 1); i += 1; @@ -328,7 +327,7 @@ test "store vector elements via runtime index" { const S = struct { fn doTheTest() !void { - var v: Vector(4, i32) = [_]i32{ 1, 5, 3, undefined }; + var v: @Vector(4, i32) = [_]i32{ 1, 5, 3, undefined }; var i: u32 = 2; v[i] = 1; try expect(v[1] == 5); @@ -351,7 +350,7 @@ test "initialize vector which is a struct field" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const Vec4Obj = struct { - data: Vector(4, f32), + data: @Vector(4, f32), }; const S = struct 
{ @@ -376,17 +375,17 @@ test "vector comparison operators" { const S = struct { fn doTheTest() !void { { - const v1: Vector(4, bool) = [_]bool{ true, false, true, false }; - const v2: Vector(4, bool) = [_]bool{ false, true, false, true }; + var v1: @Vector(4, bool) = [_]bool{ true, false, true, false }; + var v2: @Vector(4, bool) = [_]bool{ false, true, false, true }; try expect(mem.eql(bool, &@as([4]bool, @splat(4, true)), &@as([4]bool, v1 == v1))); try expect(mem.eql(bool, &@as([4]bool, @splat(4, false)), &@as([4]bool, v1 == v2))); try expect(mem.eql(bool, &@as([4]bool, @splat(4, true)), &@as([4]bool, v1 != v2))); try expect(mem.eql(bool, &@as([4]bool, @splat(4, false)), &@as([4]bool, v2 != v2))); } { - const v1 = @splat(4, @as(u32, 0xc0ffeeee)); - const v2: Vector(4, c_uint) = v1; - const v3 = @splat(4, @as(u32, 0xdeadbeef)); + var v1 = @splat(4, @as(u32, 0xc0ffeeee)); + var v2: @Vector(4, c_uint) = v1; + var v3 = @splat(4, @as(u32, 0xdeadbeef)); try expect(mem.eql(bool, &@as([4]bool, @splat(4, true)), &@as([4]bool, v1 == v2))); try expect(mem.eql(bool, &@as([4]bool, @splat(4, false)), &@as([4]bool, v1 == v3))); try expect(mem.eql(bool, &@as([4]bool, @splat(4, true)), &@as([4]bool, v1 != v3))); @@ -414,7 +413,7 @@ test "vector division operators" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { - fn doTheTestDiv(comptime T: type, x: Vector(4, T), y: Vector(4, T)) !void { + fn doTheTestDiv(comptime T: type, x: @Vector(4, T), y: @Vector(4, T)) !void { if (!comptime std.meta.trait.isSignedInt(T)) { const d0 = x / y; for (@as([4]T, d0)) |v, i| { @@ -435,7 +434,7 @@ test "vector division operators" { } } - fn doTheTestMod(comptime T: type, x: Vector(4, T), y: Vector(4, T)) !void { + fn doTheTestMod(comptime T: type, x: @Vector(4, T), y: @Vector(4, T)) !void { if ((!comptime std.meta.trait.isSignedInt(T)) and @typeInfo(T) != .Float) { const r0 = x % y; for (@as([4]T, r0)) |v, i| { @@ -502,7 +501,7 @@ test "vector bitwise not operator" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { - fn doTheTestNot(comptime T: type, x: Vector(4, T)) !void { + fn doTheTestNot(comptime T: type, x: @Vector(4, T)) !void { var y = ~x; for (@as([4]T, y)) |v, i| { try expect(~x[i] == v); @@ -538,8 +537,8 @@ test "vector shift operators" { const TX = @typeInfo(@TypeOf(x)).Array.child; const TY = @typeInfo(@TypeOf(y)).Array.child; - var xv = @as(Vector(N, TX), x); - var yv = @as(Vector(N, TY), y); + var xv = @as(@Vector(N, TX), x); + var yv = @as(@Vector(N, TY), y); var z0 = xv >> yv; for (@as([N]TX, z0)) |v, i| { @@ -555,8 +554,8 @@ test "vector shift operators" { const TX = @typeInfo(@TypeOf(x)).Array.child; const TY = @typeInfo(@TypeOf(y)).Array.child; - var xv = @as(Vector(N, TX), x); - var yv = @as(Vector(N, TY), y); + var xv = @as(@Vector(N, TX), x); + var yv = @as(@Vector(N, TY), y); var z = if (dir == .Left) @shlExact(xv, yv) else @shrExact(xv, yv); for (@as([N]TX, z)) |v, i| { @@ -768,10 +767,10 @@ test "mask parameter of @shuffle is comptime scope" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - const __v4hi = std.meta.Vector(4, i16); + const __v4hi = @Vector(4, i16); var v4_a = __v4hi{ 0, 0, 0, 0 }; var v4_b = __v4hi{ 0, 0, 0, 0 }; - var shuffled: __v4hi = @shuffle(i16, v4_a, v4_b, std.meta.Vector(4, i32){ + var shuffled: __v4hi = @shuffle(i16, v4_a, v4_b, @Vector(4, i32){ 
std.zig.c_translation.shuffleVectorIndex(0, @typeInfo(@TypeOf(v4_a)).Vector.len), std.zig.c_translation.shuffleVectorIndex(0, @typeInfo(@TypeOf(v4_a)).Vector.len), std.zig.c_translation.shuffleVectorIndex(0, @typeInfo(@TypeOf(v4_a)).Vector.len), @@ -790,18 +789,18 @@ test "saturating add" { const S = struct { fn doTheTest() !void { { // Broken out to avoid https://github.com/ziglang/zig/issues/11251 - const u8x3 = std.meta.Vector(3, u8); - const lhs = u8x3{ 255, 254, 1 }; - const rhs = u8x3{ 1, 2, 255 }; - const result = lhs +| rhs; + const u8x3 = @Vector(3, u8); + var lhs = u8x3{ 255, 254, 1 }; + var rhs = u8x3{ 1, 2, 255 }; + var result = lhs +| rhs; const expected = u8x3{ 255, 255, 255 }; try expect(mem.eql(u8, &@as([3]u8, expected), &@as([3]u8, result))); } { // Broken out to avoid https://github.com/ziglang/zig/issues/11251 - const i8x3 = std.meta.Vector(3, i8); - const lhs = i8x3{ 127, 126, 1 }; - const rhs = i8x3{ 1, 2, 127 }; - const result = lhs +| rhs; + const i8x3 = @Vector(3, i8); + var lhs = i8x3{ 127, 126, 1 }; + var rhs = i8x3{ 1, 2, 127 }; + var result = lhs +| rhs; const expected = i8x3{ 127, 127, 127 }; try expect(mem.eql(i8, &@as([3]i8, expected), &@as([3]i8, result))); } @@ -821,10 +820,10 @@ test "saturating subtraction" { const S = struct { fn doTheTest() !void { // Broken out to avoid https://github.com/ziglang/zig/issues/11251 - const u8x3 = std.meta.Vector(3, u8); - const lhs = u8x3{ 0, 0, 0 }; - const rhs = u8x3{ 255, 255, 255 }; - const result = lhs -| rhs; + const u8x3 = @Vector(3, u8); + var lhs = u8x3{ 0, 0, 0 }; + var rhs = u8x3{ 255, 255, 255 }; + var result = lhs -| rhs; const expected = u8x3{ 0, 0, 0 }; try expect(mem.eql(u8, &@as([3]u8, expected), &@as([3]u8, result))); } @@ -846,10 +845,10 @@ test "saturating multiplication" { const S = struct { fn doTheTest() !void { // Broken out to avoid https://github.com/ziglang/zig/issues/11251 - const u8x3 = std.meta.Vector(3, u8); - const lhs = u8x3{ 2, 2, 2 }; - const rhs = u8x3{ 255, 255, 255 }; - const result = lhs *| rhs; + const u8x3 = @Vector(3, u8); + var lhs = u8x3{ 2, 2, 2 }; + var rhs = u8x3{ 255, 255, 255 }; + var result = lhs *| rhs; const expected = u8x3{ 255, 255, 255 }; try expect(mem.eql(u8, &@as([3]u8, expected), &@as([3]u8, result))); } @@ -869,10 +868,10 @@ test "saturating shift-left" { const S = struct { fn doTheTest() !void { // Broken out to avoid https://github.com/ziglang/zig/issues/11251 - const u8x3 = std.meta.Vector(3, u8); - const lhs = u8x3{ 1, 1, 1 }; - const rhs = u8x3{ 255, 255, 255 }; - const result = lhs <<| rhs; + const u8x3 = @Vector(3, u8); + var lhs = u8x3{ 1, 1, 1 }; + var rhs = u8x3{ 255, 255, 255 }; + var result = lhs <<| rhs; const expected = u8x3{ 255, 255, 255 }; try expect(mem.eql(u8, &@as([3]u8, expected), &@as([3]u8, result))); }
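// A scalar sketch of the saturating semantics the vector tests above
// exercise (illustrative only; this test and its values are made up and
// are not part of the diff; `expect` is the file's existing import):
test "saturating operator semantics (sketch)" {
    var a: u8 = 250;
    var b: u8 = 10;
    try expect(a +| b == 255); // 250 + 10 = 260 clamps to maxInt(u8)
    try expect(b -| a == 0); // would go negative; unsigned clamps to 0
    try expect(a *| b == 255); // 2500 clamps to maxInt(u8)
    try expect(b <<| a == 255); // set bits shifted out; clamps to maxInt(u8)
}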